blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
893c4688b698c4ec2e1983497eef4117057736cb
|
64341466a2f01a718abece011226197d0585b393
|
/tests/testthat/test-gg-plots.R
|
9fa3ef44a1fc9f5f2c5695a01bffc64c069c8bff
|
[] |
no_license
|
ggobi/ggally
|
2aa29f340250005c084e1a913a94c69af685ccaa
|
d79b4efae51ae78bc3fc2d4c94504327169e1b42
|
refs/heads/master
| 2023-09-03T21:00:58.614094
| 2023-07-03T02:07:11
| 2023-07-03T02:07:11
| 1,384,794
| 558
| 124
| null | 2022-11-04T16:15:43
| 2011-02-19T00:18:46
|
R
|
UTF-8
|
R
| false
| false
| 7,902
|
r
|
test-gg-plots.R
|
context("gg-plots")
# This file takes too long
testthat::skip_on_cran()
# Shared fixtures for all tests below.
data(tips, package = "reshape")
data(nasa)
# Keep only a small slice of the nasa data so the date/cor tests stay fast.
nas <- subset(nasa, x <= 2 & y == 1)
test_that("denstrip", {
  # Both orientations (continuous-vs-discrete and discrete-vs-continuous)
  # should render and emit the default stat_bin message.
  # NOTE(review): aes_string() is deprecated in ggplot2 >= 3.0.0; consider
  # migrating to aes(.data[["sex"]], .data[["tip"]]) when convenient.
expect_message(
suppressWarnings(print(ggally_denstrip(tips, mapping = aes_string("sex", "tip")))),
"`stat_bin()` using `bins = 30`", fixed = TRUE
)
expect_message(
suppressWarnings(print(ggally_denstrip(tips, mapping = aes_string("tip", "sex")))),
"`stat_bin()` using `bins = 30`", fixed = TRUE
)
})
test_that("density", {
p <- ggally_density(
tips,
mapping = ggplot2::aes_string(x = "total_bill", y = "tip", fill = "..level..")
) + ggplot2::scale_fill_gradient(breaks = c(0.05, 0.1, 0.15, 0.2))
expect_equal(p$labels$fill, "level")
})
test_that("cor", {
ti <- tips
class(ti) <- c("NOTFOUND", "data.frame")
p <- ggally_cor(ti, ggplot2::aes(x = total_bill, y = tip, color = day), use = "complete.obs")
expect_equal(mapping_string(get("mapping", envir = p$layers[[2]])$colour), "labelp")
p <- ggally_cor(
ti,
ggplot2::aes(x = total_bill, y = tip, color = I("blue")),
use = "complete.obs"
)
expect_equal(mapping_string(get("mapping", envir = p$layers[[1]])$colour), "I(\"blue\")")
expect_err <- function(..., msg = NULL) {
expect_error(
ggally_cor(
ti, ggplot2::aes(x = total_bill, y = tip),
...
),
msg
)
}
vdiffr::expect_doppelganger(
"cor-green",
ggally_cor(ti, ggplot2::aes(x = total_bill, y = tip, color = I("green")))
)
ti3 <- ti2 <- ti
ti2[2, "total_bill"] <- NA
ti3[2, "total_bill"] <- NA
ti3[3, "tip"] <- NA
ti3[4, "total_bill"] <- NA
ti3[4, "tip"] <- NA
expect_warn <- function(data, msg) {
expect_warning(
ggally_cor(data, ggplot2::aes(x = total_bill, y = tip)),
msg
)
}
expect_warn(ti2, "Removing 1 row that")
expect_warn(ti3, "Removed 3 rows containing")
expect_error(
ggally_cor(
ti,
ggplot2::aes(x = total_bill, y = tip, color = size)
),
"must be categorical"
)
expect_silent(
ggally_cor(
ti,
ggplot2::aes(x = total_bill, y = tip, color = as.factor(size))
)
)
})
test_that("diagAxis", {
p <- ggally_diagAxis(iris, ggplot2::aes(x = Petal.Width))
pDat1 <- get("data", envir = p$layers[[2]])
attr(pDat1, "out.attrs") <- NULL
testDt1 <- data.frame(
xPos = c(0.076, 0.076, 0.076, 0.076, 0.076, 0.076, 0.500, 1.000, 1.500, 2.000, 2.500),
yPos = c(0.500, 1.000, 1.500, 2.000, 2.500, 0.076, 0.076, 0.076, 0.076, 0.076, 0.076),
lab = as.character(c(0.5, 1, 1.5, 2, 2.5, 0, 0.5, 1, 1.5, 2, 2.5)),
hjust = c(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.5, 0.5, 0.5, 0.5, 0.5),
vjust = c(0.5, 0.5, 0.5, 0.5, 0.5, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),
stringsAsFactors = FALSE
)
rownames(testDt1) <- 2:12
expect_equal(pDat1, testDt1)
p <- ggally_diagAxis(iris, ggplot2::aes(x = Species))
pDat2 <- get("data", envir = p$layers[[2]])
attr(pDat2, "out.attrs") <- NULL
testDt2 <- data.frame(
x = c(0.125, 0.500, 0.875),
y = c(0.875, 0.500, 0.125),
lab = c("setosa", "versicolor", "virginica")
)
expect_equal(pDat2, testDt2)
expect_error({
ggally_diagAxis(iris, mapping = ggplot2::aes(y = Sepal.Length))
}, "mapping\\$x is null.") # nolint
})
test_that("dates", {
class(nas) <- c("NOTFOUND", "data.frame")
p <- ggally_cor(nas, ggplot2::aes(x = date, y = ozone))
expect_equal(get("aes_params", envir = p$layers[[1]])$label, "Corr:\n0.278***")
p <- ggally_barDiag(nas, ggplot2::aes(x = date))
expect_equal(mapping_string(p$mapping$x), "date")
expect_equal(as.character(p$labels$y), "count")
})
test_that("cor stars are aligned", {
p <- ggally_cor(iris, ggplot2::aes(x = Sepal.Length, y = Petal.Width, color = as.factor(Species)))
expect_equal(get("aes_params", envir = p$layers[[1]])$label, "Corr: 0.818***")
#expect_equal(get("aes_params", envir = p$layers[[1]])$family, "mono")
labels <- eval_data_col(p$layers[[2]]$data, p$layers[[2]]$mapping$label)
expect_equal(as.character(labels), c(" setosa: 0.278. ", "versicolor: 0.546***", " virginica: 0.281* "))
})
test_that("ggally_statistic handles factors", {
simple_chisq <- function(x, y) {
scales::number(chisq.test(x, y)$p.value, accuracy = .001)
}
expect_silent({
p <- ggally_statistic(reshape::tips, aes(x = sex, y = day), text_fn = simple_chisq, title = "Chi^2")
})
})
test_that("rescale", {
p <- ggally_densityDiag(tips, mapping = ggplot2::aes(x = day), rescale = FALSE)
expect_true(p$labels$y == "density")
vdiffr::expect_doppelganger("rescale-false", p)
p <- ggally_densityDiag(tips, mapping = ggplot2::aes(x = day), rescale = TRUE)
expect_true(! identical(p$labels$y, "density"))
vdiffr::expect_doppelganger("rescale-true", p)
p <- ggally_barDiag(tips, mapping = ggplot2::aes(x = tip), binwidth = 0.25, rescale = FALSE)
expect_true(p$labels$y == "count")
vdiffr::expect_doppelganger("rescale-false-binwidth", p)
p <- ggally_barDiag(tips, mapping = ggplot2::aes(x = tip), binwidth = 0.25, rescale = TRUE)
expect_true(! identical(p$labels$y, "count"))
vdiffr::expect_doppelganger("rescale-true-binwidth", p)
})
test_that("shrink", {
p <- ggally_smooth_loess(iris, mapping = ggplot2::aes(Sepal.Width, Petal.Length))
expect_true(!is.null(p$coordinates$limits$y))
vdiffr::expect_doppelganger("shrink-true", p)
p <- ggally_smooth_loess(iris, mapping = ggplot2::aes(Sepal.Width, Petal.Length), shrink = FALSE)
expect_true(is.null(p$coordinates$limits$y))
vdiffr::expect_doppelganger("shrink-false", p)
})
test_that("smooth_se", {
p <- ggally_smooth_loess(iris, mapping = ggplot2::aes(Sepal.Width, Petal.Length), se = TRUE)
expect_equal(p$layers[[2]]$stat_params$se, TRUE)
vdiffr::expect_doppelganger("smooth-se-true", p)
p <- ggally_smooth_loess(iris, mapping = ggplot2::aes(Sepal.Width, Petal.Length), se = FALSE)
expect_equal(p$layers[[2]]$stat_params$se, FALSE)
vdiffr::expect_doppelganger("smooth-se-false", p)
})
test_that("ggally_count", {
p <- ggally_count(
as.data.frame(Titanic),
ggplot2::aes(x = Class, y = Survived, weight = Freq)
)
vdiffr::expect_doppelganger("titanic-count", p)
p <- ggally_count(
as.data.frame(Titanic),
ggplot2::aes(x = Class, y = Survived, weight = Freq),
fill = "red"
)
vdiffr::expect_doppelganger("titanic-count-red", p)
p <- ggally_count(
as.data.frame(Titanic),
ggplot2::aes(x = Class, y = Survived, weight = Freq, fill = Sex)
)
vdiffr::expect_doppelganger("titanic-count-sex", p)
p <- ggally_count(
as.data.frame(Titanic),
ggplot2::aes(x = Class, y = Survived, weight = Freq, fill = Class)
)
vdiffr::expect_doppelganger("titanic-count-class", p)
p <- ggally_count(
as.data.frame(Titanic),
ggplot2::aes(x = Survived, y = interaction(Sex, Age), weight = Freq, fill = Class)
)
vdiffr::expect_doppelganger("titanic-count-interaction", p)
# check that y character vectors are rendering
p <- ggally_count(
as.data.frame(Titanic),
ggplot2::aes(x = Class, y = toupper(Survived), weight = Freq, fill = Class)
)
vdiffr::expect_doppelganger("titanic-count-toupper", p)
# check countDiag
p <- ggally_countDiag(
as.data.frame(Titanic),
ggplot2::aes(x = Survived, weight = Freq, fill = Class)
)
vdiffr::expect_doppelganger("titanic-count-diag", p)
# change size of tiles
p <- ggally_count(
as.data.frame(Titanic),
ggplot2::aes(x = Class, y = Survived, weight = Freq, fill = Class),
x.width = .5
)
vdiffr::expect_doppelganger("titanic-count-diag-class", p)
# no warnings expected if na.rm = TRUE
p <- ggally_count(
as.data.frame(Titanic),
ggplot2::aes(x = interaction(Class, Age), y = Survived, weight = Freq, fill = Class),
na.rm = TRUE
)
vdiffr::expect_doppelganger("titanic-count-diag-interaction", p)
})
|
2e99b93e4f48864566576095460bb2592d919fe6
|
2bfec775dbfbc1e6db53f47ce1b0c6e18e22512e
|
/glucosinolate_profiling/script_representativeEicPlot.R
|
6b9cda1a620c13bb2410bea30ca159819d82cf37
|
[] |
no_license
|
noctillion/TOUA_Glucosinolate_GWAS
|
982669679c44c61877ad7ee46fc84cd1a19fbaf2
|
7839e2cc3de76c480be58641967704d80a7f6849
|
refs/heads/main
| 2023-08-01T14:27:09.831491
| 2021-09-13T17:58:46
| 2021-09-13T17:58:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,044
|
r
|
script_representativeEicPlot.R
|
library("xcms"); library("magrittr")
# Plot an extracted-ion chromatogram (EIC) for one m/z / retention-time window.
#
# msObjs: MS data object (OnDiskMSnExp from xcms/MSnbase) to extract from.
# mzR: numeric length-2 m/z range.
# rtR: numeric length-2 retention-time range (seconds).
# plot.title: main title for the plot.
#
# Bug fix: the original body ignored the `msObjs` argument and piped the
# global `MSnExpObj` instead, so every call plotted the same global object
# regardless of what was passed in.
eicPlotter = function(msObjs, mzR, rtR, plot.title){
  msObjs %>%
    filterRt(rt = rtR) %>%
    filterMz(mz = mzR) %>%
    chromatogram(aggregationFun = "max") %>%
    plot(col = "dodgerblue", main = plot.title)
}
peak.int.input = read.csv("~/glucMetadata/GSL_List_corrected_ADG2019Aug09.csv", h=T) # molecule names, RT and m/z ranges
## representative samples
filepath = "/glucFiles_mzML/Converted4xcms/sets23_only/"
files = list.files(filepath, pattern = "*_MS1converted.mzML", recursive = TRUE, full.names = TRUE)
MSnExpObj = readMSData(files[1150], msLevel. = 1, mode = "onDisk") # pick one sample, #1150 in this instance
mols = c("gsl.R2hBuen", "gsl.Pren", "gsl.S2hBuen","gsl.4mSOb","gsl.5mSOp","gsl.2hPeen","gsl.Buen",
"gsl.6mSOh","gsl.1hIM","gsl.7mSOh","gsl.Peen","gsl.8mSOo","gsl.IM","gsl.4moIM","gsl.1moIM",
"gsl.7mSh","gsl.8MTO","gsl.3mSOp")
# all peaks together (full m/z and retention time range for the entire QQQ run)
pdf("/Figures/eic_rep_sample.pdf", h = 4, w = 6.5)
eicPlotter(MSnExpObj, mzR = c(300,520), rtR = c(200,1150), plot.title = "representative sample")
dev.off()
# individual molecule peaks separately (plotting the full retention time range for each m/z range)
pdf("/Figures/eic_rep_sample_indiv.pdf", h = 30, w = 6.5)
par(mfrow = c(9,1))
for(i in mols[1:9]){
focal.mol = peak.int.input[peak.int.input$name == i,]
mzR1 = focal.mol$mz.min
mzR2 = focal.mol$mz.max
rt1 = focal.mol$rt.min
rt2 = focal.mol$rt.max
eicPlotter(MSnExpObj, rtR = c(200,1200), mzR = c(mzR1, mzR2), plot.title = paste0(focal.mol$name,"--",rt1,"-",rt2))
}
dev.off()
pdf("/Figures/eic_rep_sample_indiv2.pdf", h = 30, w = 6.5)
par(mfrow = c(9,1))
for(i in mols[10:18]){
focal.mol = peak.int.input[peak.int.input$name == i,]
mzR1 = focal.mol$mz.min
mzR2 = focal.mol$mz.max
rt1 = focal.mol$rt.min
rt2 = focal.mol$rt.max
eicPlotter(MSnExpObj, rtR = c(200,1200), mzR = c(mzR1, mzR2), plot.title = paste0(focal.mol$name,"--",rt1,"-",rt2))
}
dev.off()
|
38a506406100906a8c9682623bdebc76fc34b0ff
|
bba76e5ffb0b60e746870654e8970b88dd22550c
|
/man/diffEnrich.Rd
|
3a1c3f4d650f2e5d9928299ae0da8ad544cf05a3
|
[] |
no_license
|
SabaLab/diffEnrich
|
7ce95a828b2baaa68ee76a2ba25c1ec4cea27469
|
1c772eaf63ff7897ab9efbb50eadcb1ecd49990e
|
refs/heads/master
| 2022-07-27T20:18:58.878631
| 2022-06-27T17:27:35
| 2022-06-27T17:27:35
| 163,883,307
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 5,153
|
rd
|
diffEnrich.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/diffEnrich.R
\name{diffEnrich}
\alias{diffEnrich}
\alias{print.diffEnrich}
\alias{summary.diffEnrich}
\title{diffEnrich}
\usage{
diffEnrich(list1_pe, list2_pe, method = "BH", cutoff = 0.05)
\method{print}{diffEnrich}(x, ...)
\method{summary}{diffEnrich}(object, ...)
}
\arguments{
\item{list1_pe}{object of class \code{pathEnrich} generated from \code{\link{pathEnrich}}.
See example for \code{\link{pathEnrich}}.}
\item{list2_pe}{object of class \code{pathEnrich} generated from \code{\link{pathEnrich}}.
See example for \code{\link{pathEnrich}}.}
\item{method}{character. Character string telling \code{diffEnrich} which method to
use for multiple testing correction. Available methods are those provided by
\code{\link{p.adjust}}, and the default is "BH", or False Discovery Rate (FDR).}
\item{cutoff}{Numeric. The p-value threshold to be used as the cutoff when determining statistical significance, and used to filter list of significant pathways.}
\item{x}{object of class \code{diffEnrich}}
\item{\dots}{Unused}
\item{object}{object of class \code{diffEnrich}}
}
\value{
A list object of class \code{diffEnrich} that contains 5 items:
\describe{
\item{species}{The species used in enrichment}
\item{padj}{The method used to correct for multiple testing for the differential enrichment}
\item{sig_paths}{The KEGG pathways the reached statistical significance after multiple testing correction.}
\item{path_intersect}{the number of pathways that were shared (and therefore tested) between the gene lists.}
\item{de_table}{A data frame that summarizes the results of the differential enrichment analysis and contains the following variables:}
}
\describe{
\item{KEGG_PATHWAY_ID}{KEGG Pathway Identifier}
\item{KEGG_PATHWAY_description}{Description of KEGG Pathway (provided by KEGG)}
\item{KEGG_PATHWAY_cnt}{Number of Genes in KEGG Pathway}
\item{KEGG_DATABASE_cnt}{Number of Genes in KEGG Database}
\item{KEGG_PATHWAY_in_list1}{Number of Genes from gene list 1 in KEGG Pathway}
\item{KEGG_DATABASE_in_list1}{Number of Genes from gene list 1 in KEGG Database}
\item{expected_list1}{Expected number of genes from list 1 to be in KEGG pathway by chance (i.e., not enriched)}
\item{enrich_p_list1}{P-value for enrichment of list 1 genes related to KEGG pathway}
\item{p_adj_list1}{Multiple testing adjustment of enrich_p_list1 (default = False Discovery Rate (Benjamini and Hochberg))}
\item{fold_enrichment_list1}{KEGG_PATHWAY_in_list1/expected_list1}
\item{KEGG_PATHWAY_in_list2}{Number of Genes from gene list 2 in KEGG Pathway}
\item{KEGG_DATABASE_in_list2}{Number of Genes from gene list 2 in KEGG Database}
\item{expected_list2}{Expected number of genes from list 2 to be in KEGG pathway by chance (i.e., not enriched)}
\item{enrich_p_list2}{P-value for enrichment of list 2 genes related to KEGG pathway}
\item{p_adj_list2}{Multiple testing adjustment of enrich_p_list2 (default = False Discovery Rate (Benjamini and Hochberg))}
\item{fold_enrichment_list2}{KEGG_PATHWAY_in_list2/expected_list2}
\item{odd_ratio}{Odds of a gene from list 2 being from this KEGG pathway / Odds of a gene from list 1 being from this KEGG pathway}
\item{diff_enrich_p}{P-value for differential enrichment of this KEGG pathway between list 1 and list 2}
\item{diff_enrich_adjusted}{Multiple testing adjustment of diff_enrich_p (default = False Discovery Rate (Benjamini and Hochberg))}
}
}
\description{
This function takes the objects generated from \code{\link{pathEnrich}}.
If performing a differential enrichment analysis, the user will have 2 objects. There
will be one for the genes of interest in gene list 1 and one for the genes of interest in gene list 2 (see example for \code{\link{pathEnrich}}).
This function then uses a Fisher's Exact test to identify differentially enriched
pathways between the terms enriched in the gene-of-interest lists. \code{diffEnrich}
will remove KEGG pathways that do not contain any genes from either gene list as these
cannot be tested, and will print a warning message telling the user how many pathways
were removed.
\code{diffEnrich} returns a dataframe containing differentially enriched
pathways with their associated estimated odds ratio, unadjusted p-value, and fdr adjusted
p-value. S3 generic functions for \code{print} and \code{summary} are
provided. The \code{print} function prints the results table as a \code{tibble}, and the
\code{summary} function returns the number of pathways that reached statistical significance
as well as their descriptions, the number of genes used from the KEGG data base, the KEGG species,
the number of pathways that were shared (and therefore tested) between the gene lists and the
method used for multiple testing correction.
}
\examples{
## Generate individual enrichment reults
list1_pe <- pathEnrich(gk_obj = kegg, gene_list = geneLists$list1)
list2_pe <- pathEnrich(gk_obj = kegg, gene_list = geneLists$list2)
## Perform differential enrichment
dif_enrich <- diffEnrich(list1_pe = list1_pe, list2_pe = list2_pe, method = 'none', cutoff = 0.05)
}
|
fadbffecdf63b4cb995ee541c2522b0b0520864c
|
ea0b31ab522319d621872e256420f0ed34af8da9
|
/mymain.R
|
a562b63d032dff8657fe0cad8ef1c4eed19e9aae
|
[] |
no_license
|
nishant1995/LoanStatus
|
c90d8e77dc2c34bc53eccb0cd41171291b1d63c9
|
3cb16d91f958f96f7335783b83d836795c9e3f8f
|
refs/heads/master
| 2020-05-15T14:29:59.389569
| 2019-04-19T23:46:58
| 2019-04-19T23:46:58
| 182,338,160
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,607
|
r
|
mymain.R
|
###################################################################################################################
# Function to check if packages are installed
###################################################################################################################
begin_time = Sys.time()
# Ensure the packages in `pkg` are available: install any that are missing,
# then attach them all. Returns a named logical vector, one entry per
# package, indicating whether each one was attached successfully.
check.packages <- function(pkg) {
  already_installed <- installed.packages()[, "Package"]
  missing_pkgs <- pkg[!(pkg %in% already_installed)]
  if (length(missing_pkgs) > 0) {
    install.packages(missing_pkgs, dependencies = TRUE)
  }
  # require() (not library()) so failures yield FALSE instead of an error.
  sapply(pkg, require, character.only = TRUE)
}
packages = c("lubridate","caret","glmnet")
check.packages(packages)
###################################################################################################################
# Read in data
###################################################################################################################
train <- read.csv("train.csv")
test <- read.csv("test.csv")
train$loan_status <- ifelse(train$loan_status == "Fully Paid", 0, 1)
test = data.frame(test, loan_status = rep(0,nrow(test)))
data = rbind(train, test)
###################################################################################################################
# Treatment of Missing Values
###################################################################################################################
levels(data$emp_length) <- c(levels(data$emp_length), "N")
data$emp_length[is.na(data$emp_length)] = "N"
data$revol_util[is.na(data$revol_util)] = 53
data$mort_acc[is.na(data$mort_acc)] = 2
data$dti[is.na(data$dti)] = 18
data$pub_rec_bankruptcies[is.na(data$pub_rec_bankruptcies)] = 0
###################################################################################################################
# Recoding some variables
###################################################################################################################
cr_line_dates = parse_date_time(data$earliest_cr_line, orders=c("by"))
base_date = rep("2007-01-01", nrow(data))
base_date = ymd(base_date)
cr_line_dates = ymd(cr_line_dates)
cr_line_dates = interval(base_date, cr_line_dates) %/% months(1)
data$earliest_cr_line = cr_line_dates
data['fico_score'] = 0.5*data$fico_range_high + 0.5*data$fico_range_low
###################################################################################################################
# Log - Transformations
###################################################################################################################
data$annual_inc = log(1 + data$annual_inc)
data$revol_bal = log(1 + data$revol_bal)
data$revol_util = log(1 + data$revol_util)
data$pub_rec_bankruptcies = log(1 + data$pub_rec_bankruptcies)
data$mort_acc = log(1 + data$mort_acc)
data$open_acc = log(1 + data$open_acc)
data$fico_score = log(1 + data$fico_score)
###################################################################################################################
# Function to remove unnecessary or dominating categorical columns
###################################################################################################################
# Drop free-text / identifier-like / redundant columns before modeling:
# emp_title, title and zip_code are high-cardinality text; the two fico
# range columns were already combined into fico_score; grade duplicates
# other predictors. Returns `data` without these six columns (column order
# of the remaining columns is preserved).
remove_columns = function(data) {
  drop_cols <- c("emp_title", "title", "zip_code",
                 "fico_range_high", "fico_range_low", "grade")
  keep_cols <- setdiff(names(data), drop_cols)
  data[, keep_cols, drop = FALSE]
}
data = remove_columns(data)
###################################################################################################################
# One-hot encoding
###################################################################################################################
dmy = dummyVars("~.", data = data)
data = data.frame(predict(dmy, newdata=data))
###################################################################################################################
# Getting the splits back
###################################################################################################################
I.D = test$id
train = data[1:nrow(train),]
test = data[nrow(train)+1:nrow(test), names(data) != "loan_status"]
train$id = NULL
test$id = NULL
###################################################################################################################
# Logistic Regression Model
###################################################################################################################
model = glm(loan_status ~ ., data = train, family = binomial)
pred = predict(model, test, type = "response")
output = data.frame(I.D, pred)
colnames(output) = c('id','prob')
write.csv(output,'mysubmission1.txt',row.names = FALSE)
end_time = Sys.time()
Run_Time = end_time - begin_time
|
37db44627a2b676421a55eda68a8dc6735913da6
|
4e35775e2a3b6903b68ca5ae2ce0ecbd25b1b5a2
|
/R/draw_posterior.R
|
f2b1c2f1b4159129f36344f43873ec88a5b46f65
|
[
"MIT"
] |
permissive
|
FrankLef/eflRethinking
|
93ab747e7ebe93ec4ca3fe5e5a80e6952a45d04e
|
d9b2a868923134b3e549c96982f1b00092b0c3b2
|
refs/heads/main
| 2023-08-07T12:19:05.414807
| 2021-10-08T18:56:57
| 2021-10-08T18:56:57
| 410,892,718
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 858
|
r
|
draw_posterior.R
|
#' Posterior samples from a quadratic-approximation fit as
#' \code{draws_rvars}
#'
#' Draw posterior samples with \code{rethinking::extract.samples} and
#' convert them to a \code{draws_rvars} object.
#'
#' The returned object can be used directly with the \code{posterior}
#' package.
#'
#' @param obj S4 object of class \code{map}.
#' @param n Sample size.
#'
#' @importFrom MASS mvrnorm
#'
#' @return \code{draws_rvars} object
#' @export
draw_posterior_quap <- function(obj, n = 1L) {
  # Validate inputs up front: `obj` must be a map fit, `n` a positive count.
  checkmate::assert_class(obj, class = "map", ordered = TRUE)
  checkmate::assert_count(n, positive = TRUE)
  samples <- rethinking::extract.samples(obj, n = n)
  posterior::as_draws_rvars(samples)
}
|
ac9440365d18268a24dd69e693f44e5de95f861e
|
73c9172f37a4fa924bd34e5e50ee8a2d83cb01fc
|
/deprecated/bfrmE2F3.R
|
5badb9b6f0316bd2a8e2145975bfaa892a31de29
|
[] |
no_license
|
Sage-Bionetworks/Cancer-Pathway-Sparse-Factor-Models
|
a99cc1b1ce2697fe0cbed83f3a44cd7985d59f94
|
c8552ed3fdcac1316a979a29ced357cf836a4d36
|
refs/heads/master
| 2021-01-21T11:45:57.577338
| 2012-12-20T20:50:28
| 2012-12-20T20:50:28
| 3,369,825
| 1
| 0
| null | 2012-09-05T15:55:17
| 2012-02-06T18:58:29
|
R
|
UTF-8
|
R
| false
| false
| 1,007
|
r
|
bfrmE2F3.R
|
## bfrmE2F3.R
## Erich S. Huang
## Sage Bionetworks
## Seattle, Washington
## erich.huang@sagebase.org
##########
# RUN BFRM IN THE SPARSE ANOVA MODE
##########
require(bfrm)
require(synapseClient)
e2f3Anova <- bfrm(dat2, design = ifelse(treatment == 'E2F3', 1, 0))
mPPib <- e2f3Anova@results$mPostPib
topProbeLogical <- mPPib[ , 2] >= 0.99
topProbeInd <- grep("TRUE", topProbeLogical)
##########
# RUN BFRM IN THE FACTOR DISCOVERY MODE
##########
bCatEvolveFactor <- evolve(dat2,
init = as.numeric(topProbeInd),
priorpsia = 2,
priorpsib = 0.005,
varThreshold = 0.85,
facThreshold = 0.95,
maxVarIter = 30,
minFacVars = 10,
maxFacVars = length(topProbeInd),
maxFacs = 50,
maxVars = length(topProbeInd)
)
|
21be2117f92e0ff0802ae6cf45aba8ac64747898
|
c9120a5553c6edcd1fc34da5a3b1ae22949808a8
|
/markdown2/R/markdown2-package.r
|
406f4c93180b04010943cae3a5592ff20562070c
|
[] |
no_license
|
mathematicalcoffee/markdown2-package
|
2fe99bd39f9844bdad14a7da81e5d8b7040d3556
|
60a7744cacb0cf1c0cc50759f1aa33eadc1814db
|
refs/heads/master
| 2021-01-22T07:10:39.145948
| 2013-03-27T07:10:25
| 2013-03-27T07:10:25
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
markdown2-package.r
|
#' Convert from markdown to other formats, customisable.
#'
#' TODO.
#'
#' @name markdown2
#' @aliases markdown2-package package-markdown2
#' @keywords markdown2
#' @docType package
#' @keywords package
NULL
|
47cde934047b5367c6e8698a1d28ceb3c91d31b5
|
80887ad16c46b0c32127f186a603b71161aedb82
|
/scripts/create_peak_graphs.R
|
80d66d47f1eb2174155414c82ff56881f2b21811
|
[] |
no_license
|
durrantmm/mustache_OLD
|
df95fffe4326eb9ef2dc3e1077005c5283cad3fb
|
39c9b4209681de0f4b0dc0f76e4408d6e00b2a2a
|
refs/heads/master
| 2021-06-26T00:17:22.401944
| 2017-08-22T20:37:51
| 2017-08-22T20:37:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 906
|
r
|
create_peak_graphs.R
|
library(readr); packageVersion("readr")
library(ggplot2); packageVersion("ggplot2")
library(tools)

# Usage: Rscript create_peak_graphs.R <peak_depths_folder> <output_folder>
# Reads every per-peak depth TSV in the input folder and writes one PDF line
# plot (total / reverse / forward read depth by genomic position) per file.
args = commandArgs(trailingOnly=TRUE)
peak_depths_folder = args[1]
output_folder = args[2]
dir.create(output_folder)
# Bug fix: full.names takes a logical; the original passed the string "TRUE"
# (which only worked by accidental coercion).
peak_depths <- list.files(peak_depths_folder, full.names = TRUE)
for (filein in peak_depths){
  depths <- read_tsv(filein)
  gg <- qplot(data=depths, x=POS, y=VALID_DEPTH, geom='line', color='Total Read Depth') +
    geom_line(aes(x=POS, y=REVERSE_DEPTH, color='Reverse Read Depth'), linetype='dashed') +
    geom_line(aes(x=POS, y=FORWARD_DEPTH, color='Forward Read Depth'), linetype='dotted') +
    scale_color_manual(values=c("red", "blue", "black")) +
    xlab("Genomic Position") +
    ylab("Read Depth") +
    theme_bw()
  outfile <- file.path(output_folder, basename(filein))
  outpdf <- paste0(file_path_sans_ext(outfile), '.pdf')
  # Bug fix: pass the plot explicitly. Inside a loop the plot is never
  # printed, so ggsave()'s default of last_plot() is not set to `gg` and the
  # script saves the wrong plot (or errors) in non-interactive runs.
  ggsave(outpdf, plot = gg, width=5, height=3)
}
|
a3312278281f19491891a3403a68c0e98aeaef51
|
253d4a133a8a6e568f30523447358689d182f473
|
/man/L7_ETMs.Rd
|
7a76fb2e1555994d93ff51ca492606db8d76de4b
|
[
"Apache-2.0"
] |
permissive
|
r-spatial/stars
|
081a7fc96089aeb276051107911145d17365de79
|
0e17daf1c49b69f990ec77275e80cfd471210aee
|
refs/heads/main
| 2023-09-04T10:58:52.888008
| 2023-08-19T09:15:37
| 2023-08-19T09:15:37
| 78,360,080
| 489
| 126
|
Apache-2.0
| 2023-03-20T09:17:18
| 2017-01-08T17:48:24
|
R
|
UTF-8
|
R
| false
| true
| 703
|
rd
|
L7_ETMs.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/datasets.R
\docType{data}
\name{L7_ETMs}
\alias{L7_ETMs}
\title{Landsat-7 bands for a selected region around Olinda, BR}
\format{
An object of class \code{stars_proxy} (inherits from \code{stars}) of dimension 349 x 352 x 6.
}
\usage{
L7_ETMs
}
\description{
Probably containing the six 30 m bands:
\itemize{
\item Band 1 Visible (0.45 - 0.52 µm) 30 m
\item Band 2 Visible (0.52 - 0.60 µm) 30 m
\item Band 3 Visible (0.63 - 0.69 µm) 30 m
\item Band 4 Near-Infrared (0.77 - 0.90 µm) 30 m
\item Band 5 Short-wave Infrared (1.55 - 1.75 µm) 30 m
\item Band 7 Mid-Infrared (2.08 - 2.35 µm) 30 m
}
}
\keyword{datasets}
|
d0217f629ebd252b79efb0c7a3d37eedc0020cdd
|
fb2f2d40ad5ebe227aa244835ee628d465cf7acc
|
/R/tstamp.R
|
8e2b5696ce39a9ce068595d32e99eeeb47501ebc
|
[] |
no_license
|
wepelham3/sack2
|
a1c14d7cb4c860130a80acd50296963a13e139fc
|
9dc2210304a6e374e8925682ec7d877f18cf02b5
|
refs/heads/master
| 2023-08-03T10:55:47.149177
| 2023-07-24T23:38:52
| 2023-07-24T23:38:52
| 87,333,414
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 341
|
r
|
tstamp.R
|
#' Create a timestamp
#'
#' Build a stamp of the current date and time in the form
#' \code{YYYY_MM_DD_HHMMSS}, e.g. 2020_10_07_215301.
#'
#' @export
#' @examples
#' tstamp()
#**********************************************************
tstamp <- function() {
  now <- Sys.time()
  format(now, "%Y_%m_%d_%H%M%S")
}
#**********************************************************
|
a5c81133255b4526a16ac359f6ab69c9656b6858
|
f2662bea82470d47297b5ff6f1b6751c506da28d
|
/ui.R
|
3c0efe65503ed39951cb42b417538be959852102
|
[] |
no_license
|
kurakuradave/hgran_shiny
|
cb80b3754d08e56bf136b9e3d7ad4a009fe572b7
|
48465899404297fb0231196b5426dbd0f6efa89d
|
refs/heads/master
| 2021-04-09T16:53:13.286901
| 2018-03-18T04:42:02
| 2018-03-18T04:42:02
| 125,689,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,771
|
r
|
ui.R
|
library(shiny)
library(plotly)
# UI for the "Heatmap Granularity" app: a sidebar with explanatory text and
# a time-interval selector, plus a main panel rendering a plotly heatmap.
# The matching server must provide output$plot1 and output$text1 and read
# input$tInterval.
shinyUI(fluidPage(
titlePanel("Heatmap Granularity"),
sidebarLayout(
sidebarPanel(
tags$p( tags$i( "Final Project for Course 9 - Developing Data Products" ) ),
tags$p( tags$i( "David Effendi, Mar 16 2018" ) ),
tags$br(),
tags$p( "This heatmap visualizes count of incidents on a time-of-day X weekday plane." ),
tags$p( "Select a different time interval to see the heatmap at different levels of granularity." ),
# Bin width in minutes; re-aggregation only happens on Submit because a
# submitButton() suspends all reactivity until clicked.
radioButtons("tInterval", "Select Time Interval: (mins)",
c(1, 5,15, 30, 60, 120, 180 ),
selected = 60,
inline = TRUE ),
submitButton("Submit"),
tags$p( tags$i( "Please be patient if the heatmap doesn't immediately show up") ),
tags$p("This demonstrates the effects of different levels of granularity on heatmaps. When it is too fine (very granular), it might be hard to observe any pattern. And when it's too coarse (very aggregated), some details might be glossed over."),
tags$br(),
tags$h4("Server Computation & Reactivity:"),
tags$p("The dataset is on a per-minute basis. Everytime the 'Submit' button is clicked, R take note of the chosen granularity value and performs re-grouping on the dataset. aggregating the number of incidents in each time interval, then pass the aggregated data to plotly to be turned into a heatmap."),
tags$br(),
tags$h4("Data Source:"),
tags$a(href="https://data.cityofnewyork.us/Public-Safety/NYPD-Motor-Vehicle-Collisions/h9gi-nx95", "NYPD open data - Motor Vehicle Collisions")
),
mainPanel(
h2( "Heatmap - number of incidents across time" ),
plotlyOutput("plot1"),
textOutput("text1")
)
)
))
|
dd36c0cb32f68a2396afb7844a96fdf213cca787
|
8280c90aa90bc6e8a14312ac14ecf42fb2e30eef
|
/man/cnSpec.Rd
|
1eb880407c2db9c488e73bf8f7242fd4cd580094
|
[
"CC0-1.0"
] |
permissive
|
hjanime/GenVisR
|
bd34e60ce49c5c7495cbc5727aa8b44491962494
|
a25e4f877ba140f126acecd9b94fc44d7fc11897
|
refs/heads/master
| 2017-12-02T04:48:14.237489
| 2015-08-05T14:37:51
| 2015-08-05T14:37:51
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,454
|
rd
|
cnSpec.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/cnSpec.R
\name{cnSpec}
\alias{cnSpec}
\title{Construct CN cohort plot}
\usage{
cnSpec(x, y = NULL, genome = "hg19", title = NULL,
CN_low_colour = "#002EB8", CN_high_colour = "#A30000", x_lab_size = 12,
y_lab_size = 12, facet_lab_size = 10, layers = NULL)
}
\arguments{
\item{x}{object of class data frame containing columns "chromosome", "start", "end", "segmean", "sample" consisting of CN segment calls}
\item{y}{object of class data frame containing columns "chromosome", "start", "end" specifying chromosome boundary coordinates for all chromosomes in a genome (optional)}
\item{genome}{character string specifying UCSC genome from which data is derived, defaults to "hg19"}
\item{title}{character string specifying title of plot}
\item{CN_low_colour}{character string specifying low value of colour gradient to plot}
\item{CN_high_colour}{character string specifying high value of colour gradient to plot}
\item{x_lab_size}{integer specifying the size of the x labels on the plot}
\item{y_lab_size}{integer specifying the size of the y label on the plot}
\item{facet_lab_size}{integer specifying the size of the faceted labels}
\item{layers}{Additional ggplot2 layers to plot}
}
\value{
ggplot object
}
\description{
given a data frame construct a plot to display CN information for a group of samples
}
\examples{
cnSpec(LucCNseg, genome="hg19")
}
|
6780f3466dc8ad81b56c39a011a0364e5268ab50
|
10be09d12f10a29050a391098c46debe7cf85a65
|
/ps_7_app/app.R
|
9cce3dc4fc67da6ef50c599c66d081597ebee810
|
[] |
no_license
|
kakenzua98/ps_7
|
8322fbc37e847ebc89354e5b401d500c86edba8e
|
d6489d8d204578639e4e185978fe33deb4e76f16
|
refs/heads/master
| 2020-04-07T11:10:11.928100
| 2018-11-21T14:49:19
| 2018-11-21T14:49:19
| 158,314,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,892
|
r
|
app.R
|
# Shiny app exploring how the racial/educational makeup of a district relates
# to errors in Upshot/Sienna polling predictions of the Democratic vote share.
#
# Run the application with the 'Run App' button in RStudio, or shiny::runApp().
# Find out more about building applications with Shiny here:
#
#    http://shiny.rstudio.com/
#
library(tidyverse)
library(shiny)
library(ggrepel)

# Per-ethnicity data sets, read from the app directory. Each holds one row per
# district with demographic shares plus the polling error column `dem_error`.
black_voters <- read_rds("black_voters.rds")
asian_voters <- read_rds("asian_voters.rds")
white_voters <- read_rds("white_voters.rds")
hisp_voters <- read_rds("hisp_voters.rds")
other_voters <- read_rds("other_voters.rds")
all_voters <- read_rds("all_voters.rds")

# Globally defined choice lists: the names are what the user sees in the
# drop-downs, the values are the internal tokens used to build column names
# (e.g. "black" + "postgrad" -> column "black_postgrad").
education_choices <- c("Postgraduate" = "postgrad",
                       "College" = "college_grad",
                       "Some College" = "some_college",
                       "High School (or Less)" = "hs")
race_choices <- c("Asian" = "asian",
                  "Black" = "black",
                  "Hispanic" = "hispanic",
                  "White" = "white",
                  "Other" = "other")

# UI: two drop-downs (education, race), two checkboxes (best-fit line,
# district labels), a plot, and a written summary of the findings.
ui <- fluidPage(

   # Application title
   titlePanel("The relationship between Race/Education and Errors in Polling Predictions"),

   sidebarLayout(
      sidebarPanel(
        p("Use the selections below to see how Race and Education Levels Correlate with Errors in Polling Predictions"),
        # NOTE(review): selected = "black_postgrad" is not a valid value in
        # either choice list, so Shiny silently falls back to the first
        # choice. Kept as-is to preserve the app's current default behavior.
        selectInput(inputId = "educ",
                     label = "Education:",
                     choices = education_choices,
                     selected = "black_postgrad"),
        selectInput(inputId = "race",
                    label = "Race:",
                    choices = race_choices,
                    selected = "black_postgrad"),
        # When checked, input$line / input$district become TRUE and the
        # server adds the corresponding plot layer.
        checkboxInput(inputId = "line",
                      label = "Show Best Fit Line",
                      value = FALSE),
        checkboxInput(inputId = "district",
                      label = "Show District Labels",
                      value = FALSE)
      ),

      # Show the plot plus a written explanation and summary.
      mainPanel(
         plotOutput("distPlot"),
         br(),
         h3("Calculation Details"),
         p("Prediction Error in Democrat Votes (%) is calculated by subtracting the Democratic Share of Votes as predicted by the last wave of Upshot/Sienna Polls by the Democratic Share of Votes in the actual election."),
         p("The x-axis measures the percentage of each education response in the selected ethnic group. For example, 'Asian with Some College Education' measures the share of respondents with Some College Education from the group of respondents that identified as Asian. The percentages are not based off of all respondents but, instead, the respondents in their ethnic group"),
         br(),
         h3("Summary:"),
         h4("Asian:"),
         p("Error in polling predictions for Democratic Advantage seems to increase as the percentage of Asian respondents with less than a college degree increase. The share of Democratic votes was higher than the polling suggested. On the other hand, an increase in the share of Asian respondents with a college or postgraduate degree led to a decrease in prediction error. Another interesting takeaway is that most of the districts shown have two times (or more) the number of Asian respondents with a completed college education than Asian respondents with a high school education (or less). "),
         h4("Black:"),
         p("Amidst black respondent, the prediction error in relation to % of blacks with some college education and blacks with postgraduate educations were relatively similar. There was, however, a slight increase in prediction error as the percentage of black respondents with a postgraduate degree increase. There is a much larger difference in prediction error between black respondents with a completed college degree and those with a high school education or less. Generally, a higher percentage of blacks with a college degree led to a high prediction error with Democrats winning more votes than predicted. As the number of black respondents with a high school or less education increased, the prediction error decreased; this is also very different from the prediction for Asian with a high school education or less."),
         h4("Hispanic:"),
         p("Differences in the education level of Hispanic voters had a smaller effect than it did amidst other ethnicities shown. The primary takeaways here is that prediction error slightly decreases as the percent of Hispanic respondents with a college education increases. On the other hand, predict error increases as the percentage of Hispanic voters with some college education increases. The prediction error for Hispanics with a Postgraduate degree and those with High School or less show little change in prediction error as the percent for that education bloc changes."),
         h4("White:"),
         p("Amidst white respondents, prediction error increases as the following education groups increase: postgraduates, college graduates, and some college. For whites with high school or less, the prediction error decreases and dips slightly below 0. "),
         h4("Other:"),
         p("For respondents that did not fall into any of the previous ethnic identities, the prediction error decreases as the percentage of respondents with a high school (or less) education or a college education rise. For the former, there is a large decrease and prediction error falls to -2.5%. For Other respondents with a postgraduate education or some college education, prediction error rises with the percentage of the respective education groups."),
         br(),
         h5("Source of Data:"),
         p("Upshot/Sienna Polls and Mr. Schroeder")
      )
   )
)

# Server: build one scatter plot of education share vs. polling error for the
# selected ethnicity. The original implementation repeated the same plotting
# code in five near-identical if/else branches; here the per-race differences
# (data set and title noun) are captured in lookup tables instead.
server <- function(input, output) {

  # Data set for each race value coming from the UI.
  race_data <- list(
    asian = asian_voters,
    black = black_voters,
    hispanic = hisp_voters,
    white = white_voters,
    other = other_voters
  )
  # Plural noun used in the plot title for each race value.
  race_title <- c(
    asian = "Asians",
    black = "Blacks",
    hispanic = "Hispanics",
    white = "Whites",
    other = "Other"
  )

  output$distPlot <- renderPlot({
    # Column holding the share of respondents, e.g. "black_postgrad".
    x_var <- paste(input$race, input$educ, sep = "_")

    # Human-readable axis label built from the display names of the choices.
    x_label <- paste(names(race_choices[which(race_choices == input$race)]), "with",
                     names(education_choices[which(education_choices == input$educ)]), "Education (%)")

    # aes_string() is used because selectInput delivers strings; note that
    # the plain variable dem_error must also be quoted for aes_string() to
    # find it.
    race_plot <- race_data[[input$race]] %>%
      ggplot(aes_string(x = x_var, y = "dem_error", color = "dem_error")) +
      geom_point() +
      ylab("Prediction Error in Democrat Votes (%)") +
      xlab(x_label) +
      guides(color = guide_legend("Polling Error (%)")) +
      ggtitle(paste0("The percentage of ", race_title[[input$race]],
                     " with varying education levels \ncompared to errors in polling predictions"))

    # Optional layers toggled by the checkboxes in the sidebar.
    if (input$line == TRUE) {
      race_plot <- race_plot + geom_smooth(method = lm, se = FALSE)
    }
    if (input$district == TRUE) {
      race_plot <- race_plot + geom_label_repel(aes(label = toupper(district)), size = 3, force = 3)
    }
    race_plot
  })
}

# Run the application
shinyApp(ui = ui, server = server)
|
2a0290fbace5297e7b9fc9fcb5acb142512c3959
|
eb7786382848c787a9a2aee7dbbd50ad677cf5db
|
/man/plot_twochoiceRL.Rd
|
5da09a8029b47439d43b73d7c9cb7c3afab94c63
|
[] |
no_license
|
psuthaharan/twochoiceRL
|
389b61ed3649dd2b7a06d8c0883c72124d4d649c
|
dafaf0d237536e5ed88b74e50f14a761b359136a
|
refs/heads/master
| 2023-06-07T09:34:42.525688
| 2021-06-22T17:10:13
| 2021-06-22T17:10:13
| 335,135,408
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,165
|
rd
|
plot_twochoiceRL.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_twochoiceRL.R
\name{plot_twochoiceRL}
\alias{plot_twochoiceRL}
\title{Visualize behavior in a two-choice decision task}
\usage{
plot_twochoiceRL(
data = NULL,
subj = 100,
colors = c("#009999", "#0000FF"),
plot_type = "static"
)
}
\arguments{
\item{data}{simulated task data}
\item{subj}{subject data to plot. Defaults to subject 100.}
\item{colors}{colors to represent the two choices - choice A or choice B.
Defaults to \code{"#009999"} (teal) for choice A and \code{"#0000FF"} (blue) for choice B.}
\item{plot_type}{produce either static or animated version of the plot. Defaults to static.}
}
\value{
A plot of individual \code{subj}'s expected value across trials, a plot of individual \code{subj}'s
probability across trials, and a side-by-side plot of both of the previous plots.
}
\description{
This function plots the simulated two-choice task data.
}
\examples{
# Save simulated task data to a variable, say, data_sim
data_sim <- simulate_twochoiceRL(trials_unique = FALSE)
# Plot the behavior of individual 100
plot_twochoiceRL(data = data_sim, subj = 100, colors = c("#009999","#0000FF"))
}
|
8a42b7e799d7606287a12c66cae35c05e0cc4b6b
|
06b6a2c2008c7f5e8400f8eb402d490ebb4bfd54
|
/R/transformationFunctions.R
|
bddc6317955f6646ffa2c21a2be82c1c64461740
|
[
"MIT"
] |
permissive
|
BMEngineeR/midasHLA
|
55265be7baae2259c976bb5ea7f112737c0b7d1a
|
9ce02c8192852c16a296f63ecbd3e4791e5dbd83
|
refs/heads/master
| 2023-03-05T15:59:52.362313
| 2021-02-17T00:53:19
| 2021-02-17T00:53:19
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 46,191
|
r
|
transformationFunctions.R
|
#' Generate amino acid variation matrix
#'
#' \code{hlaToAAVariation} convert HLA calls data frame to a matrix of variable
#' amino acid positions.
#'
#' Variable amino acid positions are found by comparing elements of the
#' alignment column wise. Some of the values in alignment can be treated
#' specially using \code{indels} and \code{unkchar} arguments. Function
#' processes alignments for all HLA genes found in \code{hla_calls}.
#'
#' Variable amino acid position uses protein alignments from
#' \href{ftp://ftp.ebi.ac.uk/pub/databases/ipd/imgt/hla/alignments/}{EBI database}.
#'
#' @inheritParams checkHlaCallsFormat
#' @param indels Logical indicating whether indels should be considered when
#'   checking variability.
#' @param unkchar Logical indicating whether unknown characters in the alignment
#'   should be considered when checking variability.
#' @param as_df Logical indicating if data frame should be returned.
#'   Otherwise a matrix is returned.
#'
#' @return Matrix or data frame containing variable amino acid positions.
#'   Rownames corresponds to ID column in \code{hla_calls}, and colnames to
#'   alignment positions. If no variation is found one column matrix filled with
#'   \code{NA}'s is returned.
#'
#' @examples
#' hlaToAAVariation(MiDAS_tut_HLA)
#'
#' @importFrom assertthat assert_that see_if
#' @importFrom stringi stri_split_fixed
#' @export
hlaToAAVariation <- function(hla_calls,
                             indels = TRUE,
                             unkchar = FALSE,
                             as_df = TRUE){
  # Validate inputs up front: hla_calls must follow the expected HLA calls
  # layout and all flags must be single TRUE/FALSE values.
  assert_that(
    checkHlaCallsFormat(hla_calls),
    isTRUEorFALSE(indels),
    isTRUEorFALSE(unkchar),
    isTRUEorFALSE(as_df)
  )
  # First column is the sample ID; the remaining columns are allele calls.
  ids <- hla_calls[, 1]
  hla_calls <- hla_calls[, -1]
  # get names of genes
  gene_names <- vapply(X = colnames(hla_calls),
                       FUN = function(x) stri_split_fixed(x, "_")[[1]][1], # colnames are of form A_1, A_2, B_1, ...
                       FUN.VALUE = character(length = 1)
  )
  gene_names_uniq <- unique(gene_names)
  # assert that alignment files are available
  # Alignments ship with the package as extdata/<GENE>_prot.Rdata files.
  available_genes <- list.files(
    path = system.file("extdata", package = "midasHLA"),
    pattern = "_prot.Rdata$"
  )
  # Strip the "_prot.Rdata" suffix to recover plain gene names.
  available_genes <- vapply(
    X = stri_split_fixed(available_genes, "_prot.Rdata"),
    `[[`, 1,
    FUN.VALUE = character(length = 1)
  )
  av_genes_idx <- gene_names_uniq %in% available_genes
  # Hard error only when NO requested gene has an alignment file.
  assert_that(
    any(av_genes_idx),
    msg = sprintf("Alignments for genes %s are not available.",
                  paste(gene_names_uniq[! av_genes_idx], collapse = ", ")
    )
  )
  if (! all(av_genes_idx)) {
    # Some (but not all) genes lack alignments -- warn and drop them.
    warn(sprintf(
      "Alignments for genes %s are not available and will be omitted.",
      paste(gene_names_uniq[! av_genes_idx], collapse = ", ")
    ))
    gene_names_uniq <- gene_names_uniq[av_genes_idx]
  }
  # read alignment matrices in all resolutions
  # For each gene, stack the 2/4/6/8-digit alignments row-wise and keep a
  # single row per allele name (first occurrence wins).
  hla_aln <- lapply(X = gene_names_uniq,
                    FUN = function(x) {
                      alns <- lapply(
                        X = c(2, 4, 6, 8),
                        FUN = function(res) {
                          readHlaAlignments(
                            gene = x,
                            resolution = res,
                            unkchar = "*"
                          )
                        }
                      )
                      aln <- do.call(rbind, alns)
                      aln <- aln[! duplicated(rownames(aln)), ]
                      return(aln)
                    }
  )
  # get aa variations for each gene
  aa_variation <- list()
  for (i in seq_along(gene_names_uniq)) {
    # All call columns belonging to the current gene (e.g. A_1, A_2).
    x_calls <- hla_calls[, gene_names == gene_names_uniq[i], drop = FALSE]
    # mark alleles w/o reference as NAs
    x_calls_unlist <- unlist(x_calls)
    ref_allele <- rownames(hla_aln[[i]])
    mask_alleles_wo_ref <- ! x_calls_unlist %in% ref_allele
    if (any(mask_alleles_wo_ref[! is.na(x_calls_unlist)], na.rm = TRUE)) {
      warn(sprintf(
        "Alignments for alleles %s are not available and will be omitted.",
        paste(x_calls_unlist[mask_alleles_wo_ref], collapse = ", ")
      ))
      x_calls_unlist[mask_alleles_wo_ref] <- NA
    }
    # check if there is possibility for variability
    # With zero or one distinct allele there is nothing to compare.
    x_calls_uniq <- na.omit(unique(x_calls_unlist))
    if (length(x_calls_uniq) <= 1) next()
    # get variable aa positions
    # Restrict alignment to observed alleles; varchar controls which
    # characters count as variation (optionally indels "." / unknown "*").
    hla_aln[[i]] <- hla_aln[[i]][x_calls_uniq, ]
    var_pos <- getVariableAAPos(hla_aln[[i]],
                                varchar = sprintf("[A-Z%s%s]",
                                                  ifelse(indels, "\\.", ""),
                                                  ifelse(unkchar, "\\*", "")
                                )
    )
    var_aln <- lapply(colnames(x_calls), function(allele) {
      mask <- seq_len(nrow(hla_aln[[i]])) # NAs in character index gives oob error, so it is needed to refer to indexes
      names(mask) <- rownames(hla_aln[[i]])
      x <- hla_aln[[i]][mask[x_calls[, allele]], var_pos, drop = FALSE]
      # Prefix columns with the call column name, e.g. "A_1_AA_<pos>".
      colnames(x) <- paste0(allele, "_", "AA_", colnames(x))
      return(x)
    })
    var_aln <- do.call(cbind, var_aln)
    # Interleave columns so the two allele copies of each position end up
    # next to each other (pos1 copy1, pos1 copy2, pos2 copy1, ...).
    ord <- as.vector(vapply(seq_along(var_pos),
                            function(j) {
                              c(j, j + length(var_pos))
                            },
                            FUN.VALUE = numeric(length = 2)
    ))
    var_aln <- var_aln[, ord, drop = FALSE]
    aa_variation[[length(aa_variation) + 1]] <- var_aln
  }
  # Combine per-gene results; when no variation was found anywhere, return a
  # single all-NA column as documented in @return.
  if (length(aa_variation) > 1) {
    aa_variation <- do.call(cbind, aa_variation)
    rownames(aa_variation) <- ids
  } else if (length(aa_variation) == 1) {
    aa_variation <- aa_variation[[1]]
    rownames(aa_variation) <- ids
  } else {
    aa_variation <- matrix(nrow = length(ids))
    rownames(aa_variation) <- ids
  }
  if (as_df) {
    # optional = TRUE keeps column names as-is (no make.names mangling).
    aa_variation <- as.data.frame(aa_variation,
                                  optional = TRUE,
                                  stringsAsFactors = FALSE
    )
    aa_variation <- cbind(ID = ids, aa_variation, stringsAsFactors = FALSE)
    rownames(aa_variation) <- NULL
  }
  return(aa_variation)
}
#' Convert HLA calls to variables
#'
#' \code{hlaToVariable} converts HLA calls data frame to additional variables.
#'
#' \code{dictionary} file should be a tsv format with header and two columns.
#' First column should hold allele numbers and second corresponding additional
#' variables. Optionally a data frame formatted in the same manner can be passed
#' instead.
#'
#' \code{dictionary} can be also used to access dictionaries shipped with the
#' package. They can be referred to by using one of the following strings:
#' \describe{
#'   \item{\code{"allele_HLA_Bw"}}{
#'     Translates HLA-B alleles together with A*23, A*24 and A*32 into Bw4 and
#'     Bw6 allele groups. In some cases HLA alleles containing Bw4 epitope, on
#'     nucleotide level actually carries a premature stop codon. Meaning that
#'     although on nucleotide level the allele would encode a Bw4 epitope it's
#'     not really there and it is assigned to Bw6 group. However in 4-digit
#'     resolution these alleles can not be distinguished from other Bw4 groups.
#'     Since alleles with premature stop codons are rare, Bw4 group is assigned.
#'   }
#'   \item{\code{"allele_HLA-B_only_Bw"}}{
#'     Translates HLA-B alleles (without A*23, A*24 and A*32) into Bw4 and Bw6
#'     allele groups.
#'   }
#'   \item{\code{"allele_HLA-C_C1-2"}}{
#'     Translates HLA-C alleles into C1 and C2 allele groups.
#'   }
#'   \item{\code{"allele_HLA_supertype"}}{
#'     Translates HLA-A and HLA-B alleles into supertypes, a classification that
#'     group HLA alleles based on peptide binding specificities.
#'   }
#'   \item{\code{"allele_HLA_Ggroup"}}{
#'     Translates HLA alleles into G groups, which defines amino acid identity
#'     only in the exons relevant for peptide binding. Note that alleles
#'     DRB1*01:01:01 and DRB1*01:16 match more than one G group, here this
#'     ambiguity was removed by deleting matching with DRB5*01:01:01G group.
#'   }
#' }
#'
#' \code{reduce} control if conversion should happen in a greedy way, such that
#' if some HLA number cannot be converted, it's resolution is reduced by 2 and
#' another attempt is taken. This process stops when alleles cannot be further
#' reduced or all have been successfully converted.
#'
#' @inheritParams checkHlaCallsFormat
#' @inheritParams convertAlleleToVariable
#' @param reduce Logical indicating if function should try to reduce allele
#'   resolution when no matching entry in the dictionary is found. See details.
#' @param na.value Vector of length one speciyfing value for alleles with
#'   no matching entry in \code{dictionary}. Default is to use \code{0}.
#' @param nacols.rm Logical indicating if result columns that contain only
#'   \code{NA} should be removed.
#'
#' @return Data frame with variable number of columns. First column named
#'   \code{"ID"} corresponds to \code{"ID"} column in \code{hla_calls}, further
#'   columns holds converted HLA variables.
#'
#' @examples
#' hlaToVariable(MiDAS_tut_HLA, dictionary = "allele_HLA_supertype")
#'
#' @importFrom assertthat assert_that is.string see_if
#' @importFrom rlang warn
#' @export
hlaToVariable <- function(hla_calls,
                          dictionary,
                          reduce = TRUE,
                          na.value = 0,
                          nacols.rm = TRUE) {
  assert_that(
    checkHlaCallsFormat(hla_calls),
    isTRUEorFALSE(reduce),
    see_if(length(na.value) == 1, msg = "na.value length must equal 1."),
    isTRUEorFALSE(nacols.rm)
  )
  if (is.string(dictionary)) {
    lib <- listMiDASDictionaries()
    if (dictionary %in% lib) {
      # FIX: the names checked here were previously "allele_HLA-B_Bw" and
      # "allele_HLA-Bw_only_B", which do not match any dictionary documented
      # above, so the Bw4 ambiguity warning could never fire. Use the
      # documented dictionary names instead.
      if (dictionary %in% c("allele_HLA_Bw", "allele_HLA-B_only_Bw")) {
        warn("In ambiguous cases Bw4 will be assigned! See 'hlaToVariable' documentation for more details.")
      }
      # Resolve a bundled dictionary name to its shipped tsv file path.
      dictionary <- system.file(
        "extdata",
        paste0("Match_", dictionary, ".txt"),
        package = "midasHLA"
      )
    }
  }
  # Convert every allele column; unmatched alleles come back as NA.
  variable <- as.data.frame(
    lapply(hla_calls[, -1], convertAlleleToVariable, dictionary = dictionary),
    stringsAsFactors = FALSE,
    row.names = NULL
  )
  if (reduce) {
    # Greedy fallback: repeatedly drop allele resolution by 2 digits and
    # retry conversion for the still-unmatched entries.
    max_resolution <- getAlleleResolution(unlist(hla_calls[, -1]))
    max_resolution <- max(max_resolution, na.rm = TRUE)
    while (any(is.na(variable)) & max_resolution > 2) {
      max_resolution <- max_resolution - 2
      hla_calls <- reduceHlaCalls(hla_calls, resolution = max_resolution)
      variable[is.na(variable)] <- convertAlleleToVariable(
        allele = hla_calls[, -1][is.na(variable)],
        dictionary = dictionary
      )
    }
  }
  # add dictionary prefix to column names
  if (is.string(dictionary)) {
    dict_prefix <- gsub(".txt$", "", gsub("^.*_", "", dictionary))
  } else {
    dict_prefix <- colnames(dictionary)[2] # colnames are allele, name_of_variable
  }
  colnames(variable) <- paste0(dict_prefix, "_", colnames(variable))
  # get all na columns
  j <- vapply(variable, function(x) ! all(is.na(x)), logical(length = 1))
  if (nacols.rm) {
    variable <- variable[, j, drop = FALSE]
  }
  # FIX: na.value was documented but never applied. Alleles that still have
  # no dictionary match after the optional resolution reduction are set to
  # na.value (default 0). This happens after the all-NA column detection so
  # nacols.rm keeps working as documented.
  variable[is.na(variable)] <- na.value
  variable <- cbind(hla_calls[, 1, drop = FALSE], variable, stringsAsFactors = FALSE)
  colnames(variable) <- c("ID", colnames(variable[, -1]))
  if (ncol(variable) <= 1) {
    warn("HLA alleles could not be converted to any new variables.")
  }
  return(variable)
}
#' Reduce HLA calls resolution
#'
#' \code{reduceHlaCalls} reduces HLA calls data frame to specified resolution.
#'
#' Alleles whose resolution is already at or below \code{resolution}, as well
#' as optional suffixes, are returned unchanged.
#'
#' @inheritParams checkHlaCallsFormat
#' @inheritParams reduceAlleleResolution
#'
#' @return HLA calls data frame reduced to specified resolution.
#'
#' @examples
#' reduceHlaCalls(MiDAS_tut_HLA, resolution = 2)
#'
#' @export
reduceHlaCalls <- function(hla_calls, resolution = 4) {
  assert_that(checkHlaCallsFormat(hla_calls))
  # Reduce each allele column (everything except the leading "ID" column)
  # and write the result back in place, keeping column order and IDs intact.
  allele_cols <- hla_calls[, -1]
  reduced_cols <- lapply(
    allele_cols,
    function(col) reduceAlleleResolution(col, resolution = resolution)
  )
  hla_calls[, -1] <- as.data.frame(reduced_cols, stringsAsFactors = FALSE)
  return(hla_calls)
}
#' Transform HLA calls to counts table
#'
#' \code{hlaCallsToCounts} converts HLA calls data frame into a counts table.
#'
#' @inheritParams checkHlaCallsFormat
#' @param check_hla_format Logical indicating if \code{hla_calls} format should
#'   be checked. This is useful if one wants to use \code{hlaCallsToCounts} with
#'   input not adhering to HLA nomenclature standards. See examples.
#'
#' @return HLA allele counts data frame. First column holds samples ID's,
#'   further columns, corresponding to specific alleles, give information on
#'   the number of their occurrences in each sample.
#'
#' @importFrom assertthat assert_that is.string
#' @importFrom qdapTools mtabulate
#'
hlaCallsToCounts <- function(hla_calls,
                             check_hla_format = TRUE) {
  # Format validation is optional so that arbitrary "call-like" data frames
  # (e.g. already-converted variables) can also be tabulated.
  assert_that(
    isTRUEorFALSE(check_hla_format),
    if (check_hla_format) checkHlaCallsFormat(hla_calls) else TRUE
  )
  # Transpose so that each sample becomes one column/list element, then count
  # how many times each allele occurs per sample.
  sample_calls <- as.data.frame(t(hla_calls[, -1, drop = FALSE]))
  counts <- mtabulate(sample_calls)
  rownames(counts) <- NULL
  # Prepend the sample ID column back onto the counts.
  counts <- cbind(ID = hla_calls[, 1, drop = FALSE],
                  counts,
                  stringsAsFactors = FALSE
  )
  return(counts)
}
#' Calculate HLA allele frequencies
#'
#' \code{getHlaFrequencies} calculates allele frequencies in HLA calls data
#' frame.
#'
#' Both gene copies are taken into consideration for frequencies calculation,
#' \code{frequency = n / (2 * j)} where \code{n} is the number of allele
#' occurrences and \code{j} is the number of samples in \code{hla_calls}.
#'
#' @inheritParams checkHlaCallsFormat
#' @inheritParams getExperimentFrequencies
#' @param compare Logical flag indicating if \code{hla_calls} frequencies
#'   should be compared to reference frequencies given in \code{ref}.
#' @param ref_pop Character vector giving names of reference populations in
#'   \code{ref} to compare with. Optionally vector can be named, then those
#'   names will be used as population names.
#' @param ref Data frame giving reference allele frequencies. See
#'   \code{\link{allele_frequencies}} for an example.
#'
#' @return Data frame with each row holding HLA allele, it's count and
#'   frequency.
#'
#' @examples
#' getHlaFrequencies(MiDAS_tut_HLA)
#'
#' @importFrom assertthat assert_that
#' @importFrom dplyr left_join rename
#' @export
getHlaFrequencies <- function(hla_calls,
                              carrier_frequency = FALSE,
                              compare = FALSE,
                              ref_pop = c(
                                "USA NMDP African American pop 2",
                                "USA NMDP Chinese",
                                "USA NMDP European Caucasian",
                                "USA NMDP Hispanic South or Central American",
                                "USA NMDP Japanese",
                                "USA NMDP North American Amerindian",
                                "USA NMDP South Asian Indian"
                              ),
                              ref = allele_frequencies) {
  assert_that(
    checkHlaCallsFormat(hla_calls),
    isTRUEorFALSE(compare),
    is.data.frame(ref),
    colnamesMatches(ref, c("var", "population", "frequency")),
    is.character(ref_pop),
    characterMatches(ref_pop, unique(ref$population))
  )
  # Reference frequencies are only attached when a comparison was requested.
  reference <- NULL
  if (compare) {
    reference <- getReferenceFrequencies(ref, ref_pop, carrier_frequency)
  }
  # Tabulate allele occurrences per sample and reshape into the experiment
  # matrix layout expected by getExperimentFrequencies.
  hla_counts <- hlaCallsToCounts(hla_calls, check_hla_format = FALSE)
  counts_mat <- dfToExperimentMat(hla_counts)
  # pop_mul = 2 because each sample contributes two gene copies.
  allele_freq <- getExperimentFrequencies(
    experiment = counts_mat,
    pop_mul = 2,
    carrier_frequency = carrier_frequency,
    ref = reference
  )
  allele_freq <- rename(allele_freq, "allele" = "term")
  return(allele_freq)
}
#' Calculate KIR genes frequencies
#'
#' \code{getKIRFrequencies} calculates KIR genes frequencies in KIR calls data
#' frame.
#'
#' @inheritParams checkKirCallsFormat
#'
#' @return Data frame with each row holding KIR gene, it's count and
#'   frequency.
#'
#' @examples
#' getKIRFrequencies(MiDAS_tut_KIR)
#'
#' @importFrom assertthat assert_that
#' @importFrom dplyr left_join
#' @export
getKIRFrequencies <- function(kir_calls) {
  assert_that(
    checkKirCallsFormat(kir_calls)
  )
  # KIR calls are presence/absence per sample, so pop_mul = 1
  # (one observation per sample, unlike the two HLA gene copies).
  kir_freq <- getExperimentFrequencies(
    experiment = dfToExperimentMat(kir_calls),
    pop_mul = 1,
    carrier_frequency = FALSE,
    ref = NULL
  )
  kir_freq <- rename(kir_freq, "gene" = "term")
  rownames(kir_freq) <- NULL
  return(kir_freq)
}
#' Transform amino acid variation data frame into counts table
#'
#' \code{aaVariationToCounts} convert amino acid variation data frame into
#' counts table.
#'
#' @param aa_variation Amino acid variation data frame as returned by
#'   \link{hlaToAAVariation}.
#'
#' @return Amino acid counts data frame. First column holds samples ID's,
#'   further columns, corresponding to specific amino acid positions, give
#'   information on the number of their occurrences in each sample.
#'
#' @importFrom assertthat assert_that see_if
#' @importFrom qdapTools mtabulate
#' @importFrom stats na.omit
#'
aaVariationToCounts <- function(aa_variation) {
  assert_that(
    is.data.frame(aa_variation),
    see_if(colnames(aa_variation)[1] == "ID",
           msg = "first column of aa_variation must be named ID"
    )
  )
  aa_values <- aa_variation[, -1]
  # Strip the gene-copy suffix ("_1_AA" / "_2_AA") so both copies of a
  # position contribute to the same label.
  aa_ids <- gsub("_[12]_AA", "", colnames(aa_values))
  # Build "position_residue" labels per column; NA residues stay NA so they
  # don't produce spurious "position_NA" labels.
  aa_labels <- lapply(seq_len(ncol(aa_values)),
    function(i) {
      x <- paste(aa_ids[i], aa_values[, i], sep = "_")
      x[is.na(aa_values[, i])] <- NA
      return(x)
    }
  )
  # Record first-appearance order of labels to impose a stable column order
  # on the tabulated result.
  ord <- na.omit(unique(unlist(aa_labels)))
  aa_counts <- do.call(rbind, aa_labels)
  # After rbind, each column corresponds to one sample; mtabulate counts
  # label occurrences per sample.
  aa_counts <- mtabulate(as.data.frame(aa_counts, stringsAsFactors = FALSE))
  rownames(aa_counts) <- NULL
  aa_counts <- aa_counts[, ord]
  aa_counts <- cbind(ID = aa_variation[, 1, drop = FALSE],
                     aa_counts,
                     stringsAsFactors = FALSE
  )
  return(aa_counts)
}
#' Calculate amino acid frequencies
#'
#' \code{getAAFrequencies} calculates amino acid frequencies in amino acid
#' data frame.
#'
#' Both gene copies are taken into consideration for frequencies calculation,
#' \code{frequency = n / (2 * j)} where \code{n} is the number of amino acid
#' occurrences and \code{j} is the number of samples in \code{aa_variation}.
#'
#' @inheritParams aaVariationToCounts
#'
#' @return Data frame with each row holding specific amino acid position, it's
#'   count and frequency.
#'
#' @examples
#' aa_variation <- hlaToAAVariation(MiDAS_tut_HLA)
#' getAAFrequencies(aa_variation)
#'
#' @importFrom assertthat assert_that
#' @export
getAAFrequencies <- function(aa_variation) {
  assert_that(
    is.data.frame(aa_variation),
    see_if(colnames(aa_variation)[1] == "ID",
           msg = "first column of aa_variation must be named ID"
    )
  )
  # Tabulate residue occurrences per sample, then reshape into the
  # experiment matrix layout used by getExperimentFrequencies.
  counts_mat <- dfToExperimentMat(aaVariationToCounts(aa_variation))
  # pop_mul = 2: every sample carries two gene copies.
  aa_freq <- getExperimentFrequencies(
    experiment = counts_mat,
    pop_mul = 2,
    carrier_frequency = FALSE,
    ref = NULL
  )
  return(rename(aa_freq, "aa_pos" = "term"))
}
#' Pretty format statistical analysis results helper
#'
#' \code{formatResults} format statistical analysis results table to html or
#' latex format.
#'
#' @param results Tibble as returned by \code{\link{runMiDAS}}.
#' @param filter_by Character vector specifying conditional expression used to
#'   filter \code{results}, this is equivalent to \code{...} argument passed to
#'   \code{\link[dplyr]{filter}}.
#' @param arrange_by Character vector specifying variable names to use for
#'   sorting. Equivalent to \code{...} argument passed to
#'   \code{\link[dplyr]{arrange}}.
#' @param select_cols Character vector specifying variable names that should be
#'   included in the output table. Can be also used to rename selected
#'   variables, see examples.
#' @param format String \code{"latex"} or \code{"html"}.
#' @param header String specifying header for result table. If \code{NULL} no header is added.
#' @param scroll_box_height A character string indicating the height of the table.
#'
#' @return Character vector of formatted table source code.
#'
#' @examples
#' \dontrun{
#' midas <- prepareMiDAS(hla_calls = MiDAS_tut_HLA,
#'                       colData = MiDAS_tut_pheno,
#'                       experiment = "hla_alleles")
#' object <- lm(disease ~ term, data = midas)
#' res <- runMiDAS(object,
#'                 experiment = "hla_alleles",
#'                 inheritance_model = "dominant")
#' formatResults(res,
#'               filter_by = c("p.value <= 0.05", "estimate > 0"),
#'               arrange_by = c("p.value * estimate"),
#'               select_cols = c("allele", "p-value" = "p.value"),
#'               format = "html",
#'               header = "HLA allelic associations")
#' }
#'
#' @importFrom assertthat assert_that
#' @importFrom dplyr all_of arrange filter select
#' @importFrom knitr kable
#' @importFrom kableExtra add_header_above kable_styling scroll_box
#' @importFrom magrittr %>% %<>%
#' @importFrom stats setNames
#' @importFrom rlang parse_exprs .data
#'
formatResults <- function(results,
                          filter_by = "p.value <= 0.05",
                          arrange_by = "p.value",
                          select_cols = c("term", "estimate", "std.error", "p.value", "p.adjusted"),
                          format = c("html", "latex"),
                          header = NULL,
                          scroll_box_height = "400px"
) {
  assert_that(
    is.character(filter_by),
    is.character(arrange_by),
    is.character(select_cols),
    is.string(format),
    stringMatches(format, choice = c("html", "latex")),
    isStringOrNULL(header),
    is.string(scroll_box_height)
  )
  # Parse user supplied strings into expressions for tidy evaluation.
  filter_by <- parse_exprs(filter_by)
  arrange_by <- parse_exprs(arrange_by)
  # all_of() is required to select by an external character vector;
  # passing the bare vector to select() is deprecated tidyselect behavior.
  results %<>%
    filter(!!! filter_by) %>%
    arrange(!!! arrange_by) %>%
    select(all_of(select_cols))
  # When knitting html, escape literal '*' so markdown does not render it as
  # emphasis. Scalar conditions use && (short-circuit), not vectorized &.
  if (format == "html" && isTRUE(getOption("knitr.in.progress"))) {
    results <-
      rapply(
        results,
        f = gsub,
        classes = "character",
        how = "replace",
        pattern = "(\\*)",
        replacement = "\\\\\\1"
      )
  }
  if (! (is.null(header) && format == "html")) {
    header <- setNames(ncol(results), header) # Still if format is 'latex' and header = NULL the result is not visualy appealing, and without it gives error. Issue created on github: https://github.com/haozhu233/kableExtra/issues/387
  }
  results %<>%
    kable(format = format, format.args = list(digits = 4, scientific = -3)) %>%
    add_header_above(header = header)
  # Scrollable, styled box only applies to html output.
  if (format == "html") {
    results %<>%
      kable_styling(bootstrap_options = c("striped", "hover", "condensed")) %>%
      scroll_box(width = "100%", height = scroll_box_height)
  }
  return(results)
}
#' Create association analysis results table in HTML or LaTeX
#'
#' \code{kableResults} convert results table (\code{\link{runMiDAS}} output) to
#' HTML or LaTeX format.
#'
#' @inheritParams formatResults
#' @param colnames Character vector of form \code{c("new_name" = "old_name")},
#'   used to rename \code{results} colnames.
#' @param header String specifying results table header.
#' @param pvalue_cutoff Number specifying p-value cutoff for results to be
#'   included in output. If \code{NULL} no filtering is done.
#'
#' @return Association analysis results table in HTML or LaTeX.
#'
#' @examples
#' midas <- prepareMiDAS(hla_calls = MiDAS_tut_HLA,
#'                       colData = MiDAS_tut_pheno,
#'                       experiment = "hla_alleles")
#' object <- lm(disease ~ term, data = midas)
#' res <- runMiDAS(object, experiment = "hla_alleles", inheritance_model = "additive")
#' kableResults(results = res,
#'              colnames = c("HLA allele" = "allele"))
#'
#' @importFrom assertthat assert_that is.number is.string see_if
#' @importFrom dplyr ends_with mutate_at vars
#' @importFrom magrittr %>% %<>%
#' @importFrom rlang has_name list2 parse_expr warn !! :=
#' @export
kableResults <- function(results,
                         colnames = NULL,
                         header = "MiDAS analysis results",
                         pvalue_cutoff = NULL,
                         format = getOption("knitr.table.format"),
                         scroll_box_height = "400px") {
  assert_that(
    is.data.frame(results),
    isCharacterOrNULL(colnames),
    isNumberOrNULL(pvalue_cutoff),
    is.string(format),
    stringMatches(format, choice = c("html", "latex")),
    is.string(scroll_box_height)
  )
  if (! is.null(colnames)) {
    assert_that(
      characterMatches(colnames, choice = colnames(results))
    )
  }
  # Build the filter expression string. Plain if/else rather than ifelse():
  # both branches are scalars and ifelse() would needlessly evaluate
  # sprintf() on NULL in the unfiltered case.
  if (is.null(pvalue_cutoff)) {
    filter_by <- "p.value <= 1" # effectively no filtering
  } else {
    filter_by <- sprintf("p.value < %f", pvalue_cutoff)
  }
  # Create a named selection vector; entries matched by `colnames` get the
  # caller-supplied display names, the rest keep their original names.
  select_cols <- colnames(results)
  names(select_cols) <- select_cols
  i <- na.omit(match(x = select_cols, table = colnames))
  names(select_cols)[i] <- names(colnames)
  # replace .percent suffix with a human readable "[%]" unit
  names(select_cols) <- gsub(".percent", " [%]", names(select_cols))
  results %<>%
    formatResults(
      filter_by = filter_by,
      arrange_by = "p.value",
      select_cols = select_cols,
      format = format,
      header = header,
      scroll_box_height = scroll_box_height
    )
  return(results)
}
#' Convert counts table to variables
#'
#' \code{countsToVariables} converts counts table to additional variables.
#'
#' \code{dictionary} file should be a tsv format with header and two columns.
#' First column should be named \code{"Name"} and hold variable name, second
#' should be named \code{"Expression"} and hold expression used to identify
#' variable (eg. \code{"KIR2DL3 & ! KIR2DL2"} will match all samples with
#' \code{KIR2DL3} and without \code{KIR2DL2}). Optionally a data frame formatted
#' in the same manner can be passed instead.
#'
#' Dictionaries shipped with the package:
#' \describe{
#'   \item{\code{kir_haplotypes}}{
#'     KIR genes to KIR haplotypes dictionary.
#'   }
#' }
#'
#' @inheritParams hlaToVariable
#' @param counts Data frame with counts, such as returned by
#'   \code{\link{hlaCallsToCounts}} function. First column should contain
#'   samples IDs, following columns should contain counts (natural numbers
#'   including zero).
#' @param dictionary Path to file containing variables dictionary or data
#'   frame. See details for further explanations.
#' @param na.value Vector of length one specifying value for variables with no
#'   matching entry in \code{dictionary}. Default is to use \code{0}.
#'
#' @return Data frame with variable number of columns. First column named
#'   \code{"ID"} corresponds to \code{"ID"} column in \code{counts}, further
#'   columns hold indicators for converted variables. \code{1} and \code{0}
#'   code presence and absence of a variable respectively.
#'
#' @examples
#' countsToVariables(MiDAS_tut_KIR, "kir_haplotypes")
#'
#' @importFrom assertthat assert_that is.string
#' @importFrom rlang parse_exprs
#' @export
countsToVariables <- function(counts,
                              dictionary,
                              na.value = NA,
                              nacols.rm = TRUE) {
  assert_that(
    checkColDataFormat(counts),
    see_if(length(na.value) == 1, msg = "na.value length must equal 1."),
    isTRUEorFALSE(nacols.rm)
  )
  # A string dictionary is first resolved against the dictionaries bundled
  # with the package ("counts_<name>" files); if none matches, the string is
  # treated as a path to a user-supplied dictionary file.
  if (is.string(dictionary)) {
    pattern <- paste0("counts_", dictionary)
    dict_path <- listMiDASDictionaries(pattern = pattern, file.names = TRUE)
    if(length(dict_path) == 0) {
      dict_path <- dictionary
    }
    assert_that(is.readable(dict_path))
    dictionary <- read.table(
      file = dict_path,
      header = TRUE,
      sep = "\t",
      quote = "",
      stringsAsFactors = FALSE
    )
  }
  assert_that(
    is.data.frame(dictionary),
    colnamesMatches(x = dictionary, cols = c("Name", "Expression"))
  )
  # Parse each dictionary expression (e.g. "KIR2DL3 & ! KIR2DL2") into an
  # unevaluated R expression.
  expressions <- dictionary[, "Expression", drop = TRUE]
  expressions <- parse_exprs(expressions)
  variables <- lapply(
    X = expressions,
    FUN = function(expr) {
      vars <- all.vars(expr)
      if (all(has_name(counts, vars))) {
        # substitute() injects the relevant count columns into the expression,
        # which is then evaluated vectorized across samples.
        cl <- do.call(
          what = substitute,
          args = list(expr = expr, env = counts[, vars])
        )
        test <- eval(cl)
      } else {
        # Some referenced column is missing from counts: the whole variable
        # is marked NA for every sample.
        # NOTE(review): na.value is validated but literal NA is used here —
        # confirm whether na.value was meant to be substituted instead.
        test <- rep(NA, nrow(counts))
      }
      test <- as.integer(test)  # logical TRUE/FALSE -> 1/0 indicators
      return(test)
    }
  )
  res <- do.call(cbind, variables)
  colnames(res) <- dictionary[, "Name", drop = TRUE]
  # add ID column
  res <- cbind(counts[, 1, drop = FALSE], res)
  # Optionally drop variables that could not be evaluated for any sample.
  if (nacols.rm) {
    mask_na <- vapply(res, function(x) ! all(is.na(x)), logical(length = 1))
    res <- res[, mask_na, drop = FALSE]
  }
  return(res)
}
#' Get HLA - KIR interactions
#'
#' \code{getHlaKirInteractions} calculate presence-absence matrix of HLA - KIR
#' interactions.
#'
#' \code{hla_calls} are first reduced to all possible resolutions and converted
#' to additional variables, such as G groups, using dictionaries shipped with
#' the package.
#'
#' \code{interactions_dict} file should be a tsv format with header and two
#' columns. First column should be named \code{"Name"} and hold interactions
#' names, second should be named \code{"Expression"} and hold expression used to
#' identify interaction (eg. \code{"C2 & KIR2DL1"} will match all samples
#' with \code{C2} and \code{KIR2DL1}). The package is shipped with an interactions
#' file based on \href{https://www.ncbi.nlm.nih.gov/pmc/articles/PMC6558367/}{Pende et al., 2019.}
#'
#' @inheritParams checkHlaCallsFormat
#' @inheritParams checkKirCallsFormat
#' @param interactions_dict Path to HLA - KIR interactions dictionary.
#'
#' @return Data frame with variable number of columns. First column named
#'   \code{"ID"} corresponds to \code{"ID"} column in \code{counts}, further
#'   columns hold indicators for HLA - KIR interactions. \code{1} and \code{0}
#'   code presence and absence of a variable respectively.
#'
#' @examples
#' getHlaKirInteractions(
#'   hla_calls = MiDAS_tut_HLA,
#'   kir_calls = MiDAS_tut_KIR,
#'   interactions_dict = system.file(
#'     "extdata", "Match_counts_hla_kir_interactions.txt",
#'     package = "midasHLA")
#' )
#'
#' @importFrom assertthat assert_that is.string
#' @importFrom dplyr left_join
#' @importFrom magrittr %>%
#' @importFrom rlang warn
#' @importFrom stringi stri_detect_regex
#' @export
getHlaKirInteractions <- function(hla_calls,
                                  kir_calls,
                                  interactions_dict = system.file("extdata", "Match_counts_hla_kir_interactions.txt", package = "midasHLA")) {
  assert_that(
    checkHlaCallsFormat(hla_calls),
    checkKirCallsFormat(kir_calls),
    is.string(interactions_dict)
  )
  # Count how many HLA samples have a matching KIR sample; error if none,
  # warn if only a subset overlaps.
  id_matches <- hla_calls[, 1, drop = TRUE] %in% kir_calls[, 1, drop = TRUE] %>%
    sum()
  assert_that(id_matches > 0,
              msg = "IDs in hla_calls doesn't match IDs in kir_calls"
  )
  if (nrow(hla_calls) != id_matches) {
    msg <- sprintf("%i IDs in hla_calls matched IDs in kir_calls", id_matches)
    warn(msg)
  }
  # transform hla_calls to all possible variables and resolutions
  ## in practice only subset of variables could be used but this should be more time proof
  # All bundled "allele_*" dictionaries are applied (expression dictionaries
  # excluded) and joined column-wise onto the original calls by sample ID.
  midas_dicts <- listMiDASDictionaries(pattern = "allele_") %>%
    grep(pattern = "expression", value = TRUE, invert = TRUE)
  hla_variables <- Reduce(
    f = function(x, y) {
      left_join(x, hlaToVariable(hla_calls, dictionary = y), by = "ID")
    },
    x = midas_dicts,
    init = hla_calls
  )
  # Additionally append the calls reduced to every lower resolution
  # (step of 2 fields) down to 2-field resolution.
  hla_max_resolution <- hla_calls[, -1] %>%
    unlist() %>%
    getAlleleResolution() %>%
    max(na.rm = TRUE)
  while (hla_max_resolution > 2) {
    hla_variables <- hla_calls %>%
      reduceHlaCalls(resolution = hla_max_resolution - 2) %>%
      left_join(x = hla_variables, by = "ID")
    hla_max_resolution <- hla_max_resolution - 2
  }
  hla_counts <- hlaCallsToCounts(hla_variables, check_hla_format = FALSE)
  hla_counts[, -1] <- ceiling(hla_counts[, -1] / 2) # reduce to presence / absence indicators
  # Combine HLA indicators with KIR calls and evaluate the interaction
  # expressions from the dictionary on the joined table.
  counts <- left_join(hla_counts, kir_calls, by = "ID")
  interactions <- countsToVariables(counts, dictionary = interactions_dict)
  return(interactions)
}
#' Filter experiment by frequency
#'
#' Helper function for experiments filtering
#'
#' @inheritParams getExperimentFrequencies
#' @param lower_frequency_cutoff Positive number or \code{NULL}. Numbers greater
#'   than 1 are interpreted as number of feature occurrences, numbers between 0
#'   and 1 as fractions.
#' @param upper_frequency_cutoff Positive number or \code{NULL}. Numbers greater
#'   than 1 are interpreted as number of feature occurrences, numbers between 0
#'   and 1 as fractions.
#'
#' @return Filtered experiment matrix.
#'
#' @importFrom assertthat assert_that see_if is.number is.string
#' @importFrom magrittr %>%
#'
# S3 generic: dispatches on the class of `experiment`
# (matrix or SummarizedExperiment methods are defined below).
filterExperimentByFrequency <- function(experiment,
                                        carrier_frequency = FALSE,
                                        lower_frequency_cutoff = NULL,
                                        upper_frequency_cutoff = NULL) {
  UseMethod("filterExperimentByFrequency", experiment)
}
#' @rdname filterExperimentByFrequency
#' @method filterExperimentByFrequency matrix
#'
filterExperimentByFrequency.matrix <- function(experiment,
                                               carrier_frequency = FALSE,
                                               lower_frequency_cutoff = NULL,
                                               upper_frequency_cutoff = NULL) {
  # Removed dead leftover: `eval(formals()[["inheritance_model"]])` — this
  # function has no `inheritance_model` formal, so it evaluated to NULL and
  # the result was never used.
  assert_that(
    see_if(
      ! is.null(getExperimentPopulationMultiplicator(experiment)), # if pop_mul is not set frequency cannot be calculated
      msg = "Frequency filtration does not support provided experiment."
    ),
    isTRUEorFALSE(carrier_frequency),
    validateFrequencyCutoffs(lower_frequency_cutoff, upper_frequency_cutoff)
  )
  # Names of features whose frequency/count falls within the cutoffs.
  filtered_vars <- getExperimentFrequencies(
    experiment = experiment,
    carrier_frequency = carrier_frequency
  ) %>%
    getFrequencyMask(lower_frequency_cutoff = lower_frequency_cutoff,
                     upper_frequency_cutoff = upper_frequency_cutoff)
  # Keep only the passing features; drop = FALSE preserves matrix shape.
  mask <- rownames(experiment) %in% filtered_vars
  experiment <- experiment[mask, , drop = FALSE]
  return(experiment)
}
#' @rdname filterExperimentByFrequency
#' @method filterExperimentByFrequency SummarizedExperiment
#'
filterExperimentByFrequency.SummarizedExperiment <-
  function(experiment,
           carrier_frequency = FALSE,
           lower_frequency_cutoff = NULL,
           upper_frequency_cutoff = NULL) {
    # Removed dead leftover: `eval(formals()[["inheritance_model"]])` — no
    # such formal exists here, so it evaluated to NULL and was never used.
    assert_that(
      see_if(
        ! is.null(getExperimentPopulationMultiplicator(experiment)), # if pop_mul is not set frequency cannot be calculated
        msg = "Frequency filtration does not support provided experiment."
      ),
      isTRUEorFALSE(carrier_frequency),
      validateFrequencyCutoffs(lower_frequency_cutoff, upper_frequency_cutoff)
    )
    # Names of features whose frequency/count falls within the cutoffs.
    filtered_vars <- getExperimentFrequencies(
      experiment = experiment,
      carrier_frequency = carrier_frequency
    ) %>%
      getFrequencyMask(lower_frequency_cutoff = lower_frequency_cutoff,
                       upper_frequency_cutoff = upper_frequency_cutoff)
    mask <- rownames(experiment) %in% filtered_vars
    new_experiment <- experiment[mask, , drop = FALSE]
    # omnibus groups are static; the proper filtering takes place in getOmnibusGroups
    return(new_experiment)
  }
#' Calculate experiment's features frequencies
#'
#' \code{getExperimentFrequencies} calculate features frequencies.
#'
#' @param experiment Matrix or SummarizedExperiment object.
#' @param pop_mul Number by which number of samples should be multiplied to get
#'   the population size.
#' @param carrier_frequency Logical flag indicating if carrier frequency should
#'   be returned.
#' @param ref Wide format data frame with first column named "var" holding
#'   features matching \code{experiment} and specific populations frequencies in
#'   following columns. See \code{\link{getReferenceFrequencies}} for more
#'   details.
#'
#' @return Data frame with each row holding specific variable, it's count and
#'   frequency.
#'
#' @importFrom assertthat assert_that is.string see_if
#' @importFrom SummarizedExperiment assay
#'
# S3 generic: dispatches on the class of `experiment`
# (matrix or SummarizedExperiment methods are defined below).
getExperimentFrequencies <-
  function(experiment,
           pop_mul = NULL,
           carrier_frequency = FALSE,
           ref = NULL) {
    UseMethod("getExperimentFrequencies", experiment)
  }
#' @rdname getExperimentFrequencies
#' @method getExperimentFrequencies matrix
#'
getExperimentFrequencies.matrix <-
  function(experiment,
           pop_mul = NULL,
           carrier_frequency = FALSE,
           ref = NULL) {
    assert_that(
      isCountsOrZeros(experiment),
      is.number(pop_mul),
      isTRUEorFALSE(carrier_frequency)
    )
    if (! is.null(ref)) {
      assert_that(is.data.frame(ref))
    }
    if (carrier_frequency) {
      # Carrier frequency counts carriers (>= 1 copy), not allele copies,
      # so collapse counts to 0/1 and do not multiply the population size.
      experiment <- applyInheritanceModel(experiment, "dominant")
      pop_mul <- 1
    }
    counts_sums <- rowSums(experiment, na.rm = TRUE)
    # Per-feature number of non-missing samples; vectorized replacement for
    # apply(experiment, 1, function(row) sum(!is.na(row))) with identical
    # results and no per-row closure calls.
    denom <- rowSums(! is.na(experiment))
    # NA samples are excluded per feature; the denominator therefore can
    # differ between features.
    allele_freq <- counts_sums / (pop_mul * denom)
    counts_df <- data.frame(
      term = rownames(experiment),
      Counts = counts_sums,
      Freq = allele_freq,
      row.names = NULL,
      stringsAsFactors = FALSE
    )
    # Optionally attach reference population frequencies by feature name.
    if (! is.null(ref)) {
      counts_df <-
        left_join(counts_df, ref, by = c("term" = "var"))
    }
    return(counts_df)
  }
#' @rdname getExperimentFrequencies
#' @method getExperimentFrequencies SummarizedExperiment
#'
getExperimentFrequencies.SummarizedExperiment <-
  function(experiment,
           pop_mul = NULL,
           carrier_frequency = FALSE,
           ref = NULL) {
    assert_that(
      isNumberOrNULL(pop_mul),
      isTRUEorFALSE(carrier_frequency)
    )
    if (! is.null(ref)) {
      assert_that(is.data.frame(ref))
    }
    # Delegate to the matrix method using the assay matrix.
    counts <- assay(experiment)
    # NOTE(review): the pop_mul argument is unconditionally overwritten by
    # the experiment's metadata here — confirm whether the parameter is
    # intentionally ignored for SummarizedExperiment input.
    pop_mul <- getExperimentPopulationMultiplicator(experiment)
    getExperimentFrequencies(experiment = counts,
                             pop_mul = pop_mul,
                             carrier_frequency = carrier_frequency,
                             ref = ref
    )
  }
#' Apply inheritance model
#'
#' Helper function transforming experiment counts to selected
#' \code{inheritance_model}.
#'
#' Under \code{"dominant"} model homozygotes and heterozygotes are coded as
#' \code{1}. In \code{"recessive"} model homozygotes are coded as \code{1} and
#' other as \code{0}. In \code{"additive"} model homozygotes are coded as
#' \code{2} and heterozygotes as \code{1}. In \code{"overdominant"} homozygotes
#' (both \code{0} and \code{2}) are coded as \code{0} and heterozygotes as \code{1}.
#'
#' @param experiment Matrix or SummarizedExperiment object.
#' @param inheritance_model String specifying inheritance model to use.
#'   Available choices are \code{"dominant"}, \code{"recessive"},
#'   \code{"additive"}.
#'
#' @return \code{experiment} converted to specified inheritance model.
#'
# S3 generic: dispatches on the class of `experiment`
# (matrix or SummarizedExperiment methods are defined below).
applyInheritanceModel <-
  function(experiment,
           inheritance_model = c("dominant", "recessive", "additive", "overdominant")) {
    UseMethod("applyInheritanceModel", experiment)
  }
#' @rdname applyInheritanceModel
#' @method applyInheritanceModel matrix
#'
applyInheritanceModel.matrix <- function(experiment,
                                         inheritance_model = c("dominant", "recessive", "additive", "overdominant")) {
  # match.arg() makes the vector default usable (selects "dominant") and
  # errors loudly on an unrecognized model instead of switch() silently
  # returning NULL.
  inheritance_model <- match.arg(inheritance_model)
  .classifyGte <- function(x, val) {
    # Classify entries as being >= val (0/1 integer, dims preserved).
    # Specifically: val = 1 codes dominant, val = 2 codes recessive.
    x <- x >= val
    mode(x) <- "integer"
    x
  }
  .classifyEq <- function(x, val) {
    # Classify entries as being == val (0/1 integer, dims preserved).
    # Specifically: val = 1 codes overdominant (heterozygotes only).
    x <- x == val
    mode(x) <- "integer"
    x
  }
  switch(
    inheritance_model,
    "additive" = experiment,                       # keep raw copy counts
    "dominant" = .classifyGte(experiment, 1),      # ifelse(x >= 1, 1, 0)
    "recessive" = .classifyGte(experiment, 2),     # ifelse(x >= 2, 1, 0)
    "overdominant" = .classifyEq(experiment, 1)    # ifelse(x == 1, 1, 0)
  )
}
#' @rdname applyInheritanceModel
#' @method applyInheritanceModel SummarizedExperiment
#'
applyInheritanceModel.SummarizedExperiment <- function(experiment,
                                                       inheritance_model = c("dominant", "recessive", "additive", "overdominant")) {
  # Delegate to the matrix method on the assay and write the transformed
  # counts back, keeping all other slots/metadata of the object unchanged.
  SummarizedExperiment::assay(experiment) <-
    applyInheritanceModel(SummarizedExperiment::assay(experiment), inheritance_model)
  return(experiment)
}
#' Helper function for filtering frequency data frame
#'
#' @inheritParams filterExperimentByFrequency
#' @param df Data frame as returned by \code{getExperimentFrequencies}.
#'
#' @return Character vector containing names of variables after filtration.
#'
#' @importFrom dplyr filter
#' @importFrom magrittr %>%
#'
getFrequencyMask <- function(df,
                             lower_frequency_cutoff = NULL,
                             upper_frequency_cutoff = NULL) {
  # NULL cutoffs mean "no bound". Plain if/else rather than ifelse(): the
  # cutoffs are scalars and ifelse() is the vectorized construct.
  if (is.null(lower_frequency_cutoff)) {
    lower_frequency_cutoff <- -Inf
  }
  if (is.null(upper_frequency_cutoff)) {
    upper_frequency_cutoff <- Inf
  }
  # Cutoffs <= 1 are interpreted as frequency fractions (filter on Freq),
  # larger values as raw occurrence counts (filter on Counts). Each filter
  # pair below is a no-op for the mode that does not apply.
  freqs_are_float <- lower_frequency_cutoff <= 1 || upper_frequency_cutoff <= 1
  variables_freq <- df %>%
    filter(.data$Counts > lower_frequency_cutoff |
             freqs_are_float) %>%
    filter(.data$Freq > lower_frequency_cutoff |
             ! freqs_are_float) %>%
    filter(.data$Counts < upper_frequency_cutoff |
             freqs_are_float) %>%
    filter(.data$Freq < upper_frequency_cutoff |
             ! freqs_are_float)
  filtered_vars <- variables_freq$term
  return(filtered_vars)
}
#' Filter experiment by variable
#'
#' Helper function for experiments filtering
#'
#' @param experiment Matrix or SummarizedExperiment object.
#' @param variables Character vector specifying features to choose.
#'
#' @return Filtered \code{experiment} object.
#'
# S3 generic: dispatches on the class of `experiment`
# (matrix or SummarizedExperiment methods are defined below).
filterExperimentByVariables <-
  function(experiment, variables) {
    UseMethod("filterExperimentByVariables", experiment)
  }
#' @rdname filterExperimentByVariables
#' @method filterExperimentByVariables matrix
#'
filterExperimentByVariables.matrix <- function(experiment, variables) {
  # drop = FALSE keeps the result a matrix even when a single feature is
  # selected (consistent with the other filter helpers in this file);
  # without it R would collapse a one-row selection to a plain vector.
  return(experiment[variables, , drop = FALSE])
}
#' @rdname filterExperimentByVariables
#' @method filterExperimentByVariables SummarizedExperiment
#'
filterExperimentByVariables.SummarizedExperiment <- function(experiment, variables) {
  # Subset the SummarizedExperiment rows (features) by name.
  og <- S4Vectors::metadata(experiment)$omnibus_groups
  experiment <- experiment[variables, ]
  # omnibus groups are static; the proper filtering takes place in getOmnibusGroups
  # if (! is.null(og)) {
  #   og <- filterListByElements(list = og, elements = variables)
  #   S4Vectors::metadata(experiment)$omnibus_groups <- og
  # }
  return(experiment)
}
#' Get experiment's population multiplicator
#'
#' \code{getExperimentPopulationMultiplicator} extracts population multiplicator
#' from experiment's metadata.
#'
#' @param experiment Matrix or SummarizedExperiment object.
#'
#' @return Experiment's population multiplicator number.
#'
#' @importFrom S4Vectors metadata
#'
# S3 generic: dispatches on the class of `experiment`
# (matrix or SummarizedExperiment methods are defined below).
getExperimentPopulationMultiplicator <-
  function(experiment) {
    UseMethod("getExperimentPopulationMultiplicator", experiment)
  }
#' @rdname getExperimentPopulationMultiplicator
#' @method getExperimentPopulationMultiplicator matrix
#'
getExperimentPopulationMultiplicator.matrix <- function(experiment) {
  # A plain matrix carries no metadata, hence no population multiplicator.
  NULL
}
#' @rdname getExperimentPopulationMultiplicator
#' @method getExperimentPopulationMultiplicator SummarizedExperiment
#'
getExperimentPopulationMultiplicator.SummarizedExperiment <-
  function(experiment) {
    # Read the "pop_mul" entry from the experiment's metadata;
    # returns NULL when the entry is absent.
    pop_mul <- S4Vectors::metadata(experiment)[["pop_mul"]]
    return(pop_mul)
  }
#' Check if experiment is inheritance model applicable
#'
#' \code{isExperimentInheritanceModelApplicable} check experiment's metadata
#' for presence of \code{"inheritance_model_applicable"} flag, indicating if
#' inheritance model can be applied.
#'
#' @param experiment Matrix or SummarizedExperiment object.
#'
#' @return Logical flag.
#'
#' @importFrom S4Vectors metadata
#'
# S3 generic: dispatches on the class of `experiment`
# (matrix or SummarizedExperiment methods are defined below).
isExperimentInheritanceModelApplicable <-
  function(experiment) {
    UseMethod("isExperimentInheritanceModelApplicable", experiment)
  }
#' @rdname isExperimentInheritanceModelApplicable
#' @method isExperimentInheritanceModelApplicable matrix
#'
isExperimentInheritanceModelApplicable.matrix <- function(experiment) {
  # A plain matrix carries no metadata flag, so inheritance models never apply.
  FALSE
}
#' @rdname isExperimentInheritanceModelApplicable
#' @method isExperimentInheritanceModelApplicable SummarizedExperiment
#'
isExperimentInheritanceModelApplicable.SummarizedExperiment <-
  function(experiment) {
    # Read the flag from metadata; returns NULL (not FALSE) when the
    # "inheritance_model_applicable" entry is absent.
    inheritance_model_applicable <-
      metadata(experiment)[["inheritance_model_applicable"]]
    return(inheritance_model_applicable)
  }
|
60194484f20c24b1603d3c0108b28ef620aa2f20
|
43cfca848a3f19cd5806951e3f740e2ba1edfb2d
|
/man/sim.autocorrelated.Rd
|
1239b291842413329f2ee5e76b9a1ee6647977a1
|
[] |
no_license
|
quanteco/quanteco-tools
|
9176018da4d5422c1b3f762e0920cd35592c7cdf
|
dbd0d852e73939ed229f91e447f5f3a4e449e1cb
|
refs/heads/master
| 2021-01-15T22:13:49.434241
| 2015-01-29T22:30:44
| 2015-01-29T22:30:44
| 29,038,815
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 467
|
rd
|
sim.autocorrelated.Rd
|
% Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/simautocor.R
\name{sim.autocorrelated}
\alias{sim.autocorrelated}
\title{Simulate Autocorrelated Data}
\usage{
sim.autocorrelated(X, rho, W)
}
\arguments{
\item{X}{a vector of values}
\item{rho}{strength of correlation}
\item{W}{a neighborhood matrix}
}
\description{
Simulates autocorrelated data given a vector (\strong{X}), \eqn{\rho}, and a neighborhood matrix (\strong{W})
}
|
0dce9b38f666041cd824435c0cbc064115ade76e
|
3176c7f008fabb7a406241149808fc2f7cacec4d
|
/man/antiExactMatch.Rd
|
b3a5cb397143bc71dcaefbeff796153a289b2e15
|
[
"MIT"
] |
permissive
|
markmfredrickson/optmatch
|
712f451829c128c4f1fd433e2d527bb7160e8fcc
|
51e0b03a30420179149be254262d7d6414f3d708
|
refs/heads/master
| 2023-05-31T05:11:01.917928
| 2023-04-06T13:18:58
| 2023-04-06T13:18:58
| 1,839,323
| 37
| 18
|
NOASSERTION
| 2023-01-26T18:45:56
| 2011-06-02T20:49:32
|
R
|
UTF-8
|
R
| false
| true
| 2,088
|
rd
|
antiExactMatch.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/exactMatch.R
\name{antiExactMatch}
\alias{antiExactMatch}
\title{Specify a matching problem where units in a common factor cannot be matched.}
\usage{
antiExactMatch(x, z)
}
\arguments{
\item{x}{A factor across which matches should be allowed.}
\item{z}{A logical or binary vector the same length as \code{x}
indicating treatment and control for each unit in the study.
TRUE or 1 represents a treatment unit, FALSE of 0 represents
a control unit. NA units are excluded.}
}
\value{
A distance specification that encodes the across factor level constraint.
}
\description{
This function builds a distance specification where treated units
are infinitely far away from control units that share the same
level of a given factor variable. This can be useful for ensuring
that matched groups come from qualitatively different groups.
}
\details{
The \code{\link{exactMatch}} function provides a way of specifying
a matching problem where only units within a factor level may be
matched. This function provides the reverse scenario: a matching
problem in which only units across factor levels are permitted to
match. Like \code{\link{exactMatch}}, the results of this function will
most often be used as a \code{within} argument to
\code{\link{match_on}} or another distance specification creation
function to limit the scope of the final distance specification
(i.e., disallowing any match between units with the same value on
the factor variable \code{x}).
}
\examples{
data(nuclearplants)
# force entries to be within the same factor:
em <- fullmatch(exactMatch(pr ~ pt, data = nuclearplants), data = nuclearplants)
table(nuclearplants$pt, em)
# force treated and control units to have different values of `pt`:
z <- nuclearplants$pr
names(z) <- rownames(nuclearplants)
aem <- fullmatch(antiExactMatch(nuclearplants$pt, z), data = nuclearplants)
table(nuclearplants$pt, aem)
}
\seealso{
\code{\link{exactMatch}}, \code{\link{match_on}}, \code{\link{caliper}}, \code{\link{fullmatch}}, \code{\link{pairmatch}}
}
|
0c2c54d4a34e54b874d69727aee4dd79c012419e
|
86049b2ad11ffb9fcda331389526320e766c5691
|
/man/predict.cv.oem.Rd
|
0016692a237fbefd38dceebfec913f15376fa78e
|
[] |
no_license
|
jaredhuling/oem
|
a50aa706d879496f60da35acdafbfaf70c1d0fef
|
a854dc18e66d520d83c100b27bf96c71cb1bab75
|
refs/heads/master
| 2023-07-21T06:14:56.863833
| 2022-10-12T14:07:20
| 2022-10-12T14:07:20
| 54,896,754
| 22
| 7
| null | 2021-07-07T03:17:40
| 2016-03-28T14:09:52
|
C++
|
UTF-8
|
R
| false
| true
| 2,541
|
rd
|
predict.cv.oem.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methods.R
\name{predict.cv.oem}
\alias{predict.cv.oem}
\title{Prediction function for fitted cross validation oem objects}
\usage{
\method{predict}{cv.oem}(
object,
newx,
which.model = "best.model",
s = c("lambda.min", "lambda.1se"),
...
)
}
\arguments{
\item{object}{fitted \code{"cv.oem"} model object}
\item{newx}{Matrix of new values for \code{x} at which predictions are to be made. Must be a matrix; can be sparse as in the
\code{CsparseMatrix} objects of the \pkg{Matrix} package
This argument is not used for \code{type = c("coefficients","nonzero")}}
\item{which.model}{If multiple penalties are fit and returned in the same \code{oem} object, the \code{which.model} argument is used to
specify which model to make predictions for. For example, if the oem object \code{"oemobj"} was fit with argument
\code{penalty = c("lasso", "grp.lasso")}, then \code{which.model = 2} provides predictions for the group lasso model. For
\code{predict.cv.oem()}, can specify
\code{"best.model"} to use the best model as estimated by cross-validation}
\item{s}{Value(s) of the penalty parameter lambda at which predictions are required. Default is the entire sequence used to create
the model. For \code{predict.cv.oem()}, can also specify \code{"lambda.1se"} or \code{"lambda.min"} for best lambdas estimated by cross validation}
\item{...}{used to pass the other arguments for predict.oem}
}
\value{
An object depending on the type argument
}
\description{
Prediction function for fitted cross validation oem objects
}
\examples{
set.seed(123)
n.obs <- 1e4
n.vars <- 100
n.obs.test <- 1e3
true.beta <- c(runif(15, -0.5, 0.5), rep(0, n.vars - 15))
x <- matrix(rnorm(n.obs * n.vars), n.obs, n.vars)
y <- rnorm(n.obs, sd = 3) + x \%*\% true.beta
x.test <- matrix(rnorm(n.obs.test * n.vars), n.obs.test, n.vars)
y.test <- rnorm(n.obs.test, sd = 3) + x.test \%*\% true.beta
fit <- cv.oem(x = x, y = y,
penalty = c("lasso", "grp.lasso"),
groups = rep(1:10, each = 10),
nlambda = 10)
preds.best <- predict(fit, newx = x.test, type = "response", which.model = "best.model")
apply(preds.best, 2, function(x) mean((y.test - x) ^ 2))
preds.gl <- predict(fit, newx = x.test, type = "response", which.model = "grp.lasso")
apply(preds.gl, 2, function(x) mean((y.test - x) ^ 2))
preds.l <- predict(fit, newx = x.test, type = "response", which.model = 1)
apply(preds.l, 2, function(x) mean((y.test - x) ^ 2))
}
|
86f224547151112e0cd69da4497aa1b4454d8672
|
c739fa35644593d9ba7b8087d32feebe2b5a25f4
|
/R/fp_border.R
|
bc7ff13c55509aa5b4adb4232199cd996e48f72d
|
[] |
no_license
|
Kill3rbee/officer
|
b813b43489441aea4d377db76fb95a8203be4171
|
cb88985b8a0c633282dce99c267380b32820e959
|
refs/heads/master
| 2020-06-27T10:19:57.971719
| 2019-07-25T11:07:39
| 2019-07-25T11:07:39
| 199,925,311
| 1
| 0
| null | 2019-07-31T20:35:31
| 2019-07-31T20:35:31
| null |
UTF-8
|
R
| false
| false
| 1,580
|
r
|
fp_border.R
|
border_styles = c( "none", "solid", "dotted", "dashed" )
#' @title border properties object
#'
#' @description create a border properties object.
#'
#' @param color border color - single character value (e.g. "#000000" or "black")
#' @param style border style - single character value : "none" or "solid" or "dotted" or "dashed"
#' @param width border width - an integer value : 0>= value
#' @examples
#' fp_border()
#' fp_border(color="orange", style="solid", width=1)
#' fp_border(color="gray", style="dotted", width=1)
#' @export
fp_border = function( color = "black", style = "solid", width = 1 ){
out <- list()
out <- check_set_numeric( obj = out, width)
out <- check_set_color(out, color)
out <- check_set_choice( obj = out, style,
choices = border_styles )
class( out ) = "fp_border"
out
}
#' @param object fp_border object
#' @param ... further arguments - not used
#' @rdname fp_border
#' @examples
#'
#' # modify object ------
#' border <- fp_border()
#' update(border, style="dotted", width=3)
#' @export
update.fp_border <- function(object, color, style, width, ...) {
if( !missing( color ) ){
object <- check_set_color(object, color)
}
if( !missing( width ) ){
object <- check_set_integer( obj = object, width)
}
if( !missing( style ) ){
object <- check_set_choice( obj = object, style, choices = border_styles )
}
object
}
#' @export
print.fp_border <- function(x, ...) {
msg <- paste0("line: color: ", x$color, ", width: ", x$width, ", style: ", x$style, "\n")
cat(msg)
invisible()
}
|
3c8294cb4f3cd6cfe2c4d369380914dd38afab33
|
16526d1a3aecaddfa754b8f3299b63f455d83d3b
|
/cachematrix.R
|
6a967b40160d8622f625ba4a1066ccfbd692c253
|
[] |
no_license
|
bmmulder/ProgrammingAssignment2
|
3b042cf7292682ebeb3a1715ce76c4c603589245
|
3efa37eb62dde53b7b9878610bfec5d4cc4ae013
|
refs/heads/master
| 2021-01-23T21:31:43.082557
| 2014-08-10T05:43:31
| 2014-08-10T05:43:31
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,197
|
r
|
cachematrix.R
|
## The first function, makeCacheMatrix creates a special "matrix"
# setup cached matrix object
makeCacheMatrix <- function(x = matrix()) {
# init the matrix variable
m <- NULL
# set the value of the matrix
set <- function(y) {
x <<- y
m <<- NULL
}
# get the value of the matrix
get <- function() x
# set the value of the inverted matrix
setinverse <- function(solve) m <<- solve
# get the value of the inverted matrix
getinverse <- function() m
# add accessor list
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## The following function calculates the inverted special "matrix"
## created with the above function.
cacheSolve <- function(x, ...) {
# Try to get the cached matrix
m <- x$getinverse()
# if there is a cached version
if(!is.null(m)) {
message("getting cached data")
# return the cached matrix
return(m)
}
# or else, get the matrix
data <- x$get()
# invert the matrix
m <- solve(data, ...)
# cache it for the future
x$setinverse(m)
# return the inverted matrix
m
}
|
327825d5507e2483448b445f47b3230d4892ce05
|
31f0b1306739e3b228e01b70e586ac60c529885a
|
/man/seqentropy.Rd
|
b5a738d7b0c386eb06d6ffa47114af1181fcd49f
|
[] |
no_license
|
WAFI-CNR/ddna-rpackage
|
6f086c9d75bcc68a25c6e4709668a51575fcc767
|
5fbc8b98c22dc828b63ca4b9c4e78ca7cfcb7647
|
refs/heads/master
| 2020-05-31T08:20:30.683215
| 2019-10-21T21:06:02
| 2019-10-21T21:06:02
| 190,187,493
| 0
| 0
| null | 2019-10-19T11:11:41
| 2019-06-04T11:24:31
|
R
|
UTF-8
|
R
| false
| true
| 217
|
rd
|
seqentropy.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ddna.R
\name{seqentropy}
\alias{seqentropy}
\title{Title}
\usage{
seqentropy(seqpdf)
}
\arguments{
\item{seqpdf}{}
}
\description{
Title
}
|
c634e20a0c92ff6bcb6c5418078b703084c8d9ac
|
b4dd54123785b310d03a88835a19fcee77b38b65
|
/R/tokenizer.R
|
34186c0cf2f853b38e2eb6d0e2217b07cd50ef90
|
[
"MIT"
] |
permissive
|
tidyverse/readr
|
5b8a49899586ab0a7a28108b9a0ef634e60940d5
|
80e4dc1a8e48571323cdec8703d31eb87308eb01
|
refs/heads/main
| 2023-08-31T01:10:09.896284
| 2023-08-01T20:02:52
| 2023-08-01T20:02:52
| 11,663,980
| 631
| 271
|
NOASSERTION
| 2023-09-03T11:49:42
| 2013-07-25T15:28:22
|
R
|
UTF-8
|
R
| false
| false
| 5,163
|
r
|
tokenizer.R
|
#' Tokenize a file/string.
#'
#' Turns input into a character vector. Usually the tokenization is done purely
#' in C++, and never exposed to R (because that requires a copy). This function
#' is useful for testing, or when a file doesn't parse correctly and you want
#' to see the underlying tokens.
#'
#' @inheritParams datasource
#' @param tokenizer A tokenizer specification.
#' @param skip Number of lines to skip before reading data.
#' @param n_max Optionally, maximum number of rows to tokenize.
#' @keywords internal
#' @export
#' @examples
#' tokenize("1,2\n3,4,5\n\n6")
#'
#' # Only tokenize first two lines
#' tokenize("1,2\n3,4,5\n\n6", n = 2)
tokenize <- function(file, tokenizer = tokenizer_csv(), skip = 0, n_max = -1L) {
ds <- datasource(file, skip = skip, skip_empty_rows = FALSE)
tokenize_(ds, tokenizer, n_max)
}
#' Tokenizers.
#'
#' Explicitly create tokenizer objects. Usually you will not call these
#' function, but will instead use one of the use friendly wrappers like
#' [read_csv()].
#'
#' @keywords internal
#' @name Tokenizers
#' @examples
#' tokenizer_csv()
NULL
#' @export
#' @rdname Tokenizers
#' @param comment A string used to identify comments. Any text after the
#' comment characters will be silently ignored.
#' @param na Character vector of strings to interpret as missing values. Set this
#' option to `character()` to indicate no missing values.
#' @param quoted_na `r lifecycle::badge("deprecated")` Should missing values
#' inside quotes be treated as missing values (the default) or strings. This
#' parameter is soft deprecated as of readr 2.0.0.
#' @param delim Single character used to separate fields within a record.
#' @param quote Single character used to quote strings.
#' @param trim_ws Should leading and trailing whitespace (ASCII spaces and tabs) be trimmed from
#' each field before parsing it?
#' @param escape_double Does the file escape quotes by doubling them?
#' i.e. If this option is `TRUE`, the value `""""` represents
#' a single quote, `\"`.
#' @param escape_backslash Does the file use backslashes to escape special
#' characters? This is more general than `escape_double` as backslashes
#' can be used to escape the delimiter character, the quote character, or
#' to add special characters like `\\n`.
#' @param skip_empty_rows Should blank rows be ignored altogether? i.e. If this
#' option is `TRUE` then blank rows will not be represented at all. If it is
#' `FALSE` then they will be represented by `NA` values in all the columns.
tokenizer_delim <- function(delim, quote = '"', na = "NA", quoted_na = TRUE, comment = "",
trim_ws = TRUE,
escape_double = TRUE,
escape_backslash = FALSE,
skip_empty_rows = TRUE) {
structure(
list(
delim = delim,
quote = quote,
na = na,
quoted_na = quoted_na,
comment = comment,
trim_ws = trim_ws,
escape_double = escape_double,
escape_backslash = escape_backslash,
skip_empty_rows = skip_empty_rows
),
class = "tokenizer_delim"
)
}
#' @export
#' @rdname Tokenizers
tokenizer_csv <- function(na = "NA", quoted_na = TRUE, quote = "\"",
comment = "", trim_ws = TRUE,
skip_empty_rows = TRUE) {
tokenizer_delim(
delim = ",",
na = na,
quoted_na = quoted_na,
quote = quote,
comment = comment,
trim_ws = trim_ws,
escape_double = TRUE,
escape_backslash = FALSE,
skip_empty_rows = skip_empty_rows
)
}
#' @export
#' @rdname Tokenizers
tokenizer_tsv <- function(na = "NA", quoted_na = TRUE, quote = "\"",
comment = "", trim_ws = TRUE,
skip_empty_rows = TRUE) {
tokenizer_delim(
delim = "\t",
na = na,
quoted_na = quoted_na,
quote = quote,
comment = comment,
trim_ws = trim_ws,
escape_double = TRUE,
escape_backslash = FALSE,
skip_empty_rows = skip_empty_rows
)
}
#' @export
#' @rdname Tokenizers
tokenizer_line <- function(na = character(), skip_empty_rows = TRUE) {
structure(list(na = na, skip_empty_rows = skip_empty_rows),
class = "tokenizer_line"
)
}
#' @export
#' @rdname Tokenizers
tokenizer_log <- function(trim_ws) {
structure(list(trim_ws = trim_ws), class = "tokenizer_log")
}
#' @export
#' @rdname Tokenizers
#' @param begin,end Begin and end offsets for each file. These are C++
#' offsets so the first column is column zero, and the ranges are
#' [begin, end) (i.e inclusive-exclusive).
tokenizer_fwf <- function(begin, end, na = "NA", comment = "", trim_ws = TRUE,
skip_empty_rows = TRUE) {
structure(list(
begin = as.integer(begin), end = as.integer(end), na = na, comment = comment,
trim_ws = trim_ws, skip_empty_rows = skip_empty_rows
),
class = "tokenizer_fwf"
)
}
#' @export
#' @rdname Tokenizers
tokenizer_ws <- function(na = "NA", comment = "", skip_empty_rows = TRUE) {
structure(list(na = na, comment = comment, skip_empty_rows = skip_empty_rows),
class = "tokenizer_ws"
)
}
|
5dc4c3c26c59294a6dba695cf7367a58a3bd35c0
|
2526eb9d76b1eb36770ab82d5baefb25c0b7eb91
|
/R/get_last_sync.R
|
5192195c8ab4f293911b1ba11bd6e329927941ed
|
[] |
no_license
|
kaczmarj/fitbitScraper
|
96a97d401c467d75888e9c0963399976a95ac192
|
488983580225812d537c51e4f834ace674add93d
|
refs/heads/master
| 2021-01-12T16:57:13.761434
| 2016-10-20T14:40:59
| 2016-10-20T14:40:59
| 71,471,856
| 0
| 0
| null | 2016-10-20T14:34:17
| 2016-10-20T14:34:17
| null |
UTF-8
|
R
| false
| false
| 1,907
|
r
|
get_last_sync.R
|
#' Get date and time of last sync to fitbit.com
#'
#' Get last sync from fitbit using cookie returned from login function
#' @param cookie Cookie returned after login, specifically the "u" cookie
#' @keywords sync
#' @export
#' @return A list of three character vectors
#' \item{last_sync}{Character vector that looks like POSIXct}
#' \item{profile_url}{The url of your FitBit profile}
#' \item{display_name}{Your public FitBit display name}
#' @examples
#' \dontrun{
#' get_last_sync(cookie)
#' }
#' get_last_sync
get_last_sync <- function(cookie){
if(!is.character(cookie)){stop("cookie must be a character string")}
url <- "https://www.fitbit.com/ajaxapi"
request <- paste0('{"template":"/ajaxTemplate.jsp",
"serviceCalls":[{"name":"leaderboardAjaxService","method":"data"}]}')
csrfToken <- stringr::str_extract(cookie,
"[A-Z0-9]{8}\\-[A-Z0-9]{4}\\-[A-Z0-9]{4}\\-[A-Z0-9]{4}\\-[0-9A-Z]{12}")
body <- list(request=request, csrfToken = csrfToken)
response <- httr::POST(url, body=body, httr::config(cookie=cookie))
dat_string <- methods::as(response, "character")
dat_list <- jsonlite::fromJSON(dat_string)
# Date and time of last sync in UTC.
UTC_last_sync <- dat_list[["rankDisplayList"]][["syncTime"]]
if (!is.null(UTC_last_sync)){ # if not null, convert it to our timezone.
tz <- Sys.timezone()
if(is.null(tz)){tz <- format(Sys.time(),"%Z")}
last_sync <- as.POSIXct(UTC_last_sync, format="%Y-%m-%dT%H:%M:%S",tz='UTC')
attributes(last_sync)$tzone <- tz # Convert to our timezone.
} else{ # if no sync time, return NA.
last_sync <- NA
}
dat_list[["rankDisplayList"]][["profileUrl"]]
profile_url <- paste0('https://www.fitbit.com',
dat_list[["rankDisplayList"]][["profileUrl"]])
display_name <- dat_list[["rankDisplayList"]][["displayName"]]
return(list(last_sync, profile_url, display_name) )
}
|
2cddfb5a2107de33d76f970a03f9423c267910c7
|
ada1437970128526daf432e5bcdc14c3dd4435d8
|
/man/evolve.Rd
|
86b86c7f38dc9ebeab6b577ac4b8dd58db7c5b9c
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
thomasp85/particles
|
71c7a09cc636000a188a1ae0d9b841e6d6ec37ad
|
b194ad3c5c4017320ae0c51103401c274397cbf3
|
refs/heads/main
| 2022-09-04T17:54:53.130881
| 2022-08-19T12:16:38
| 2022-08-19T12:16:38
| 92,283,359
| 127
| 9
|
NOASSERTION
| 2022-08-19T06:29:35
| 2017-05-24T11:07:43
|
R
|
UTF-8
|
R
| false
| true
| 2,577
|
rd
|
evolve.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/evolve.R
\name{evolve}
\alias{evolve}
\title{Move the simulation forward one or more steps}
\usage{
evolve(simulation, steps = NULL, on_generation = NULL, ...)
}
\arguments{
\item{simulation}{A simulation object}
\item{steps}{The number of generations to progress or a function getting the
simulation object and returns \code{TRUE} if the simulation should proceed and
\code{FALSE} if it should stop. If \code{NULL} the simulation will run until
\code{alpha_min} has been reached.}
\item{on_generation}{A function to be called after each generation has been
progressed. The function will get the current state of the simulation as the
first argument. If the function returns a simulation object it will replace
the current simulation from the next generation. In the case of any other
return type the return will be discarded and the function will have no effect
outside its side-effects.}
\item{...}{Additional arguments to \code{on_generation}}
}
\value{
A simulation object with updated positions and velocities
}
\description{
This is the function that move the simulation forward in time. It is possible
to either specify the number of steps that should be simulated or let the
simulation terminate as \code{alpha_min} is reached. Note that some values of
\code{alpha} and \code{alpha_target} does not allow alpha to converge to \code{alpha_min} so
letting the simulation self-terminate can result in an infinite loop. The
default settings will result in \code{alpha_min} being reached in 300 generations.
}
\details{
Each generation in the simulation progress in the following manner:
\enumerate{
\item Check whether the specified number of generations has been reached
\item Check whether \code{alpha_min} has been reached
\item If either 1. or 2. is true, terminate the simulation
\item Apply the forces on the current particle positions and velocities in the
order they have been added
\item Reduce the velocity according to the given \code{velocity_decay}
\item Update the position and velocity based on any provided constraints
\item Calculate the new particle positions based on the new velocity
\item If given, call the \code{on_generation} function.
}
}
\examples{
graph <- tidygraph::create_notable('folkman')
sim <- graph |>
simulate() |>
wield(link_force) |>
wield(manybody_force)
# Take 5 steps and tell about it
sim |> evolve(5, function(sim) {
cat('Generation: ', evolutions(sim), '\n', sep = '')
})
# Run evolution until alpha_min is reached
sim |> evolve(NULL)
}
|
c6b20752f70b0eabbd3058d7a4ade1d33f48d025
|
b4fc54d90f295493c02b900cff8c5d24e6cd17a3
|
/analysis/multiple_types/google/gg-3b-MM-SF-vs-SM-SF-cost-analysis.R
|
a642787642343b27bf8bbd67cab56fc8a3435aea
|
[
"Apache-2.0"
] |
permissive
|
fabiomorais/ASaaS
|
0b19ecf15197772e1c9963473a769b74a827d3da
|
2452adbca559e0d399c82d7183692cf6962cf1ea
|
refs/heads/master
| 2021-01-01T04:42:54.088109
| 2017-07-18T18:30:31
| 2017-07-18T18:30:31
| 97,231,915
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,930
|
r
|
gg-3b-MM-SF-vs-SM-SF-cost-analysis.R
|
library(dplyr)
library(ggplot2)
library(scales)
library(stringr)
flavors = sort(c("c4.large", "m4.large", "r3.large", "c3.large", "m3.medium"))
f_str = str_sub(flavors, 1, 2)
ref = data.frame(cpuRef = c(32, 32, 32, 32), memRef = c(32, 64, 128, 256), cpuP = rep(1, 4), memP = c(1, 2, 4, 8))%>%
group_by(memRef) %>% mutate(scenario = paste(cpuRef, memRef, sep = "_"), scP = paste(cpuP, memP, sep = ":"))
df_sm_sf = read.table(file = "../data/google/scaling-analysis-SM-SF.dat", header = T)
df_mm_sf = read.table(file = "../data/google/scaling-analysis-MM-SF.dat", header = T)
df_sm_sf = df_sm_sf %>% select(scenario, jobId, flavor, metric_base, len, ecu_viol, mem_viol, tviol, cost_total) %>%
group_by(scenario, jobId, flavor, metric_base) %>% ungroup() %>% distinct()
df_mm_sf = df_mm_sf %>% select(scenario, jobId, flavor, metric_base, len, ecu_viol, mem_viol, tviol, cost_total) %>%
group_by(scenario, jobId, flavor, metric_base) %>% ungroup() %>% distinct()
df1 = df_sm_sf %>% ungroup() %>% group_by(scenario, flavor, metric_base) %>% summarise(cost = sum(cost_total)) %>% mutate(metric_base = as.character(metric_base))
df2 = df_mm_sf %>% ungroup() %>% group_by(scenario, flavor, metric_base) %>% summarise(cost = sum(cost_total)) %>% mutate(metric_base = as.character(metric_base))
df3 = inner_join(df2, df1, by = c("scenario", "flavor")) %>% rename(metric_base = metric_base.x, metric_ref = metric_base.y, cost_mult = cost.x, cost_single = cost.y)
df3 = df3 %>% mutate(incrase = (cost_mult - cost_single) / cost_single)
df3
df3$metric_ref[df3$metric_ref == "ecu"] = "ECU"
df3$metric_ref[df3$metric_ref == "mem"] = "Memória"
dplot = df3 %>% inner_join(ref, by = "scenario")
dplot = dplot %>% filter(scP %in% c("1:1", "1:4"))
write.table(dplot, file = "../data/google/3b_cost_violations_trade-off-google-data.dat", row.names = FALSE)
|
741f7871ab57afc489d653ecaf96d712d83afdb3
|
a1d0246ed8e85838ef4b128297cfc3c62a607646
|
/R/analysis.R
|
8e805ad8f8fcf78b9284a787537efd2dc0a989b1
|
[] |
no_license
|
aurel-l/isea_analysis
|
201d81c096009cf88cdcd8221bc9da5374c7598b
|
b7c4cdea5e9720235d9e540ff9406e3661b0adcc
|
refs/heads/master
| 2021-01-23T03:58:50.999624
| 2015-10-04T17:58:17
| 2015-10-04T17:58:17
| 41,773,918
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,364
|
r
|
analysis.R
|
#!/usr/bin/Rscript
# source script that is common to all scripts
tryCatch({
# look for common.R in the same folder than the script
source(paste0(
dirname(sub('--file=','',commandArgs(trailingOnly=F)[
grep('--file=',commandArgs(trailingOnly=F))
])),
'/common.R'
))
}, warning = function(a) {
# when not run from CLI, assumes common.R is in the working directory
source('common.R')
})
suppressMessages(library(ggplot2))
suppressMessages(library(gridExtra))
suppressMessages(library(vegan))# !high memory use! (needed for mantel.partial)
# variables overwrite
#variables$debug = TRUE
if (variables$debug) {
# change parameters here if debugging
args = list(
order = '../Data/isea_admixture_data_for_comparison_2.csv',
real = '../Data/isea_admixture_data_for_comparison_2.csv',
admix = '../2015_08_24/merged.dat',
ABC = 'threshold, 0.1 0.05 0.01',
repeated = TRUE,
verbose = TRUE
)
} else {
#CLI arguments
parser = ArgumentParser(
description = 'Analyse of admixture files'
)
parser$add_argument(
'order', type = 'character', help = 'path to the order file'
)
parser$add_argument(
'real', type = 'character',
help = 'path to the real admixture data file'
)
parser$add_argument(
'admix', type = 'character', nargs = '?', default = 'stdin',
help = 'path to the admix file, defaults to stdin'
)
parser$add_argument(
'--ABC', '-a', nargs = '?',
help = paste(
'perform an ABC analysis, enter the type of tolerance',
'(absolute or relative) and the list of tolerance values'
)
)
parser$add_argument(
'--repeated', '-r', action='store_true',
help = 'display repeated randomSeeds'
)
parser$add_argument(
'--verbose', '-v', action='store_true',
help = 'verbose mode'
)
args = parser$parse_args()
}
if (variables$debug) {
cat('executing in DEBUG mode, ignoring any command line argument\n')
}
if (!is.null(args$ABC)) {
parsed = strsplit(args$ABC, ' *[ ,] *')[[1]]
variables$ABC = list(
type = parsed[1],
tVector = as.numeric(tail(parsed, -1))
)
if (!(variables$ABC$type %in% c('threshold', 'tolerance'))) {
stop(paste(
variables$ABC$type, 'is not valid (try "tolerance" or "threshold")'
))
}
}
# environment to store incoming data
data = new.env()
# connection to input
data$conn = file(args$admix, open = 'r')
# vector of headers from the input
data$header = strsplit(gsub('"', '', readLines(data$conn, 1L)), ',')[[1]]
# corresponding vector of types
data$csvTypes = sapply(
data$header,
function(x) variables$types[[x]],
USE.NAMES = FALSE
)
# environment to store reference data (real or observed data)
ref = new.env()
# order of the islands as defined by the order file
ref$order = read.csv(args$order)[, c('Island', 'order')]
# reference admixture values
ref$real = read.csv(args$real)[
, c(
'Island', 'DnaAdmixture', 'AutosomeAdmixture',
'XChrAdmixture', 'longitude', 'latitude'
)
]
# order island by order information
ref$real = ref$real[order(ref$order$order), ]
# vector of characters -> factor
ref$real$Island = factor(ref$real$Island, levels = ref$real$Island)
# remove islands without data from reference
ref$real = ref$real[!is.na(ref$real$AutosomeAdmixture), ]
# get interesting admixture values into a numerical matrix (for performance)
ref$realMat = matrix(
as.numeric(unlist(ref$real[, c('AutosomeAdmixture', 'XChrAdmixture')])),
ncol = 2
)
# create matrix of spatial distances between islands
ref$geom = SpatialTools::dist1(
matrix(cbind(ref$real$longitude, ref$real$latitude), ncol = 2)
)
# distances of admixtures between islands in reference data
ref$dist = list(
AutosomeAdmixture = dist(ref$real$AutosomeAdmixture),
XChrAdmixture = dist(ref$real$XChrAdmixture)
)
# list of islands that can be compared between reference and simulated data
variables$comparedIslands = ref$real[, 'Island']
# environment to store summary information
summary = new.env()
# list management
summary$size = 0L
# sensitivities by Islands
summary$sensit = list(
all = list(),
aggr = data.frame()
)
if (is.null(args$ABC)) {
# counts of unique parameter sets
summary$counts = list(
df = data.frame(),
changing = rep.int(1L, length(variables$paramNames))
)
names(summary$counts$changing) = variables$paramNames
}
# comparisons real / simulated
summary$comp = list(
all = list(),
aggr = data.frame()
)
# difference admixtures
summary$diff = list(
all = list(),
aggr = data.frame()
)
# environment to store loop information
loop = new.env()
# loop count, == simulations processed
loop$counter = 0L
# number of changing parameters
loop$changing = -1L
# if we specifically asked for this information
if (args$repeated) {
# vector of the randomSeeds of the simulations processed
randomSeeds = integer(0L)
}
# infinite loop, will break on EOF of input
repeat {
# resize lists of pointers if necessary
if (loop$counter == summary$size) {
# new size will be the double of the old size
if (summary$size == 0) {
summary$size = 1L
} else {
summary$size = summary$size * 2L
}
# reallocate memory for pointers
length(summary$sensit$all) = summary$size
length(summary$comp$all) = summary$size
length(summary$diff$all) = summary$size
}
loop$counter = loop$counter + 1L
# raw content of the input file, chunk of 21 lines
# corresponds to one simulation, assumes the input is sorted by run
data$buffer = readLines(data$conn, variables$nIslands)
# exits the loop if the file has no more line to read
if (length(data$buffer) == 0L) {
close(data$conn)
break
}
# opens a connection to the chunk of raw text for the simulation
csvConn = textConnection(data$buffer)
# parses the content of the connection as csv file into a data frame
data$df = read.csv(
csvConn,
# for performance, specify types by column
colClasses = data$csvTypes,
header = FALSE,
col.names = data$header
)
# closes the connection immediately after reading it
close(csvConn)
# checks that the block of data only contains rows from 1 single simulation
if (length(unique(data$df$run)) > 1) {
stop('Uh-oh, this block contains rows from more than one simulation')
}
# transforms the islands to factors
data$df$Island = factor(data$df$Island, levels = ref$order$Island)
# order the islands in the simulation as specified by the order information
data$df = data$df[order(data$df$Island), ]
# adds information for the difference between X and Autosome admixtures
data$df$diffXAuto = data$df$XChrAdmixture - data$df$AutosomeAdmixture
# if we specifically asked for this information
if (args$repeated) {
# adds the current randomSeed to the list of already parsed ones
randomSeeds = append(randomSeeds, data$df$randomSeed[1L])
}
if (is.null(args$ABC)) {
# melted simulation data for admixtures for every island
summary$sensit$all[[loop$counter]] = melt(
data$df[, c('Island', variables$summaryNames)],
id = 'Island'
)
}
simuParams = data$df[1, variables$paramNames]
# only for grid searches
if (is.null(args$ABC)) {
# if not already initialised, creates a data frame for the parameters
if (nrow(summary$counts$df) == 0L) {
summary$counts$df = simuParams
summary$counts$df = data.frame(
lapply(summary$counts$df, as.character),
stringsAsFactors = FALSE
)
summary$counts$df$count = 1L
} else {
found = FALSE
# tries to find an already corresponding parameter set
for (i in 1L:nrow(summary$counts$df)) {
test = summary$counts$df[i, variables$paramNames] == simuParams
if (all(test)) {
summary$counts$df$count[i] = summary$counts$df$count[i] + 1
found = TRUE
}
}
if (!found) {
# if not, adds this set to the end of the data frame of sets
tmp = cbind(simuParams, 1L)
colnames(tmp) = c(colnames(simuParams), 'count')
summary$counts$df = rbind(summary$counts$df, tmp)
for (p in variables$paramNames) {
summary$counts$changing[p] =
length(unique(summary$counts$df[, p]))
}
}
}
tmp = sum(summary$counts$changing != 1)
if (loop$changing != tmp) {
loop$changing = tmp
if (loop$changing == 0) {
loop$type = 'stability'
} else if (loop$changing == 1) {
loop$type = 'sensitivity'
} else if (loop$changing == 2) {
loop$type = 'sensitivity'
} else {
stop(paste(
'cannot perform analysis on', loop$changing,
'changing parameters'
))
}
}
}
# store difference information
summary$diff$all[[loop$counter]] = data$df[
,
c('Island', variables$paramNames, 'diffXAuto')
]
# subset of Islands to compare real and simulated
data$df = data$df[data$df$Island %in% variables$comparedIslands, ]
## comparisons
# admixture information of the simulation as a numerical matrix
simu = matrix(
as.numeric(unlist(data$df[, c('AutosomeAdmixture', 'XChrAdmixture')])),
ncol = 2
)
if (!is.null(args$ABC)) {
# prepare comparison information data frame with 1 rows for comp values
compared = data$df[1, variables$paramNames]
} else {
# prepare comparison information data frame with 4 rows for comp values
compared = data$df[1:4, variables$paramNames]
# every combination of admixture and comparison value, 4 values
compared$admixture = rep(c('AutosomeAdmixture', 'XChrAdmixture'), 2)
compared$comparison = rep(c('MSD', 'cor'), each = 2)
}
# mean squared distance (numeric(2)), both admixtures in the same operation
msd = colMeans((ref$realMat - simu) ^ 2)
# partial Mantel correlation (numeric(2))
cor = c(
# Autosome admixture
mantel.partial(
ref$dist$AutosomeAdmixture,
dist(simu[, 1]),
ref$geom,
permutations = 1L
)$statistic,
# X Chromosome Admixture
mantel.partial(
ref$dist$XChrAdmixture,
dist(simu[, 2]),
ref$geom,
permutations = 1L
)$statistic
)
# add comparison information
if (!is.null(args$ABC)) {
compared[, variables$ABCsummary] =
c(msd, cor)
compared$randomSeed = data$df$randomSeed[1]
} else {
compared$value = c(msd, cor)
}
# store comparison information
summary$comp$all[[loop$counter]] = compared
# update displayed information if verbose mode is activated
if (args$verbose) {
if (!is.null(args$ABC)) {
text = '- analysis: ABC'
} else {
text = paste(
'- changing params:', loop$changing,
'- analysis:', loop$type,
'- sets:', nrow(summary$counts$df)
)
}
cat('\r', paste('Simulations:', loop$counter, text))
flush.console()
}
}
# end of main loop
if (args$verbose) {
# new line so that the next print won't be added after the progress info
cat('\n')
flush.console()
}
if (args$repeated) {
# count the occurences of every randomSeed
tab = table(randomSeeds)
# extract any repeated randomSeed
repeated = tab[tab != 1]
# if the vector of repeated randomSeeds has values
if (length(repeated)) {
# display them
cat('repeated randomSeeds:\n')
cat(names(repeated), sep = ', ')
cat('\n')
}
}
# only for grid searches
if (is.null(args$ABC)) {
# aggregates parameter sweep information...
suppressMessages(library(XML))
# ...in this XMLTree
XMLTree = newXMLNode('sweep')
# loops on every parameter
for (p in colnames(summary$counts$df)) {
if (p == 'count') {
# this column doesn't need to be in the XML (not a parameter)
next
}
uniqueValues = sort(unique(summary$counts$df[, p]))
# adds a node in the XML tree
newXMLNode(
'parameter',
attrs = c(
'name' = p,
'n' = length(uniqueValues),
'values' = toString(uniqueValues)
),
parent = XMLTree
)
}
# attribute on the root (number of distinct sets of parameters)
addAttributes(XMLTree, sets = prod(summary$counts$changing))
# if in debug mode
if (variables$debug) {
# only displays sweep information
print(XMLTree)
} else {
# otherwise, only saves it in a file
invisible(saveXML(XMLTree, paste0(variables$now, '-parameters.xml')))
}
}
if (args$verbose) {
cat('now aggregating all the data\n')
}
# concatenates at once all the list filled earlier and gc them
# 3 lists of pointers to a lot of small memory blocks -> 3 big memory blocks
if (is.null(args$ABC)) {
summary$sensit$all = do.call('rbind', summary$sensit$all)
invisible(gc())
}
summary$comp$all = do.call('rbind', summary$comp$all)
invisible(gc())
if (is.null(args$ABC)) {
summary$diff$all = do.call('rbind', summary$diff$all)
invisible(gc())
}
if (args$verbose) {
cat(
'now performing',
if (is.null(args$ABC)) loop$type else 'ABC', 'analysis\n'
)
}
# prepares the data for the visualisation, according to the type of analysis
if (!is.null(args$ABC)) {
sourced = 'analysis-ABC.R'
} else {
# grid search
maxCount = max(summary$counts$df$count)
if (loop$type == 'stability') {
# stability
sourced = 'analysis-stability.R'
changing = c()
} else {
# sensitivity
changing = names(
summary$counts$changing[order(-summary$counts$changing)]
)[1:loop$changing]
for (p in changing) {
summary$counts$df[, p] = factor(summary$counts$df[, p])
summary$comp$all[, p] = factor(summary$comp$all[, p])
summary$diff$all[, p] = factor(summary$diff$all[, p])
}
# standard deviation among all of the data
summary$sensit$aggr = aggregate(
. ~ Island + variable,
data = summary$sensit$all,
FUN = sd
)
if (loop$changing == 1) {
sourced = 'analysis-sensitivity.R'
} else {
sourced = 'analysis-sensitivity2D.R'
}
}
# drops the unnecessary columns
if (is.null(args$ABC)) {# not ABC
summary$comp$all = summary$comp$all[
,
c(changing, 'admixture', 'comparison', 'value')
]
} else {# ABC
summary$comp$all = summary$comp$all[
,
c(changing, 'admixture', 'comparison', 'value', 'randomSeed')
]
}
summary$diff$all = summary$diff$all[, c(changing, 'Island', 'diffXAuto')]
if (loop$changing == 2) {
# 2d parameter sweep
# comparisons, mean value (first heat-maps)
summary$comp$aggr = aggregate(
value ~ .,
data = summary$comp$all,
FUN = mean
)
# comparisons, standard deviation value (last heat-maps)
# value added as a 'stddev' column on the previous 'aggr' data frame
summary$comp$aggr$stddev = aggregate(
value ~ .,
data = summary$comp$all,
FUN = sd
)$value
for (p in changing) {
summary$comp$aggr[, p] = factor(summary$comp$aggr[, p])
}
} else if(loop$changing == 1) {
# 1d parameter sweep
# X - Auto, mean value (dot in the plot)
summary$diff$aggr = aggregate(
as.formula(paste('diffXAuto ~ Island +', changing)),
data = summary$diff$all,
FUN = mean
)
# X - Auto, standard deviation value (error bar in the plot)
# value added as a 'stddev' column on the previous 'aggr' data frame
summary$diff$aggr$stddev = aggregate(
as.formula(paste('diffXAuto ~ Island +', changing)),
data = summary$diff$all,
FUN = sd
)$diffXAuto
}
}
# source next analysis script that does only the visualisation
tryCatch({
# look for script in the same folder than the current script
source(paste0(
dirname(sub('--file=','',commandArgs(trailingOnly=F)[
grep('--file=',commandArgs(trailingOnly=F))
])),
paste0('/', sourced)
))
}, warning = function(a) {
# when not run from CLI, assumes script is in the working directory
source(sourced)
})
|
dc7855bf13af6f0649cdde21b4eaa9985fb7302b
|
c88b0cbeda0edf9e745e324ef942a504e27d4f87
|
/fWHR/bastrop.R
|
538ef5fc852251fabb84fe59eb492d2daedfaa47
|
[] |
no_license
|
Diapadion/R
|
5535b2373bcb5dd9a8bbc0b517f0f9fcda498f27
|
1485c43c0e565a947fdc058a1019a74bdd97f265
|
refs/heads/master
| 2023-05-12T04:21:15.761115
| 2023-04-27T16:26:35
| 2023-04-27T16:26:35
| 28,046,921
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,054
|
r
|
bastrop.R
|
### Bastrop
# Subset the landmark points to the Bastrop site and drop rows that must not
# enter the analysis (first two rows plus anything flagged for redo).
bCh <- cPoints[cPoints$location == 'Bastrop', ]
bCh <- bCh[c(-1, -2), ]
#bCh = bCh[bCh$A!='X',]
bCh <- bCh[bCh$Notes != 'redo', ]
bChsub <- bCh[c(78:93), ]
# Random draws used to pick individuals / assignments; the realized draws
# are recorded in the comments below for reproducibility of the notes.
sample(bCh$ID, 16)
# [1] Beta Punch Lexus Austin Dahpi Minna Michon Xena Alpha Gremlin Bo
# [12] Samson Kampani Mahsho Cheopi Maishpa
sample(1:10, 16, replace = TRUE)
# [1] 2 10 8 4 2 4 4 2 5 9 4 6 10 7 5 5
### Redos -
# definite: c(16, 24, 56, 60)
# possible: c(27, 28, 32, 44, 72, 74)
bCh$ID[c(16, 24, 56, 60)]
### Testing correlations between morphs and composite images
# Flag rows whose ID occurs more than once (duplicated() scanned in both
# directions so every member of a duplicate pair is marked).
ind <- duplicated(bCh$ID) | duplicated(bCh$ID[nrow(bCh):1])[nrow(bCh):1]
ind <- ind[1:77]
# FIX: was `bCHsup = NULL` -- a capitalisation typo that created a stray,
# never-used variable; the intent was clearly to reset bChsup here.
bChsup <- NULL
bChsup <- bCh[ind, ]
bChsup <- bChsup[1:16, ] # don't know why this is necessary
fWHR.Bsub <- df2fWHR(bChsub)
fWHR.Bsup <- df2fWHR(bChsup)
fWHR.Btemp <- merge(fWHR.Bsub, fWHR.Bsup, by.x = 'ID', by.y = 'ID')
plot(fWHR.Btemp[, 2:3])
cor.test(fWHR.Btemp$ratio.x, fWHR.Btemp$ratio.y)
### Calculate fWHR for morphs
fWHR.Bmorph <- df2fWHR(bCh[1:77, ])
# copy and paste results back into CSV
|
07edea7bf1ecaa261dfda2a8a2cda641e2641c14
|
132c868650be85a4eaf605832e6cd57d9aa8faf3
|
/R/plots_infection_summary_table.R
|
21e6398598c69f94c56271ccba76e581b720086b
|
[] |
no_license
|
EvoNetHIV/RoleSPVL
|
9797fe146afa1e750ef1cfdaf231b62e0f19e848
|
113b55fedbdd2ac6627b751df3e102e801e36c5c
|
refs/heads/master
| 2021-09-14T17:46:30.726252
| 2018-05-16T22:16:13
| 2018-05-16T22:16:13
| 103,449,083
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,849
|
r
|
plots_infection_summary_table.R
|
#' @title Infection summary table for a single agent
#'
#' @description Assembles a one-row summary (arrival/death times, founder
#' status, infection time, partnership and transmission counts) for agent
#' \code{ind} and draws it on the current graphics device with
#' \code{plotrix::addtable2plot}.
#'
#' @param pop List of agent attribute vectors; uses \code{arrival_time},
#'   \code{Time_Death} and \code{Time_Inf}.
#' @param partners Vector of the agent's unique partners.
#' @param dframe Per-partnership data frame; uses \code{disc_dur},
#'   \code{total_sex} and \code{agent_inf_partner?}.
#' @param ind Index of the agent to summarize.
#' @return Invisibly \code{NULL}; called for its plotting side effect.
#' @details
#' Additional details here
#' @examples
#' example function call here
#not exported as it is not in use now...maybe later
infection_summary_table_fxn <- function(pop, partners, dframe, ind)
{
  par(mfrow = c(1, 1))
  # Arrival defaults to time step 1 when unrecorded.
  arrival_time <- if (is.na(pop$arrival_time[ind])) 1 else pop$arrival_time[ind]
  # A missing death time simply stays NA.
  time_death <- pop$Time_Death[ind]
  # Founders are agents infected at or before time zero; never-infected
  # agents (NA infection time) are not founders.
  time_inf <- pop$Time_Inf[ind]
  founder <- !is.na(time_inf) && time_inf <= 0
  # Partnership / transmission tallies for this agent.
  total_partners <- nrow(dframe)
  unique_partners <- length(partners)
  no_disc_partnerships <- length(which(dframe$disc_dur != 0))
  no_disc_acts <- sum(dframe$total_sex)
  no_infectees <- length(which(dframe$"agent_inf_partner?"))
  summary_table <- data.frame(arrival = arrival_time,
                              death = time_death,
                              founder = founder,
                              inf_time = time_inf,
                              totPart = total_partners,
                              unqPart = unique_partners,
                              discPart = no_disc_partnerships,
                              discActs = no_disc_acts,
                              infectees = no_infectees)
  # Blank canvas, then render the table on top of it.
  plot(1:10, 1:10, type = 'n', axes = FALSE, ylab = NA, xlab = NA)
  plotrix::addtable2plot(1, 10, summary_table, cex = 1, yjust = 1, ypad = .4,
                         hlines = TRUE, vlines = TRUE, xpad = .2)
  title(paste("summary data for agent", ind), adj = .1)
  invisible(NULL)
}
|
2ae695ef64c3c308b431949614fe64c11766d6d4
|
fd1e6cb50e9218a0fbe6f25b2634eb44db44bc0d
|
/src/0_downloaddata.R
|
656bc72ec8f80b8906856c88c02fde117b55b1a4
|
[
"MIT"
] |
permissive
|
befriendabacterium/communityinvasion
|
a84473f473e053d6db237fdc264c43338c097e4c
|
378a1c232969b4c6760bab73f70e397101470a7f
|
refs/heads/main
| 2023-09-01T23:15:27.016577
| 2023-01-25T11:19:28
| 2023-01-25T11:19:28
| 382,041,900
| 2
| 1
|
MIT
| 2021-09-14T20:04:01
| 2021-07-01T13:26:48
|
R
|
UTF-8
|
R
| false
| false
| 1,076
|
r
|
0_downloaddata.R
|
# START -------------------------------------------------------------------
# NOTE(review): interactive analysis script -- it deliberately clears the
# workspace and changes the working directory; do not source() from a package.
rm(list=ls())
set.seed(1234)
#set working directory to source file location
# (requires running inside RStudio: rstudioapi resolves this script's path)
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
#move up a directory
setwd("..")
# EMPTY EXISTING DATA/REVERT TO GITHUB REPO STATE -----------------------------------------------------
#empty the inputs and outputs directories of any data, keeping all READMEs (that's what the grep does) and jpgs
# NOTE(review): '.md|.jpg' is an unanchored regex, so any name merely
# *containing* "md"/"jpg" (with any preceding char) is kept -- confirm intended.
file.remove(
  grep(list.files(c('inputs','outputs'), recursive = T, full.names = T), pattern='.md|.jpg', invert=T, value=T)
)
# DOWNLOAD NEW DATA -------------------------------------------------------
#change this to '2_preanalysis' to run the code from Step 8, to '3_end' to download the end result of running the code
whichpoint<-'1_start'
# List the files of OSF project "hc57w" and select the requested snapshot folder.
my_project <- osfr::osf_ls_files(osfr::osf_retrieve_node("hc57w"))
data_folder <- my_project[which(my_project$name==whichpoint),]
#download inputs and outputs folders
osfr::osf_download(osfr::osf_ls_files(data_folder),getwd(), recurse = T, conflicts='overwrite')
|
80506f8fbd602ec163323e0a36e7914a16f42f2b
|
26ecda1a80b85a74501eab533ef60006c4835b09
|
/R/create.barplot.R
|
467d5928a757f983043d8ae4fd68d153f67e5f4c
|
[] |
no_license
|
rdeborja/plotting.general
|
988d18d02836ad7bc1d29b6aa81c615707ed2118
|
7a312a10ccc7a7931dc5e3516604bf302dcda21b
|
refs/heads/master
| 2021-01-10T19:43:42.175722
| 2016-08-22T20:13:09
| 2016-08-22T20:13:09
| 16,683,758
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,763
|
r
|
create.barplot.R
|
#' Create a barplot with ggplot2
#'
#' @param data data.frame holding the variables to plot (required).
#' @param x Name (string) of the x-axis variable (required).
#' @param y Optional name of the y-axis variable; when supplied the supplied
#'   values are plotted (stat = 'identity'), otherwise counts are binned.
#' @param fill Optional name of the variable mapped to bar fill.
#' @param group Optional facetting variable name(s), length 1 or 2.
#' @param group.col,group.row Facet column-wise and/or row-wise.
#' @param rotate Flip the coordinates (horizontal bars).
#' @param width Bar width passed to geom_bar().
#' @param ylab,xlab Optional axis labels.
#' @param yaxis.log.scale,xaxis.log.scale Use a log10 axis.
#' @param theme Optional ggplot2 theme; defaults to
#'   plotting.general::default.barplot.theme().
#' @return A ggplot object.
create.barplot <- function(data = NULL, x = NULL, y = NULL, fill = NULL, group = NULL, group.col = FALSE, group.row = FALSE, rotate = FALSE, width = 0.9, ylab = NULL, xlab = NULL, yaxis.log.scale = FALSE, xaxis.log.scale = FALSE, theme = NULL) {
  # validate the arguments
  if (is.null(x)) stop('Missing x argument')
  if (is.null(data)) stop('Missing data argument')
  # initialize the ggplot object (aes depends on which mappings were given)
  if (!is.null(y) && is.null(fill)) {
    plot.object <- ggplot(data = data, aes_string(x = x, y = y))
  } else if (!is.null(y) && !is.null(fill)) {
    plot.object <- ggplot(data = data, aes_string(x = x, y = y, fill = fill))
  } else {
    plot.object <- ggplot(data = data, aes_string(x = x))
  }
  # if y is passed as an argument, plot the actual y values passed to the
  # function ('identity'); otherwise bin and count ('bin')
  if (!is.null(y)) {
    plot.object <- plot.object + geom_bar(stat = 'identity', width = width)
  } else {
    plot.object <- plot.object + geom_bar(stat = 'bin', width = width)
  }
  # if the group argument is not null, facet on the group variable(s)
  if (!is.null(group) && length(group) == 2) {
    if (group.col == TRUE && group.row == TRUE) {
      # FIX: build the facet formula from the supplied variable *names*, as
      # the single-group branch does; the previous `facet_grid(group[1] ~
      # group[2])` facetted on the literal expressions `group[1]`/`group[2]`
      # rather than on the named columns.
      plot.object <- plot.object + facet_grid(as.formula(paste(group[1], '~', group[2])))
    } else {
      stop('To group row and column wise must have a vector of 2 items')
    }
  } else if (!is.null(group) && length(group) == 1) {
    if (group.col == TRUE) {
      plot.object <- plot.object + facet_grid(paste(sep = ' ', '. ~', group), scales = 'free', space = 'free')
    } else {
      # group.row == TRUE and the historical default branch were identical
      plot.object <- plot.object + facet_grid(paste(sep = ' ', group, '~ .'))
    }
  }
  # rotate for horizontal bars
  if (rotate == TRUE) {
    plot.object <- plot.object + coord_flip()
  }
  # add custom labels to the x and y axes
  if (!is.null(xlab)) {
    plot.object <- plot.object + xlab(xlab)
  }
  if (!is.null(ylab)) {
    plot.object <- plot.object + ylab(ylab)
  }
  # make the bars start at the axis line; optionally log-scale the axes
  if (yaxis.log.scale == TRUE) {
    plot.object <- plot.object + scale_y_log10(expand = c(0, 0))
  } else {
    plot.object <- plot.object + scale_y_continuous(expand = c(0, 0))
  }
  if (xaxis.log.scale == TRUE) {
    plot.object <- plot.object + scale_x_log10(expand = c(0, 0))
  }
  # apply the package default theme unless the caller supplied one
  if (is.null(theme)) {
    plot.object <- plot.object + plotting.general::default.barplot.theme()
  } else {
    plot.object <- plot.object + theme
  }
  return(plot.object)
}
|
b08418bf0f91f6abeb51156cd19c2e94af6b21c9
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/conformalClassification/examples/TCPClassification.Rd.R
|
8e19a07039c21d2ba76b58e83700012feafb24da
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,258
|
r
|
TCPClassification.Rd.R
|
# Worked example for class-conditional transductive conformal prediction
# (TCP) on the mlbench DNA data set.  Everything below the library() call is
# intentionally commented out ("#not run"): TCP is slow, so the example is
# shipped as documentation only.
library(conformalClassification)
### Name: TCPClassification
### Title: Class-conditional transductive conformal classifier for
### multi-class problems
### Aliases: TCPClassification
### ** Examples
## load the library
#library(mlbench)
#library(caret)
#library(conformalClassification)
## load the DNA dataset
#data(DNA)
#originalData <- DNA
## make sure first column is always the label and class labels are always 1, 2, ...
#nrAttr = ncol(originalData) #no of attributes
#tempColumn = originalData[, 1]
#originalData[, 1] = originalData[, nrAttr]
#originalData[, nrAttr] = tempColumn
#originalData[, 1] = as.factor(originalData[, 1])
#originalData[, 1] = as.numeric(originalData[, 1])
## partition the data into training and test set
#result = createDataPartition(originalData[, 1], p = 0.8, list = FALSE)
#trainingSet = originalData[result, ]
#testSet = originalData[-result, ]
##reduce the size of the training set, because TCP is slow
#result = createDataPartition(trainingSet[, 1], p=0.8, list=FALSE)
#trainingSet = trainingSet[-result, ]
##TCP classification
#pValues = TCPClassification(trainingSet, testSet)
#perfVlaues = pValues2PerfMetrics(pValues, testSet)
#print(perfVlaues)
#CPCalibrationPlot(pValues, testSet, "blue")
#not run
|
9af4774f2a80849181b219432d58aa6ac4b66197
|
b42f2f08b29c1e0ceafb8e6eac6734ed6f7a389e
|
/12. Basic Statistical Methods/1.MinMaxRange.R
|
8bbe6dbec8252eecfec6924865da5ebc3b4e9148
|
[] |
no_license
|
ashleyradams/R-Programming-for-Beginners
|
6c25c5278150fde221f3d89e0e76697f5dafb9c0
|
5a11b3784ac9507bc6f3b63b537b8d2b30f14cdd
|
refs/heads/master
| 2022-06-06T20:58:38.925171
| 2020-05-01T18:25:11
| 2020-05-01T18:25:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,179
|
r
|
1.MinMaxRange.R
|
# Demonstration of min/max/range on a simple numeric vector.
x <- c(10,45,30,50,35,40,80,25)
min(x)
max(x)
range(x)
library(dplyr)
# Load the GE stock data and keep only the date and price columns.
mydata <- read.csv('GEStock.csv')
mysubdata <- select(mydata,Date,Price)
# Tabulate how many prices are missing (TRUE = NA).
x <- is.na(mysubdata$Price)
y <- factor(x)
table(y)
# Extremes of the price series.
min(mysubdata$Price)
max(mysubdata$Price)
range(mysubdata$Price)
# Rows at which the maximum / minimum price occur.
mysubdata[which.max(mysubdata$Price),]
mysubdata[which.min(mysubdata$Price),]
# Line plot of the price series (x axis is the row index).
plot(mysubdata$Price ,
     xlab='Dates',
     ylab = 'Stock Price',
     main='Dates Vs Stock Price',
     col ='red',
     pch=20,
     type='l')
# Horizontal reference lines at the min and max price ...
abline(h=min(mysubdata$Price),col='blue',lwd=2)
abline(h=max(mysubdata$Price),col='blue',lwd=2)
# ... with their values annotated on the y-axis.
z <- c(min(mysubdata$Price),max(mysubdata$Price))
axis(2, at=z,labels=round(z,2),
     col.axis='blue',
     las=2,
     cex.axis=1.0,
     tck=-.01)
# Vertical reference lines at the positions of the extremes ...
abline(v=which.min(mysubdata$Price),col='blue')
abline(v=which.max(mysubdata$Price),col='blue')
# ... annotated with the corresponding dates on the x-axis.
# NOTE: x and y are reused here as axis coordinates/labels, clobbering the
# earlier vectors of the same name.
x <- c(which.max(mysubdata$Price),
       which.min(mysubdata$Price))
y <- c(as.character(mysubdata[which.max(mysubdata$Price),1]),
       as.character(mysubdata[which.min(mysubdata$Price),1])
)
axis(1, at=x,labels=y,
     col.axis='blue',
     las=1,
     cex.axis=1.0,
     tck=-.01)
|
8ba770f5348ce2e14e00492072a17a1f856ae6e9
|
a6f4c8c91414d62fad5f8f7f53b1dee9c9d099ee
|
/R-Portable-Mac/library/xlsx/tests/test.import.R
|
47e6de0eceea2fa787731cabb704bd85f8194ec7
|
[
"LicenseRef-scancode-unknown-license-reference",
"CC0-1.0",
"GPL-2.0-only"
] |
permissive
|
sdownin/sequencer
|
6a2d70777fbd8109e26f126229b5ee10348cf4e7
|
045d0580e673cba6a3bd8ed1a12ff19494bf36fa
|
refs/heads/master
| 2023-08-04T08:06:02.891739
| 2023-08-03T04:07:36
| 2023-08-03T04:07:36
| 221,256,941
| 2
| 1
|
CC0-1.0
| 2023-02-04T15:06:14
| 2019-11-12T16:00:50
|
C++
|
UTF-8
|
R
| false
| false
| 4,349
|
r
|
test.import.R
|
#
# Smoke tests for the 'xlsx' package's import paths (loadWorkbook /
# read.xlsx / read.xlsx2 / low-level cell access) against the bundled
# test workbook; stops on the first unexpected value.
#   outdir:    unused legacy argument, kept for interface compatibility.
#   type:      extension of the bundled test workbook ("xlsx" or "xls").
#   speedtest: if TRUE, additionally time read.xlsx2 on a large local file
#              (hard-coded path; only meaningful on the original machine).
#
test.import <- function(outdir="C:/Temp/", type="xlsx",
  speedtest=FALSE)
{
  cat("##################################################\n")
  cat("Test reading xlsx files into R\n")
  cat("##################################################\n")

  filename <- paste("test_import.", type, sep="")
  file <- system.file("tests", filename, package="xlsx")

  cat("Load test_import.xlsx ... ")
  wb <- loadWorkbook(file)
  cat("OK\n")

  sheets <- getSheets(wb)

  cat("Get the second sheet with mixedTypes ... ")
  sheet <- sheets[[2]]
  rows <- getRows(sheet)
  cells <- getCells(rows)
  values <- lapply(cells, getCellValue)

  cat("Extract cell [5,2] and see if == 'Apr' ...")
  stopifnot(values[["5.2"]] == "Apr")
  cat("OK\n")

  orig <- getOption("stringsAsFactors")
  options(stringsAsFactors=FALSE)
  # FIX: restore the global option even if one of the checks below stop()s;
  # the original only restored it on the success path.  The explicit restore
  # further down is kept (restoring twice is harmless).
  on.exit(options(stringsAsFactors=orig), add=TRUE)

  cat("Test high level import\n")
  cat("Read data in second sheet ...\n")
  res <- read.xlsx(file, 2)

  # Column-class round-trip checks for the mixed-type sheet.
  cat("First column is of class Dates ... ")
  stopifnot(class(res[,1])=="Date")
  cat("OK\n")
  cat("Second column is of class character ... ")
  stopifnot(class(res[,2])=="character")
  cat("OK\n")
  cat("Third column is of class numeric ... ")
  stopifnot(class(res[,3])=="numeric")
  cat("OK\n")
  cat("Fourth column is of class logical ... ")
  stopifnot(class(res[,4])=="logical")
  cat("OK\n")
  cat("Sixth column is of class POSIXct ... ")
  stopifnot(inherits(res[,6], "POSIXct"))
  cat("OK\n\n")

  options(stringsAsFactors=orig)

  cat("Some cells are errors because of wrong formulas\n")
  print(res) # some cells are errors because of wrong formulas
  cat("OK\n")

  cat("Test high level import keeping formulas... \n")
  res <- read.xlsx(file, 2, keepFormulas=TRUE)
  cat("Now showing the formulas explicitly\n")
  print(res)
  cat("OK\n")

  cat("Test high level import with colClasses.\n")
  cat("Force conversion of 5th column to numeric.\n")
  colClasses <- rep(NA, length=6); colClasses[5] <- "numeric"
  res <- read.xlsx(file, 2, colClasses=colClasses)
  print(res) # force convesion to numeric
  cat("OK\n")

  cat("Test you can import sheet one column... \n")
  res <- read.xlsx(file, "oneColumn", keepFormulas=TRUE)
  if (ncol(res)==1) {cat("OK\n")} else {cat("FAILED!\n")}

  cat("Check that you can import String formulas ... \n")
  res <- read.xlsx(file, "formulas", keepFormulas=FALSE)
  if (res[1,3]=="2010-1") {cat("OK\n")} else {cat("FAILED!\n")}

  if (speedtest){
    # Timing run against a large workbook on the original author's machine.
    require(xlsx)
    colClasses <- c("numeric", rep("character", 76))
    res <- read.xlsx2("C:/Temp/ModelList.xlsx", sheetName="Models",
      colClasses=colClasses)
  }

  cat("######################################################\n")
  cat("Test read.xlsx2 ...\n")
  cat("######################################################\n")
  res <- read.xlsx2(file, sheetName="mixedTypes")
  res <- read.xlsx2(file, sheetName="mixedTypes", colClasses=c(
    "numeric", "character", rep("numeric", 4)))
  res <- read.xlsx2(file, sheetName="mixedTypes", startRow=2, noRows=3)

  cat("######################################################\n")
  cat("Test low level import ...\n")
  cat("######################################################\n")
  file <- system.file("tests", filename, package="xlsx")
  wb <- loadWorkbook(file)
  sheets <- getSheets(wb)

  sheet <- sheets[['deletedFields']]
  cat("Check that you can extract only some rows (say 5) ...")
  rows <- getRows(sheet, rowIndex=1:5)
  cells <- getCells(rows)
  res <- lapply(cells, getCellValue)
  # Cell names are "row.col"; keep the distinct row labels.
  rr <- unique(sapply(strsplit(names(res), "\\."), "[[", 1))
  stopifnot(identical(rr, c("1","2","3","4", "5")))
  cat("OK\n")

  cat("Check that you can extract only some columns (say 4) ...")
  rows <- getRows(sheet)
  cells <- getCells(rows, colIndex=1:4)
  res <- lapply(cells, getCellValue)
  cols <- unique(sapply(strsplit(names(res), "\\."), "[[", 2))
  stopifnot(identical(cols, c("1","2","3","4")))
  cat("OK\n")

  cat("Check that you can extract a matrix (say 2:3, 1:3) ... \n")
  sheet <- sheets[['mixedTypes']]
  vv <- getMatrixValues(sheet, 2:3, 1:3)
  print(vv)
  cat("OK\n")
}
|
0dfcc76cff3d439e186ad7a71896d919f913d2b7
|
ddb8577b3318daf821903b16029c497944ef0fcf
|
/R/GenerarFicheroDistribucionTamagnos.R
|
bfe71e3ee7299b46fd1532aeea503d8aa17f560d
|
[] |
no_license
|
curromolero/libRadTran
|
4a7d3c0712b6644165e7e07db4b5ae966a451c26
|
212290f0706860139d09992fc221cc0408075122
|
refs/heads/master
| 2021-01-11T23:05:10.094123
| 2017-02-02T16:20:23
| 2017-02-02T16:20:23
| 78,545,284
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,394
|
r
|
GenerarFicheroDistribucionTamagnos.R
|
generarFicheroDistribucionTamagnos <- function(distTamagnosVolumen, fechaMedida, dirMedida) {
  # Writes a file with the number size distribution computed from the volume
  # distribution provided by AERONET; returns the name of the file written
  # into dirMedida.
  # Load the required fitting function.
  # NOTE(review): source() assumes the working directory is the project root
  # -- confirm before calling from elsewhere.
  source("R/fitAERONETDistribution_bimodal.R")
  # Extract the information from the list structure.
  distActual <- data.frame(distTamagnosVolumen$tamagnos, distTamagnosVolumen$dV)
  colnames(distActual) <- c('radius', 'dV/dlnr')
  # Fit the modes of the volume distribution (bimodal log-normal) and rebuild
  # them on a fine log-radius grid to check the result.
  fitResults <- fitAERONETDistribution_bimodal(distActual)
  nuevoEjeX <- seq(-10, 3, by = 0.01)
  modo1 <- fitResults$C1 * exp(-(nuevoEjeX-fitResults$mean1)**2/(2 * fitResults$sigma1**2))
  modo2 <- fitResults$C2 * exp(-(nuevoEjeX-fitResults$mean2)**2/(2 * fitResults$sigma2**2))
  # Debug plots kept commented out:
  # plot(log(distActual$radius), distActual$`dV/dlnr`, "b", xlab = "log particle radius, log(r)", ylab = "dV/dlogr")
  # lines(nuevoEjeX, modo1, col = "red")
  # lines(nuevoEjeX, modo2, col = "blue")
  # Convert the AERONET volume distribution dV/dlnr into a number
  # distribution dN(r)/dlnr.
  NumberDist <- distActual$`dV/dlnr` / ((4/3)*pi*distActual$radius**3)
  # Convert the fitted volume modes to dN(r)/dlnr as well.
  NumberDistMode1 <- modo1 / ((4/3)*pi*exp(nuevoEjeX)**3)
  NumberDistMode2 <- modo2 / ((4/3)*pi*exp(nuevoEjeX)**3)
  # Check the final result (debug plots kept commented out):
  # plot(log(distActual$radius), log(NumberDist), "b", xlim=c(-6,2.5), ylim=c(-20, 5), xlab = "log particle radius, log(r)", ylab = "dN/dlogr (log scale)")
  # lines(nuevoEjeX, log(NumberDistMode1), col = "black")
  # lines(nuevoEjeX, log(NumberDistMode2), col = "blue")
  # lines(nuevoEjeX, log(NumberDistMode1 + NumberDistMode2), col = "red")
  # Sum the two modes and prepare the data.frame to be written out.
  NumberDist_file <- data.frame(exp(nuevoEjeX), NumberDistMode1 + NumberDistMode2)
  # Write the file under a timestamped name derived from the measurement date.
  ficheroINPUT_Dist <- paste(paste('distTamagnos', format(fechaMedida, '%Y%m%d_%H%M'), sep = '_'), 'dat', sep = '.')
  dirINPUT_Dist <- file.path(dirMedida, ficheroINPUT_Dist, fsep = .Platform$file.sep)
  write.table(NumberDist_file, file = dirINPUT_Dist, append = FALSE, quote = TRUE, sep = " ", eol = "\n", na = "NA",
              dec = ".", row.names = FALSE, col.names = FALSE, qmethod = c("escape", "double"), fileEncoding = "")
  return(ficheroINPUT_Dist)
}
|
2771348e4cd6e69848b01284e3d44410a04cbe17
|
911c1cf47c0f5caa2ab5090b7112bfea237bd5a7
|
/Gillespie-Algorithm.R
|
5d46d56f37298c1e379ec7fc2440a55d2ecb9c74
|
[] |
no_license
|
chewbacca89/Gillespie-Algorithm
|
17126c9201237eb093e406e037511eac1b587975
|
477d5c702efd4f47bd9c2ef5c5379e074ac9560e
|
refs/heads/master
| 2021-01-17T20:00:39.302532
| 2017-06-19T14:20:37
| 2017-06-19T14:20:37
| 65,720,789
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,559
|
r
|
Gillespie-Algorithm.R
|
# Gillespie-Algorithm
# catalyitcl degragation
########################################################################
# Gillespie stochastic simulation algorithm (exact SSA).
#
# Args:
#   t0:           start time.
#   Tend:         simulation end time (the last recorded time may exceed it).
#   X0:           initial molecule counts (numeric vector).
#   param:        parameter vector passed through to `propensities`.
#   propensities: function(state, param) returning list(a = propensity
#                 vector, nu = stoichiometric matrix, one column per reaction).
#
# Returns: list(t = vector of event times, X = state matrix with one column
#          per recorded time point; X[, 1] == X0).
Gillespie <- function(t0, Tend, X0, param, propensities) {
  # 1. Set initial time t = t0 and number of molecules X(t) = X0.
  t <- t0
  X <- X0
  # Accumulate the trajectory in lists instead of growing with c()/cbind()
  # inside the loop (which was O(n^2) in the number of events).  The RNG
  # call order is unchanged, so results are identical for a given seed.
  t_hist <- list(t)
  X_hist <- list(X)
  k <- 1L
  while (t < Tend) {
    prop <- propensities(X, param)
    a0 <- sum(prop$a)
    # 2. Time to next reaction event (exponential with rate a0) ...
    tau <- -1 / a0 * log(runif(1, min = 0, max = 1))
    # 3. ... and type of next reaction event, by inverse-CDF sampling of the
    # normalized propensities.
    y <- runif(1, min = 0, max = 1)
    mu <- 0
    a_sum <- 0
    while (a_sum < y) {
      mu <- mu + 1
      a_sum <- a_sum + prop$a[mu] / a0
    }
    # 4. Update time and apply the chosen reaction's stoichiometry.
    t <- t + tau
    X <- X + prop$nu[, mu]
    k <- k + 1L
    t_hist[[k]] <- t
    X_hist[[k]] <- X
  } # 5. Go to 2. as long as t < Tend.
  return(list(t = unlist(t_hist), X = do.call(cbind, X_hist)))
}
########################################################################
# Model setup: initial counts for the two species and the named rate
# constants consumed by propensitiesUsed() below.
start <- c(10,1) # (A,B)
params <- c(c1=0.5,c2=2)
# Propensities and stoichiometry for the catalytic degradation model:
#   reaction 1: A + B -> B  (rate c1 * A * B, removes one A; B is a catalyst)
#   reaction 2: 0 -> A      (constant rate c2, produces one A)
propensitiesUsed <- function(state, param) {
  a1 <- param[["c1"]] * state[1] * state[2]
  a2 <- param[["c2"]]
  # One column per reaction; only the A count (row 1) ever changes.
  stoich <- matrix(c(-1, 0, 1, 0), ncol = 2)
  list(a = c(a1, a2), nu = stoich)
}
# Run the SSA for 200 time units and plot the trajectory of species A as a
# step function.
G <- Gillespie(t0=0,Tend=200,X0=start,param=params,propensities=propensitiesUsed)
plot(G$t,G$X[1,],'s',xlab='time [sec]',ylab='number of molecules A')
########################################################################
|
b0b930e9f03a9c8425a438c5c6317c790276e885
|
35ba2c59410cebb39b5449178edfc3b431efe8e8
|
/code/DataAna.R
|
f07b5a8df051a59529e9b1fea4406b15d4600609
|
[
"MIT"
] |
permissive
|
AnnyKong/MathSummerScheduling
|
a5bafa2fecd0148262510cbe82f1d88a7fbae7d5
|
1c1e56e101403d6e217f602452b9f6e31e7977ef
|
refs/heads/master
| 2020-04-09T22:58:39.723647
| 2019-01-10T01:11:41
| 2019-01-10T01:11:41
| 160,643,853
| 0
| 0
|
MIT
| 2019-01-10T01:11:42
| 2018-12-06T08:32:50
|
Jupyter Notebook
|
UTF-8
|
R
| false
| false
| 10,268
|
r
|
DataAna.R
|
library(dplyr)
library(survival)
# predict years from 2014 to 2019
for (i in 2014:2019) {
  # set predict year
  predict_yr = i
  # number of observed years available before the prediction year
  yrN = predict_yr-2008
  # initialize all fields
  # candidate section start times (military-style, e.g. 940 = 9:40am)
  t_at <- c(110, 220, 940, 1050, 1200)
  yr_at <- 2007+1:yrN
  # NOTE(review): `course` appears unused here (graph() takes its own
  # course argument) -- confirm before removing.
  course = c(111, 120, 124, 125, 126, 307, 308, 309, 324)
  # per-year weights, more recent years weighted heavier, normalized to 1
  weight <- c(seq(0.1,0.6,0.1), seq(0.8, 1.6, 0.2))[1:yrN]
  weight <- weight / sum(weight)
  # axis labels matching t_at
  t_ex <- expression(paste(1,":",10,"pm"), paste(2,":",20,"pm"),
                     paste(9,":",40,"am"), paste(10,":",50,"am"),
                     paste(12,":00","am"))
  defa <- rep(0, length(t_at))
  # set graph output working directory
  setwd(paste("/Users/linni/Documents/MATH 381/MathSummerScheduling/Graph/",yr_at[yrN]+1,sep=""))
  # input data
  su <- read.csv("/Users/linni/Documents/MATH 381/MathSummerScheduling/data/su1.csv", header = TRUE)
  # only use data before predicted year
  su1 <- data.frame(su)[su$Yr<=yr_at[yrN],]
  crsN <- read.csv(paste("/Users/linni/Documents/MATH 381/MathSummerScheduling/output/Stage_I_output",
                         predict_yr,".csv",
                         sep="_"),header = TRUE)
  crsN <- data.frame(crsN)
  # NOTE(review): initial() is defined *below* this call; on the first
  # iteration of a fresh session this would fail -- confirm intended order.
  initial(yrN)
# gives a brief summary of enrollment of all years before predicted year:
# writes annual_enrol.png with one point per year of total enrollment,
# using the globals su1 (enrollment data) and yr_at (observed years).
initial <- function(yrN) {
  png(paste("annual_enrol.png"))
  par(mfrow = c(1, 1))
  # Total enrollment per observed year.
  total <- vapply(
    seq_len(yrN),
    function(idx) sum(su1[su1$Yr == yr_at[idx], ]$Current.Enrlmnt),
    numeric(1)
  )
  plot(yr_at, total,
       main = paste("MATH Course Annual Total Enrollment in [ 2008 ~", yr_at[yrN], "]"),
       ylim = c(0, max(total) + 10), pch = 20,
       xlab = "Year", ylab = "Total Enrollment")
  dev.off()
}
# draws graphs for prediction and past data for one course:
#  - <course>_past_summary.png : 4-panel historical summary
#  - <course>_pe_section.png   : enrollment percentage by start time
#  - <course>_predict_hist.png : simulated enrollment histograms per section
# Returns `bf`, a data frame of 95% CI bounds (per start time) for the
# probability of clearing the 0.8 / 0.57-0.8 / 0.57 enrollment thresholds.
# Relies on the globals su1, yrN, yr_at, t_at, t_ex, defa, weight and on
# survival::cipoisson.
graph <- function(course) {
  png(paste(course,"past_summary.png",sep="_"))
  par(mfrow=c(2,2),oma=c(0,0,2,0))
  # Result table: one row per candidate start time, CI bounds filled later.
  bf <- data.frame("Section" = t_at, "> 0.8 LB" = defa,
                   "> 0.8 UB" = defa, "0.57~0.8 LB"= defa,
                   "0.57~0.8 UB" = defa,
                   "> 0.57 LB" = defa,
                   "> 0.57 UB" = defa)
  names(bf)<-c("Section","> 0.8 LB","> 0.8 UB",
               "0.57~0.8 LB","0.57~0.8 UB",
               "> 0.57 LB","> 0.57 UB")
  # Historical sections of this course with non-zero enrollment.
  mc <- su1[su1$Crs.No == course,]
  mc <- mc[mc$Current.Enrlmnt != 0,]
  # Per-year number of sections (p) and total enrollment (stu).
  p<-numeric(yrN)
  stu<-numeric(yrN)
  for (i in 1:yrN) {
    yr <- mc[mc$Yr == yr_at[i],]
    p[i] <- as.integer(nrow(yr))
    stu[i] <- sum(yr$Current.Enrlmnt)
  }
  plot(yr_at, stu,
       main="Annual Total Enrollment", ylim=c(0,max(stu)+10),
       xlab="Year",ylab="Total Enrollment", pch=20,xaxt="n")
  axis(1, at = yr_at,labels = yr_at)
  plot(yr_at, p, main="Annual Number of Sections",
       xlab="Year",ylab="Number of Sections",
       ylim=c(0,max(p)+1),pch=20)
  # Highlight the most recent 3 years with a red cross marker.
  mc$pch=20
  mc$pch[mc$Yr>=(yr_at[yrN]-3)]=4
  mc$Color="black"
  mc$Color[mc$Yr>=yr_at[yrN]-3]="red"
  # Count historical sections per start time and how many start times are
  # actually used (countS).  NOTE(review): `occur` is indexed by the start
  # time *values* (e.g. occur[940]), silently extending the vector with NAs
  # -- it works because of max(..., na.rm=TRUE) below, but looks accidental.
  occur<-numeric(yrN)
  countS <- 0
  for (i in t_at) {
    sec <- mc[mc$Start.Time == i,]
    occur[i] <- nrow(sec)
    if (occur[i] == 0) {next}
    countS = countS+1
  }
  barplot(table(mc$Start.Time),
          ylim=c(0,max(occur,na.rm=TRUE)+3),
          main="Frequency of Sections",
          xlab="Start Time",
          ylab="Number of Sections")
  # Enrollment-count histogram (percent * 40 = headcount, 40 = capacity)
  # overlaid with fitted normal and Poisson densities.
  m <- su1[su1$Crs.No==course,]
  mCE <- m$Enrlmnt.Percentage*40
  x <- seq(0, 45, by = 1)
  y<-dnorm(x, mean=mean(mCE), sd=sd(mCE))
  z<-dpois(x, lambda=mean(mCE))
  hist(m$Enrlmnt.Percentage*40,freq=FALSE,
       main="Current Enrollment Distribution",
       xlab="Current Enrollment",
       xlim=c(0,45),
       ylim=c(0,max(y,z)+0.01))
  lines(y,col='red')
  lines(z,col='blue')
  legend("topleft",legend=c("normal", "poisson"),
         col=c("red", "blue"), lty=1, cex=0.8)
  title(paste("Past Data of MATH", course,"in [ 2008 ~", yr_at[yrN], "]"),
        outer=TRUE)
  dev.off()
  # Scatter of enrollment percentage per section, with the cancel (0.29),
  # lower-bound (0.57) and upper-bound (0.80) thresholds marked.
  png(paste(course,"pe_section.png",sep="_"))
  plot(mc$Start.Time,mc$Enrlmnt.Percentage,
       main=paste("MATH", course,
                  "Enrollment Percentages of Sections in [ 2008 ~", yr_at[yrN], "]"),
       xlab="Start Time",
       ylab="Enrollment Percentage",xaxt="n",
       ylim=c(0,1), pch=mc$pch, col=mc$Color)
  axis(1, at = t_at,labels = t_ex, cex.axis=0.8)
  legend("bottomleft",
         legend=c("cancel", "satisfied LB", "satisfied UB"),
         col=c("green", "blue", "red"), lty=1, cex=0.75)
  # axis(1, at = t_at,labels = t_ex, cex.axis=0.8)
  abline(h=0.29,col='green')
  abline(h=0.57,col='blue')
  abline(h=0.80,col='red')
  dev.off()
  # Monte-Carlo prediction: one histogram panel per used start time.
  png(paste(course,"predict_hist.png",sep="_"))
  if (countS <= 2) {
    par(mfrow=c(2,1),oma=c(0,0,2,0))
  } else if (countS <= 4) {
    par(mfrow=c(2,2),oma=c(0,0,2,0))
  } else {
    par(mfrow=c(3,2),oma=c(0,0,2,0))
  }
  for (i in t_at) {
    sec <- mc[mc$Start.Time == i,]
    occur <- nrow(sec)
    if (occur == 0) {next}
    # Weighted mean enrollment for this start time (recent years count more).
    total <- 0
    #sec_tot <- 0
    for (a in 1:yrN) {
      sec_e <- sec[sec$Yr == yr_at[a],]
      e_yr <- sec_e$Enrlmnt.Percentage*40
      if (length(e_yr)==0) {e_yr = 0}
      total = total + e_yr * p[a] * weight[a]
    }
    mu<-total/(sum(p*weight))
    # Simulate 1000 mean enrollments (Poisson draws, /40 back to a percent)
    # and count how often each threshold band is hit.
    ntrial<-1000
    x<-numeric(ntrial)
    k1 <- 0
    k2 <- 0
    k3 <- 0
    for (j in 1:ntrial){
      d <- rpois(occur, mu)
      x[j] <- mean(d)/40
      if (x[j] >= 0.57) {
        k3 = k3 + 1
        if (x[j] > 0.8) {
          k1 = k1 + 1
        } else { # (x[j] <= 0.8 && x[j] >= 0.57
          k2 = k2 + 1
        }
      }
    }
    # 95% Poisson confidence bounds on each hit count (survival::cipoisson).
    bf[bf$Section==i,]$"> 0.8 LB" =
      cipoisson(k1,time=1000,p=0.95)[1]
    bf[bf$Section==i,]$"> 0.8 UB" =
      cipoisson(k1,time=1000,p=0.95)[2]
    bf[bf$Section==i,]$"0.57~0.8 LB" =
      cipoisson(k2,time=1000,p=0.95)[1]
    bf[bf$Section==i,]$"0.57~0.8 UB" =
      cipoisson(k2,time=1000,p=0.95)[2]
    bf[bf$Section==i,]$"> 0.57 LB" =
      cipoisson(k3,time=1000,p=0.95)[1]
    bf[bf$Section==i,]$"> 0.57 UB" =
      cipoisson(k3,time=1000,p=0.95)[2]
    hist(x,main=paste("Start Time", i, "occur:", occur),
         xlab="Predicted Enrolmnt",
         xlim=c(0,1))
    abline(v=0.29,col='green')
    abline(v=0.57,col='blue')
    abline(v=0.80,col='red')
    text(0.29,1000, "0.29", pos = 2)
    text(0.57, 1000, "0.57", pos = 2)
    text(0.8,1000, "0.80", pos = 4)
  }
  legend("topleft",
         legend=c("cancel", "satisfied LB", "satisfied UB"),
         col=c("green", "blue", "red"), lty=1, cex=0.5)
  title(paste("Prediction of MATH", course,"in", yr_at[yrN]+1),
        outer=TRUE)
  dev.off()
  return(bf)
}
# make a priority queue for time sections of a course
# Build a priority queue of start-time sections for a course: rank the
# sections returned by graph() by their confidence lower bounds (best
# first) and drop sections whose "> 0.57 LB" falls below 0.57.
priority <- function(course) {
  bf <- graph(course)
  # Order by the lower confidence bounds, most promising section first.
  bf <- bf[order(-bf$`> 0.8 LB`,
                 -bf$`0.57~0.8 LB`,
                 -bf$`> 0.57 LB`), ]
  for (i in t_at) {
    m <- bf[bf$Section == i, ]
    num <- which(bf$Section == i)
    # Guard against sections already absent from bf: without the nrow()
    # check, `m$\`> 0.57 LB\`` is length zero and the `if` condition
    # raises "argument is of length zero".
    if (nrow(m) > 0 && m$`> 0.57 LB` < 0.57) {
      bf <- bf[-num, ]
    }
  }
  print("Priority Queue:")
  print(bf)
  return(bf)
}
# Output the predicted number of sections per start time for a course:
# read the Stage-I section count, then distribute that many sections over
# the start times in priority order (ties overflow into the 220 slot).
solution <- function(course) {
# NOTE(review): hard-coded absolute path; breaks on other machines --
# consider making the output directory a parameter.
crsN <- read.csv(paste("/Users/linni/Documents/MATH 381/MathSummerScheduling/output/Stage_I_output",
predict_yr,".csv",
sep="_"),header = TRUE)
crsN <- data.frame(crsN)
# Total number of sections to schedule for this course (from Stage I).
num <- crsN[crsN$Course==course,]$Number
# Ranked start times for this course; `t_at` and `defa` are assumed to
# come from the enclosing scope -- TODO confirm.
q <- priority(course)
sol <- data.frame("Section" = t_at, "Number" = defa)
now<-0
# Round-robin over the priority queue until `num` sections are placed.
while (now < num) {
nonRep <- min(nrow(q),num-now)
for (i in 1:nonRep) {
sol[sol$"Section"==q$Section[i],]$"Number" =
sol[sol$"Section"==q$Section[i],]$"Number" + 1
now <- now + 1
}
# Any remaining sections in this pass go to the 220 start time.
if (now < num) {
sol[sol$"Section"==220,]$"Number" =
sol[sol$"Section"==220,]$"Number" + 1
now <- now + 1
}
}
cat('\n')
return(sol)
}
# Output the comparison of real vs predicted time sections for each course.
# If the predicted year has no past data (predict_yr >= 2019) this step is
# skipped entirely.  Results are written via sink() to a text file.
validate <- function() {
  if (predict_yr < 2019) {
    sink(paste("/Users/linni/Documents/MATH 381/MathSummerScheduling/output/Stage_II_real_validate",
               yr_at[yrN] + 1, ".txt", sep = "_"))
    for (i in course) {
      crs <- su[su$Crs.No == i, ]
      crs <- crs[crs$Yr == predict_yr, ]
      # Initialise the per-start-time counters *before* building `real`
      # (previously `defa` was assigned only after it was used, relying on
      # a like-named variable from an enclosing scope).
      defa <- rep(0, length(t_at))
      real <- data.frame("Section" = t_at, "Number" = defa)
      print(i)
      # (Removed unused locals: `num` referenced an undefined `crsN`,
      # and `count` was never read.)
      for (j in seq_along(t_at)) {
        # Row subset needs the trailing comma: `crs[mask]` would try to
        # select *columns* of the data frame and fail.
        a <- crs[crs$Start.Time == t_at[j], ]
        if (nrow(a) != 0) {
          real[real$"Section" == t_at[j], ]$"Number" <-
            real[real$"Section" == t_at[j], ]$"Number" + 1
        }
      }
      predict <- solution(i)
      print(real)
      print(paste("Comparison of MATH", i))
      for (j in seq_along(t_at)) {
        a <- crs[crs$Start.Time == t_at[j], ]
        if (nrow(a) != 0) {
          diff <- real[real$"Section" == t_at[j], ]$"Number" -
            predict[predict$"Section" == t_at[j], ]$"Number"
          if (diff == 0) {
            print(paste(t_at[j], ": has the same number"))
          } else if (diff > 0) {
            print(paste(t_at[j], ": recommend to reduce", diff, "section(s)"))
          } else { # diff < 0
            print(paste(t_at[j], ": recommend to increase", abs(diff), "section(s)"))
          }
        } else {
          print(paste(t_at[j], ": has no past data"))
        }
      }
      cat("\n")
    }
    sink()
  }
}
# Produce Stage-II prediction outputs for every course, writing them to a
# single text file, then run the validation step.
produce <- function() {
  out_file <- paste("/Users/linni/Documents/MATH 381/MathSummerScheduling/output/Stage_II_predict",
                    yr_at[yrN] + 1, ".txt", sep = "_")
  sink(out_file)
  course <- c(111, 120, 124, 125, 126, 307, 308, 309, 324)
  for (crs_no in course) {
    print(crs_no)
    print(solution(crs_no))
    cat('\n\n')
  }
  sink()
  validate()
}
produce()
}
|
5699e8fd2dc9cb86738cf196f299e1b60b0f31fd
|
5bd4b82811be11bcf9dd855e871ce8a77af7442f
|
/gap/R/qtl2dplotly.R
|
114e1ce42c2ff0cf1bd532fad19733191a67690f
|
[] |
no_license
|
jinghuazhao/R
|
a1de5df9edd46e53b9dc90090dec0bd06ee10c52
|
8269532031fd57097674a9539493d418a342907c
|
refs/heads/master
| 2023-08-27T07:14:59.397913
| 2023-08-21T16:35:51
| 2023-08-21T16:35:51
| 61,349,892
| 10
| 8
| null | 2022-11-24T11:25:51
| 2016-06-17T06:11:36
|
R
|
UTF-8
|
R
| false
| false
| 2,588
|
r
|
qtl2dplotly.R
|
#' 2D QTL plotly
#'
#' Interactive plotly version of the 2-D QTL plot: QTL position on the
#' x-axis, gene position on the y-axis, coloured by cis/trans status,
#' with hover text carrying the SNP id, positions, target and value.
#'
#' @param d Data in qtl2dplot() format.
#' @param chrlen Lengths of chromosomes for specific build: hg18, hg19, hg38.
#' @param qtl.id QTL id prefix for the hover text.
#' @param qtl.prefix QTL position prefix for the hover text.
#' @param qtl.gene Gene position prefix for the hover text.
#' @param target.type Type of target, e.g., protein.
#' @param TSS Use transcription start site (TSS) positions when TRUE.
#' @param xlab X-axis title.
#' @param ylab Y-axis title.
#' @param ... Additional arguments, e.g., target, log10p, to qtl2dplot.
#'
#' @export
#' @return A plotly figure.
#'
#' @examples
#' \dontrun{
#' INF <- Sys.getenv("INF")
#' d <- read.csv(file.path(INF,"work","INF1.merge.cis.vs.trans"),as.is=TRUE)
#' r <- qtl2dplotly(d)
#' htmlwidgets::saveWidget(r,file=file.path(INF,"INF1.qtl2dplotly.html"))
#' r
#' }
qtl2dplotly <- function(d, chrlen=gap::hg19, qtl.id="SNPid:", qtl.prefix="QTL:", qtl.gene="Gene:", target.type="Protein",
TSS=FALSE, xlab="QTL position", ylab="Gene position",...)
{
# Pre-assign the non-standard-evaluation column names to NA so that
# R CMD check does not flag them as undefined global variables.
n <- CM <- snpid <- pos_qtl <- pos_gene <- target_gene <- lp <- chr1 <- pos1 <- chr2 <- pos2 <- target <- gene <- value <- cistrans <- y <- NA
# Compute the plot coordinates only (plot=FALSE skips the base-graphics
# rendering inside qtl2dplot).
t2d <- qtl2dplot(d, chrlen, TSS=TSS, plot=FALSE, ...)
n <- with(t2d, n)
CM <- with(t2d, CM)
# Axis ticks at the midpoint of each chromosome's cumulative span;
# xy() formats the chromosome label (assumed defined in the package --
# TODO confirm).
tkvals <- tktxts <- vector()
for (x in 1:n) {
tkvals[x] <- ifelse(x == 1, CM[x]/2, (CM[x] + CM[x-1])/2)
tktxts[x] <- xy(x)
}
# Assemble the per-point hover text from id, positions, target and value.
t2d_pos <- with(t2d, data) %>%
dplyr::mutate(snpid=paste(qtl.id,id),
pos_qtl=paste0(qtl.prefix,chr1,":",pos1),
pos_gene=paste0(qtl.gene,chr2,":",pos2),
target_gene=paste0(target.type, " (gene): ", target, " (", gene, ")"),
lp=paste("value:", value),
text=paste(snpid, pos_qtl, pos_gene, target_gene, lp, sep="\n")) %>%
dplyr::select(x,y,cistrans,text)
# Shared axis settings: array-mode ticks at chromosome midpoints.
axes <- list(tickmode = "array",
tick0 = 1,
dtick = 1,
ticklen = 1,
tickwidth = 0,
tickfont = list(family = "arial", size = 12, color = "#7f7f7f"),
tickvals = tkvals,
ticktext = tktxts)
xaxis = c(title = xlab, axes)
yaxis = c(title = ylab, axes)
# Two colours for the cistrans factor; the commented setNames() call
# would pin the colour-to-level mapping explicitly.
cols <- c('#BF382A','#0C4B8E')
# cols <- setNames(cols, c("cis","trans"))
# The assignment as the final expression returns `fig` invisibly.
fig <- with(t2d_pos,
plotly::plot_ly(x = ~x, y = ~y, color = ~cistrans, colors = cols,
marker=list(size=11), mode="markers") %>%
plotly::add_markers(type="scatter",text=text) %>%
plotly::layout(xaxis=xaxis, yaxis=yaxis, showlegend = TRUE))
}
|
0e12286362d6e667bd4877c7ea30c3be76dfc6ef
|
df8c454c8782a8677b5a0978850c3d442ec2de02
|
/binomial.Rcheck/00_pkg_src/binomial/R/bin_variable_functions.R
|
6a635adaa825e916e36dca4dbe43cb2abc0e36cd
|
[] |
no_license
|
stat133-sp19/hw-stat133-anacomesana
|
982681dc430da554c17e10b1d965cb0f81c20e4a
|
d7a104a0d45d9b70a4ba0f030bd7d86b770acb57
|
refs/heads/master
| 2020-04-28T20:33:46.618417
| 2019-05-04T01:09:17
| 2019-05-04T01:09:17
| 175,548,868
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,765
|
r
|
bin_variable_functions.R
|
#' @title Binomial Random Variable function
#' @description Creates an object of class \code{"binvar"}: a list that
#' records the number of trials and the probability of success of a
#' binomial random variable.
#' @param trials number of trials (numeric)
#' @param prob probability of success (numeric)
#' @return a list with elements \code{trials} and \code{prob},
#' carrying class \code{"binvar"}
#' @export
#' @examples
#' bin_variable(trials=5,prob=0.5)
bin_variable <- function(trials, prob) {
  # Input validation is delegated to the package checkers.
  check_trials(trials)
  check_prob(prob)
  structure(
    list(trials = trials, prob = prob),
    class = "binvar"
  )
}
# Print method for "binvar" objects: shows the distribution parameters.
#' @export
print.binvar <- function(distr, ...) {
  header <- '"Binomial variable"\n \nParameters \n- number of trials: '
  body <- paste0(header, distr$trials, '\n- prob of success: ', distr$prob)
  cat(body)
}
# Summary method for "binvar" objects: collects the distribution's
# parameters and summary measures (via the package's aux_* helpers) into
# an object of class "summary.binvar".
#' @export
summary.binvar <- function(distr) {
  n <- distr$trials
  p <- distr$prob
  structure(
    list(
      trials   = n,
      prob     = p,
      mean     = aux_mean(n, p),
      variance = aux_variance(n, p),
      mode     = aux_mode(n, p),
      skewness = aux_skewness(n, p),
      kurtosis = aux_kurtosis(n, p)
    ),
    class = "summary.binvar"
  )
}
# Print Summary Binomial Variable
# NOTE(review): print.binvar(sm) prints the "Parameters" block as a side
# effect and returns NULL (the value of cat()), so paste0() effectively
# receives only the "Measures" text.  The combined output happens to read
# correctly, but only by accident -- consider building the full string
# explicitly instead of nesting a print call inside paste0().
#' @export
print.summary.binvar <- function(sm) {
cat(format(paste0(print.binvar(sm),
'\n\nMeasures\n- mean: ',
sm$mean,
'\n- variance:',
sm$variance,
'\n- mode: ',
sm$mode,
'\n- skewness: ',
sm$skewness,
'\n- kurtosis: ',
sm$kurtosis)
)
)
}
|
bbb482750c3efbe658eab048069364d5b3c64e09
|
3a9f24c8e1540544ef7fae443bbccbf06ea8fb3d
|
/dump/enrichment_msigdb.R
|
3d42da575c63d0eb92acda76491eb2e71bb2dcb9
|
[] |
no_license
|
gaiatek/illupipe
|
7116664fe3079d172d4e7d9f2d07c433652a93f3
|
6679b218181f00c81d74071ab57c38b688bfbab8
|
refs/heads/master
| 2020-05-17T18:35:09.724662
| 2012-07-12T15:38:41
| 2012-07-12T15:38:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,609
|
r
|
enrichment_msigdb.R
|
######################################################################
# Annotation enrichment of a gene cluster list (hypergeometric test). #
#                                                                    #
# all      : character vector of all gene SYMBOLs on the array       #
# clusters : list of character vectors, the SYMBOLs in each cluster  #
# pathways : list of gene sets as returned by load.pathways()        #
#            (element 1 = name, element 2 = description, 3.. = genes)#
# cmethod  : multiple-testing correction, passed to p.adjust         #
#                                                                    #
# Returns a list (one element per cluster) of adjusted p-values, one #
# per pathway, in the order of `pathways`.                           #
######################################################################
gene.annotation.enrichment <- function(all, clusters, pathways, cmethod = "BH")
{
  # Preprocessing: pool of all genes occurring in any pathway
  # (built in one pass instead of growing a list in a loop).
  all.genes.pw <- unique(unlist(lapply(pathways, function(pw) {
    genes <- c(pw, recursive = TRUE)
    genes[3:length(genes)]
  })))
  # Background size: genes both on the array and in some pathway.
  g <- length(intersect(all, all.genes.pw))
  text <- paste("Testing ", length(clusters), " Clusters and",
                length(all.genes.pw), " Names in ", length(pathways), " Sets.")
  message(text)
  # Calculate p-values, preallocating instead of growing with c().
  p <- vector("list", length(clusters))
  for (i in seq_along(clusters)) {
    cluster <- unique(clusters[[i]])
    # Number of cluster genes present in the pathway universe.
    n <- length(intersect(all.genes.pw, cluster))
    p_clust <- numeric(length(pathways))
    for (j in seq_along(pathways)) {
      genes.pw <- c(pathways[[j]], recursive = TRUE)
      genes.pw <- genes.pw[3:length(genes.pw)]
      f <- length(intersect(all, genes.pw))
      ov <- length(intersect(genes.pw, cluster))
      # P(overlap >= ov) under the hypergeometric null.
      p_val <- phyper(ov - 1, f, g - f, n, lower.tail = FALSE)
      if (p_val < 0) { # guard against a phyper bug in R 2.7.0
        warning(paste("Returned negative p-value: phyper(", ov - 1, ",", f,
                      ",", g - f, ",", n, ",lower.tail=FALSE)"))
      }
      p_clust[j] <- p_val
    }
    p[[i]] <- p.adjust(as.vector(p_clust), method = cmethod)
  }
  p
}
# Load MSigDb *.gmt files: one gene set per line, tab-separated
# (name, description, genes...).  Returns a list of character vectors,
# one per line.
load.pathways <- function(filename)
{
  # Number of tab-separated fields on each non-blank line.
  path.size <- count.fields(file = filename, sep = "\t", quote = "",
                            skip = 0, blank.lines.skip = TRUE,
                            comment.char = "")
  # One scan() per line.  seq_along() (rather than the original
  # 2:length(path.size)) keeps this correct for single-line files,
  # where 2:1 would loop backwards and duplicate the first entry.
  lapply(seq_along(path.size), function(i) {
    scan(file = filename, what = "character", sep = "\t", quote = "",
         skip = i - 1, n = path.size[i], strip.white = TRUE, quiet = TRUE)
  })
}
# Collect the names of pathways whose log10 adjusted p-value is below
# max_log_p (and finite), across all clusters in `enrich` (the output of
# gene.annotation.enrichment()).  Names may repeat across clusters.
get.pathnames.clean <- function(enrich, pathways, max_log_p)
{
  pathnames <- vector()
  for (i in seq_along(enrich)) {
    d <- log10(c(enrich[[i]], recursive = TRUE))
    path_indices <- which(d < max_log_p & d != -Inf)
    # Iterating directly over the (possibly empty) index vector avoids
    # the original 1:length(...) pitfall when no pathway qualifies.
    for (j in path_indices) {
      # Element 1 of a pathway entry is its name.
      pathnames <- c(pathnames, c(pathways[[j]], recursive = TRUE)[1])
    }
  }
  pathnames
}
# Collect the adjusted p-values matching get.pathnames.clean(): those
# whose log10 value is below max_log_p (and finite), across all clusters.
# `pathways` is kept in the signature for interface compatibility even
# though it is not used.
get.pvalues.clean <- function(enrich, pathways, max_log_p)
{
  pvalVec <- vector()
  for (i in seq_along(enrich)) {
    d <- log10(c(enrich[[i]], recursive = TRUE))
    path_indices <- which(d < max_log_p & d != -Inf)
    # Iterate directly over the (possibly empty) index vector instead of
    # the fragile 1:length(...) pattern.
    for (j in path_indices) {
      pvalVec <- c(pvalVec, enrich[[i]][j])
    }
  }
  pvalVec
}
# Return the gene members (elements 3..end) of the pathway whose name
# (element 1) matches `name`; returns NULL invisibly when none matches.
get.gene.set <- function(name, pathways)
{
  for (idx in seq_along(pathways)) {
    entry <- pathways[[idx]]
    if (entry[1] == name) {
      return(entry[3:length(entry)])
    }
  }
}
|
96e785cdadbd6dd64b698f38e8347c19a47792ee
|
3db55642beea2f852d0871bcad36f37752704c28
|
/sracppingTables.R
|
bc383e574e38fb6427f52c09d12741d6cb054299
|
[] |
no_license
|
Varma45/rProgramming
|
bb6f7db6e282ad23763d6fa1ed367f8a5a5210ae
|
89ea7392f2e8da4d4c3e0e7213c1cdecb78f1f1c
|
refs/heads/master
| 2021-01-20T08:13:34.798699
| 2017-06-16T11:10:15
| 2017-06-16T11:10:15
| 90,113,607
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 416
|
r
|
sracppingTables.R
|
library(rvest)
# Scrape the state-wise population table of India from indiaonlinepages.
link <- "http://www.indiaonlinepages.com/population/state-wise-population-of-india.html"
# NOTE(review): hard-coded Windows path; breaks on any other machine --
# consider tempfile() or a relative path.
download.file(link,destfile = "D://dc++//workspace 2//state.HTML")
dlink <- "D://dc++//workspace 2//state.HTML"
read_link <- read_html(dlink)
# Locate the population table via an absolute XPath; fragile if the page
# layout changes.
pop_html <- html_nodes(read_link,xpath = "/html/body/center[2]/table/tbody/tr/td[1]/center[2]/table/tbody")
# Parse the selected HTML table node(s) into a list of data frames.
pop_data <- html_table(pop_html)
head(pop_data)
pop_data
|
866132134f7cd3919cf287a49f6f2ca7f7ebc0b7
|
b919835ccb1757a3b37326acd28d799cc123c5b0
|
/Practica2/p2-ensemble.R
|
865506b9ee2b50a9a03090015911cf7fd5f6b218
|
[
"MIT"
] |
permissive
|
Carlosma7/SIGE
|
80fb510bb1c7bcec3e036ad4ba8ae8e651e17edb
|
6975ebfa29ed89004be01f2b0759f92f6c14be01
|
refs/heads/main
| 2023-06-12T06:24:27.368223
| 2021-07-10T15:39:10
| 2021-07-10T15:39:10
| 384,730,939
| 0
| 1
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 3,834
|
r
|
p2-ensemble.R
|
## -------------------------------------------------------------------------------------
## Intelligent Systems for Business Management (SIGE)
## Academic year 2020-2021
## Pablo Alfaro Goicoechea
## Carlos Morales Aguilera
## Ensemble technique for neural-network models (VGG16 + MobileNetV2)
## -------------------------------------------------------------------------------------
# Load libraries
# NOTE(review): confusionMatrix(), as_tibble() and ggplot() used below need
# caret, tibble/dplyr and ggplot2, but only keras is loaded here -- confirm
# these are attached elsewhere before running.
library(keras)
# Load data (train/val/test image directories)
dataset_dir <- './data/images/medium10000_twoClasses/'
train_images_dir <- paste0(dataset_dir, 'train')
val_images_dir <- paste0(dataset_dir, 'val')
test_images_dir <- paste0(dataset_dir, 'test')
# Image generators (pixel rescaling to [0, 1])
train_images_generator <- image_data_generator(rescale = 1/255)
val_images_generator <- image_data_generator(rescale = 1/255)
test_images_generator <- image_data_generator(rescale = 1/255)
# Image flows built from the directory structure (one subfolder per class)
train_generator_flow <- flow_images_from_directory(
directory = train_images_dir,
generator = train_images_generator,
class_mode = 'binary',
batch_size = 128,
target_size = c(64, 64) # (w x h) --> (64 x 64)
)
validation_generator_flow <- flow_images_from_directory(
directory = val_images_dir,
generator = val_images_generator,
class_mode = 'binary',
batch_size = 128,
target_size = c(64, 64) # (w x h) --> (64 x 64)
)
test_generator_flow <- flow_images_from_directory(
directory = test_images_dir,
generator = test_images_generator,
class_mode = 'binary',
batch_size = 128,
target_size = c(64, 64) # (w x h) --> (64 x 64)
)
# Feature-extraction base 1: VGG16 pretrained on ImageNet (no classifier head)
vgg16_base <- application_vgg16(
weights = "imagenet",
include_top = FALSE,
input_shape = c(64, 64, 3)
)
# Feature-extraction base 2: MobileNetV2 pretrained on ImageNet (no classifier head)
mobile_base <- application_mobilenet_v2(
weights = "imagenet",
include_top = FALSE,
input_shape = c(64, 64, 3)
)
# Freeze the convolutional layers so only the dense head is trained
freeze_weights(vgg16_base)
freeze_weights(mobile_base)
# Build the ensemble model: both bases share one input; their flattened
# feature maps are concatenated and fed into a dense classifier head.
model_input <- layer_input(shape=c(64, 64, 3))
model_list <- c(vgg16_base(model_input) %>% layer_flatten(),
mobile_base(model_input) %>% layer_flatten())
model_output <- layer_concatenate(model_list) %>%
layer_dense(units = 512, activation = "relu") %>%
layer_dense(units = 256, activation = "relu") %>%
layer_dense(units = 1, activation = "sigmoid")
model <- keras_model(
inputs = model_input,
outputs = model_output
)
# Compile the model (binary classification)
model %>% compile(
optimizer = optimizer_rmsprop(lr = 2e-5),
loss = "binary_crossentropy",
metrics = c("accuracy")
)
# Train the model, timing the run
start_time <- Sys.time()
history <- model %>%
fit_generator(
train_generator_flow,
steps_per_epoch = 10,
epochs = 3,
validation_data = validation_generator_flow
)
plot(history)
# Evaluate the model on the held-out test flow
metrics <- model %>%
evaluate_generator(test_generator_flow, steps = 5)
end_time <- Sys.time()
message(" loss: ", metrics[1])
message(" accuracy: ", metrics[2])
message(" time: ", end_time - start_time)
# Evaluate via confusion matrix (threshold predictions at 0.5)
predictions <- predict_generator(model, test_generator_flow, steps = 4)
y_true <- test_generator_flow$classes
y_pred <- ifelse(predictions[,1] > 0.5, 1, 0)
cm <- confusionMatrix(as.factor(y_true), as.factor(y_pred))
cm_prop <- prop.table(cm$table)
plot(cm$table)
# Heatmap-style plot of the confusion matrix counts
cm_tibble <- as_tibble(cm$table)
ggplot(data = cm_tibble) +
geom_tile(aes(x=Reference, y=Prediction, fill=n), colour = "white") +
geom_text(aes(x=Reference, y=Prediction, label=n), colour = "white") +
scale_fill_continuous(trans = 'reverse')
|
e47e16d69282bde5c5481cf34f21fcec528c7b03
|
a9dcf0ded2eea7916b9530e3ff659b3495f316b8
|
/shelter_competition.R
|
f2696488da296b67b6872a2a1b772274e9668782
|
[] |
no_license
|
JT-R/shelter_animal_outcomes
|
878a53b4616987d9f052f53bf41488ea138ee055
|
8b18f6da3bec7b37ba4db9acdb4797397d5359fe
|
refs/heads/master
| 2020-12-04T08:17:25.992958
| 2016-06-23T16:21:38
| 2016-06-23T16:21:38
| 67,886,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,404
|
r
|
shelter_competition.R
|
# Load helper functions and the Kaggle shelter-outcome data sets, then run
# shared feature engineering on train and test together.
source("utilities.R")
labeled_data <- LoadData("train.csv")
testing <- LoadData("test.csv")
sample_submission <- LoadData("sample_submission.csv")
processed_datasets <- PreprocessDatasets(labeled_data, testing)
labeled_data <- processed_datasets[['labeled_data']]
testing <- processed_datasets[['testing']]
######
# THE BLOCK BELOW WAS THE BEST IN THE SECOND SUBMISSION:
######
# model <- multinom(OutcomeType ~ Sex + Has_Name + DaysUponOutcome + AnimalType + Fixed_Breed + TimeOfTheDay + Season + Year + Month + Day, data = labeled_data)
# FitUnknownLevelsToModel(dataset = testing, model = model)
#
# predictions <- data.table(predicted_class = predict(model, newdata = testing, type = "probs"))
# setnames(predictions, colnames(predictions), gsub(pattern = "predicted_class.", replacement = "", x = colnames(predictions), fixed=TRUE))
# setcolorder(predictions, neworder = order(colnames(predictions)))
# predictions <- data.table(ID = testing$ID, predictions)
# write.csv(x = predictions, file = "predictions.csv", row.names=FALSE, quote=FALSE)
#####
# Multinomial logistic regression on the engineered features.
model <- multinom(OutcomeType ~ Has_Name + DaysUponOutcome + Fixed_Breed + TimeOfTheDay +
Season + Year + IsMix + IsDomestic + Sex*IsSterilized + HairLength +
NumberOfColors + FirstColor + AnimalType*AgeCategory + IsWeekend + YearPart, data = labeled_data)
# Align factor levels unseen during training (helper from utilities.R).
FitUnknownLevelsToModel(dataset = testing, model = model)
predictions <- data.table(predicted_class = predict(model, newdata = testing, type = "probs"))
# Strip the "predicted_class." prefix and order the outcome columns
# alphabetically.
setnames(predictions, colnames(predictions), gsub(pattern = "predicted_class.", replacement = "", x = colnames(predictions), fixed=TRUE))
setcolorder(predictions, neworder = order(colnames(predictions)))
final_multinomial_predictions <- data.table(ID = testing$ID, predictions)
write.csv(x = final_multinomial_predictions, file = "mult_predictions.csv", row.names=FALSE, quote=FALSE)
# NOTE(review): model_formula is assigned twice here and appears unused
# below (the xgboost code builds matrices from `predictors` instead).
model_formula <- as.formula("~Sex + Has_Name + DaysUponOutcome +
AnimalType + Fixed_Breed + TimeOfTheDay + Season +
Year + Month + Day + IsDomestic + IsMix -1")
model_formula <- as.formula("~ Has_Name + DaysUponOutcome + Fixed_Breed + TimeOfTheDay +
Season + Year + IsMix + IsDomestic + Sex + IsSterilized + HairLength +
NumberOfColors + FirstColor + AnimalType + AgeCategory + IsWeekend + YearPart -1")
# submission 11:
# predictors <- c("Has_Name","DaysUponOutcome","Fixed_Breed","TimeOfTheDay","Season","Year","IsMix","IsDomestic","Sex",
# "IsSterilized","HairLength","NumberOfColors","FirstColor","AnimalType","AgeCategory","IsWeekend","YearPart")
predictors <- c("Has_Name","DaysUponOutcome","Breed","TimeOfTheDay","Season","Day","Year","IsMix","IsDomestic","Sex",
"IsSterilized","HairLength","NumberOfColors","FirstColor","AnimalType","AgeCategory","IsWeekend","YearPart")
# Gradient-boosted trees (xgboost); labels are the zero-based factor
# codes of OutcomeType.
xgb_outcome <- as.numeric(as.factor(labeled_data$OutcomeType))-1
xgb_labeled <- xgb.DMatrix(data.matrix(labeled_data[,predictors, with=FALSE]), label = xgb_outcome) # the training set
xgb_testing <- xgb.DMatrix(data.matrix(testing[,predictors, with=FALSE]))
model <- xgb.train(data = xgb_labeled,
label = xgb_outcome,
num_class = 5,
nrounds = 160,
eta = 0.05,
nthreads = 4,
#max.depth = 6,
watchlist = list(train = xgb_labeled),
objective = "multi:softprob",
eval_metric = "mlogloss"
)
# multi:softprob returns a flat vector; reshape to one row per animal
# and one column per class.
predictions <- predict(model, xgb_testing)
predictions <- data.table(t(matrix(predictions, nrow = 5, ncol = nrow(testing))))
#setnames(predictions, colnames(predictions), c("Euthanasia", "Adoption", "Died", "Return_to_owner", "Transfer"))
#setnames(predictions, colnames(predictions), c('Adoption', 'Died', 'Euthanasia', 'Return_to_owner', 'Transfer'))
# NOTE(review): column names are assigned by hand -- verify this order
# matches the factor encoding used for xgb_outcome.
setnames(predictions, colnames(predictions), c('Euthanasia', 'Adoption', 'Died', 'Return_to_owner', 'Transfer'))
setcolorder(predictions, neworder = order(colnames(predictions)))
final_xgb_pred <- predictions
final_xgb_pred <- data.table(ID = testing$ID, final_xgb_pred)
write.csv(x = final_xgb_pred, file = "xgb_predictions.csv", row.names=FALSE, quote=FALSE)
# Quick sanity check of the predicted probability distributions.
hist(final_xgb_pred$Adoption)
hist(final_xgb_pred$Died)
hist(final_xgb_pred$Euthanasia)
hist(final_xgb_pred$Return_to_owner)
hist(final_xgb_pred$Transfer)
# --- Local validation of the xgboost model ---------------------------------
# NOTE(review): `labeled_set` and `validation` are not defined earlier in
# this file -- presumably created elsewhere or interactively; confirm
# before running this section.
validation_datasets <- CreateValidationDatasets(labeled_set, validation)
training <- validation_datasets[['training']]
validation <- validation_datasets[['validation']]
# One-hot matrix of the true outcomes, used for multi-class log loss.
actual_classes <- data.table(model.matrix(~ OutcomeType -1, data = validation))
xgb_sample_outcome <- as.numeric(as.factor(training$OutcomeType))-1
xgb_validation_outcome <- as.numeric(as.factor(validation$OutcomeType))-1
# predictors <- c("Has_Name","DaysUponOutcome","Fixed_Breed","TimeOfTheDay","Season","Year","IsMix","IsDomestic","Sex",
# "IsSterilized","HairLength","NumberOfColors","FirstColor","AnimalType","AgeCategory","IsWeekend","YearPart")
# predictors <- c("Has_Name","DaysUponOutcome","Fixed_Breed","TimeOfTheDay","Month","Day","Year","IsMix","IsDomestic","Sex",
# "IsSterilized","HairLength","NumberOfColors","FirstColor","SecondColor","AnimalType","AgeCategory","IsWeekend","YearPart")
# NOTE(review): `predictors` is reassigned several times below; only the
# final assignment (13 features) takes effect for the matrices.
predictors <- c("Has_Name","DaysUponOutcome","Fixed_Breed","TimeOfTheDay","Month","Day","Year","IsMix","IsDomestic","Sex",
"IsSterilized","HairLength","NumberOfColors","FirstColor","SecondColor","AnimalType","AgeCategory","IsWeekend","YearPart")
predictors <- c(predictors, colnames(training)[grepl("stype_", colnames(training))])
predictors <- c("Has_Name","DaysUponOutcome","Fixed_Breed","TimeOfTheDay","Year","Month","Day","IsMix","IsDomestic","Sex",
"IsSterilized","FirstColor","SecondColor","AnimalType","AgeCategory")
predictors <- c(predictors, colnames(training)[grepl("stype_", colnames(training))])
predictors <- c("Has_Name","DaysUponOutcome","Breed","Month","Day","Year","IsMix","IsDomestic","Sex",
"IsSterilized","HairLength","FirstColor","AnimalType")
xgb_training <- xgb.DMatrix(data.matrix(training[,predictors, with=FALSE]), label = xgb_sample_outcome) # the training set
xgb_validation <- xgb.DMatrix(data.matrix(validation[,predictors, with=FALSE]), label = xgb_validation_outcome)
xgb_model <- xgb.train(data = xgb_training,
label = xgb_sample_outcome,
num_class = 5,
nrounds = 80,
eta = 0.1,
min_child_weight = 0.1,
nthreads = 4,
max.depth = 7,
watchlist = list(train = xgb_training, eval = xgb_validation),
objective = "multi:softprob",
eval_metric = "mlogloss"
)
# Reshape the flat probability vector to one row per animal.
predictions <- predict(xgb_model, xgb_validation)
predictions <- data.table(t(matrix(predictions, nrow = 5, ncol = nrow(validation))))
# setnames(predictions, colnames(predictions), c("Euthanasia", "Adoption", "Died", "Return_to_owner", "Transfer"))
setnames(predictions, colnames(predictions), c('Euthanasia', 'Adoption', 'Died', 'Return_to_owner', 'Transfer'))
#setcolorder(predictions, neworder = order(colnames(predictions)))
# Validation multi-class log loss.
MultiLogLoss(act = as.matrix(actual_classes), pred = as.matrix(predictions))
xgb_pred <- predictions
#actual_classes <- data.table(model.matrix(~ OutcomeType -1, data = validation))
# Feature importance of the fitted model.
xgb_imp <- xgb.importance(feature_names = predictors,
model = xgb_model)
xgb.plot.importance(xgb_imp)
# Binarise scores at the per-class median and tabulate against the truth.
xgb_classes <- apply(xgb_pred, 2, function(x) ifelse(x>median(x), 1, 0))
xgb_tables <- vector(mode = "list", 5)
xgb_tables[[1]] <- table(xgb_classes[,"Adoption"], validation$Adoption)
xgb_tables[[2]] <- table(xgb_classes[,"Died"], validation$Died)
xgb_tables[[3]] <- table(xgb_classes[,"Euthanasia"], validation$Euthanasia)
xgb_tables[[4]] <- table(xgb_classes[,"Return_to_owner"], validation$Return_to_owner)
xgb_tables[[5]] <- table(xgb_classes[,"Transfer"], validation$Transfer)
xgb_tables
xgb_accuracy <- sapply(xgb_tables, function(x) sum(diag(x))/sum(x))
xgb_accuracy
# --- Local validation of the multinomial model and simple ensemble ---------
#
# validation_datasets <- CreateValidationDatasets(labeled_set, validation)
# training <- validation_datasets[['training']]
# validation <- validation_datasets[['validation']]
# mult_model <- multinom(OutcomeType ~ Sex + Has_Name + DaysUponOutcome + AnimalType + Fixed_Breed, data = training)
mult_model <- multinom(OutcomeType ~ Has_Name + DaysUponOutcome + Fixed_Breed + TimeOfTheDay +
Season + Year + IsMix + IsDomestic + Sex*IsSterilized + HairLength +
NumberOfColors + FirstColor + AnimalType*AgeCategory + IsWeekend + YearPart,
data = training, MaxNWts = 10000)
FitUnknownLevelsToModel(dataset = validation, model = mult_model)
predictions <- data.table(predicted_class = predict(mult_model, newdata = validation, type = "probs"))
# Strip the "predicted_class." prefix and order columns alphabetically.
setnames(predictions, colnames(predictions), gsub(pattern = "predicted_class.", replacement = "", x = colnames(predictions), fixed=TRUE))
setcolorder(predictions, neworder = order(colnames(predictions)))
mult_pred <- predictions
#predictions <- data.table(ID = validation$AnimalID, predictions)
# Align the one-hot truth columns with the prediction columns.
setnames(actual_classes, colnames(actual_classes), gsub(pattern = "OutcomeType", replacement = "", x = colnames(actual_classes), fixed=TRUE))
setcolorder(actual_classes, neworder = order(colnames(actual_classes)))
MultiLogLoss(act = as.matrix(actual_classes), pred = as.matrix(predictions))
#
# Best achievable score (truth scored against itself) as a reference point.
MultiLogLoss(act = as.matrix(actual_classes), pred = as.matrix(actual_classes))
# Binarise at the per-class median and tabulate against the truth.
mult_classes <- apply(mult_pred, 2, function(x) ifelse(x>median(x), 1, 0))
mult_tables <- vector(mode = "list", 5)
mult_tables[[1]] <- table(mult_classes[,"Adoption"], validation$Adoption)
mult_tables[[2]] <- table(mult_classes[,"Died"], validation$Died)
mult_tables[[3]] <- table(mult_classes[,"Euthanasia"], validation$Euthanasia)
mult_tables[[4]] <- table(mult_classes[,"Return_to_owner"], validation$Return_to_owner)
mult_tables[[5]] <- table(mult_classes[,"Transfer"], validation$Transfer)
mult_tables
mult_accuracy <- sapply(mult_tables, function(x) sum(diag(x))/sum(x))
mult_accuracy
xgb_pred
mult_pred
# Simple ensemble: average the two models' probabilities, then compare
# the three log-loss scores.
combined_preds <- (xgb_pred + mult_pred)/2
MultiLogLoss(act = as.matrix(actual_classes), pred = as.matrix(xgb_pred))
MultiLogLoss(act = as.matrix(actual_classes), pred = as.matrix(mult_pred))
MultiLogLoss(act = as.matrix(actual_classes), pred = as.matrix(combined_preds))
# Per-class score histograms, one row per class: xgboost | multinomial | ?.
# NOTE(review): `final_pred` is not defined in this file -- presumably
# `combined_preds` was meant; confirm before running.
png(filename = "scorings_histograms.png", width = 1024, height = 768)
par(mfrow=c(5,3))
hist(xgb_pred$Adoption)
hist(mult_pred$Adoption)
hist(final_pred$Adoption)
hist(xgb_pred$Died)
hist(mult_pred$Died)
hist(final_pred$Died)
hist(xgb_pred$Euthanasia)
hist(mult_pred$Euthanasia)
hist(final_pred$Euthanasia)
hist(xgb_pred$Return_to_owner)
hist(mult_pred$Return_to_owner)
hist(final_pred$Return_to_owner)
hist(xgb_pred$Transfer)
hist(mult_pred$Transfer)
hist(final_pred$Transfer)
dev.off()
# Pairwise agreement of the two models, per class.
plot(xgb_pred$Adoption, mult_pred$Adoption)
plot(xgb_pred$Died, mult_pred$Died)
plot(xgb_pred$Euthanasia, mult_pred$Euthanasia)
plot(xgb_pred$Return_to_owner, mult_pred$Return_to_owner)
plot(xgb_pred$Transfer, mult_pred$Transfer)
|
e035cd6af0e55cec525d3e7c05d33f40d675139b
|
d6f23784a430db1c98ab2960584b387d98835bf8
|
/example/visualize_baba_path.R
|
6fab8fde2569bf19648efde2b5a735dde3e578cc
|
[] |
no_license
|
jackkamm/baba
|
52832c1cfd3e7ffb738c3abc02354480490da852
|
e8507a06b5f847a2d312049f170111889dfea49a
|
refs/heads/master
| 2021-10-27T13:49:25.691762
| 2019-04-17T14:22:37
| 2019-04-17T14:22:37
| 82,648,795
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,889
|
r
|
visualize_baba_path.R
|
library(shiny)
library(tidyr)
library(dplyr)
library(ggplot2)
library(ape)
library(jsonlite)
library(gplots)
library(phytools)
# Return c(min, max) of a vector; range() is the idiomatic stdlib
# equivalent of the original hand-rolled c(min(vec), max(vec)).
get_limits <- function(vec){
  range(vec)
}
# Plot the inferred components at (approximately) the requested sparsity
# level: a text scatter with populations labelled per component, sized and
# coloured by the population and component weights.
plot_baba_components <- function(inferredComponents, sparsityLvl){
# Snap sparsityLvl to the nearest sparsity level present in the data.
sparsity_levels = unique(inferredComponents$sparsity)
sparsity_idx <- which.min(abs(sparsityLvl - sparsity_levels))
sparsityLvl <- sparsity_levels[sparsity_idx]
inferredComponents %>%
filter(sparsity == sparsityLvl) ->
currComponents
# Drop zero-weight components -- unless every weight is zero, in which
# case keep them all so the plot is not empty.
if (max(currComponents$ComponentWeight) > 0) {
currComponents %>%
filter(ComponentWeight > 0) ->
currComponents
}
# Scale limits come from the *full* data set (get_limits) so colours and
# sizes are comparable across different sparsity levels.
currComponents %>%
ggplot(aes(x=Component, label=Population,
y=Population, size=PopulationWeight*ComponentWeight,
color=ComponentWeight, alpha=PopulationWeight)) +
geom_text() +
scale_x_continuous(expand=c(.1,0)) +
scale_alpha_continuous(limits = get_limits(inferredComponents$PopulationWeight)) +
scale_size_continuous(limits=get_limits(inferredComponents$PopulationWeight *
inferredComponents$ComponentWeight)) +
scale_color_continuous(limits=get_limits(inferredComponents$ComponentWeight),
low="blue", high="red") +
facet_grid(Leaf ~ .) +
ggtitle(paste("sparsity = ", sparsityLvl))
}
# Read and row-bind every "*_decomposition.txt" table found in `dirname`.
# Returns NULL when the directory contains no matching files.
read_baba_path <- function(dirname) {
  # full.names = TRUE replaces the manual paste(dir, file, sep = "/");
  # header = TRUE spells out the argument the original partially matched
  # as `head = T`.
  fnames <- list.files(dirname, pattern = ".*_decomposition.txt",
                       full.names = TRUE)
  do.call(rbind, lapply(fnames, read.table, header = TRUE))
}
# Launch a Shiny app for browsing the sparse decomposition across sparsity
# levels via a slider, either on the raw scale (logStep = FALSE) or on the
# log10 scale (logStep = TRUE).
shiny_baba_path <- function(inferredComponents, logStep) {
sparsity_levels = unique(inferredComponents$sparsity)
# Slider end points and step size, rounded for display, on whichever
# scale was requested.
if (!logStep) {
minSparsity = round(min(sparsity_levels), 2)
maxSparsity = round(max(sparsity_levels), 2)
sparsity_step = round((max(sparsity_levels) - min(sparsity_levels)) / (length(sparsity_levels)-1), 2)
} else {
log_sparsity_levels <- log10(sparsity_levels)
min_logSparsity = round(min(log_sparsity_levels), 2)
max_logSparsity = round(max(log_sparsity_levels), 2)
logSparsity_step = round((max(log_sparsity_levels) -
min(log_sparsity_levels)) /
(length(log_sparsity_levels)-1), 2)
}
# Pre-render one plot per sparsity level so moving the slider only
# switches between cached plots instead of re-plotting.
plotsBySparsLvl <- lapply(sparsity_levels, function(sparsLvl) {
inferredComponents %>%
plot_baba_components(sparsLvl) +
theme(legend.position="bottom", text=element_text(size=16),
axis.text=element_text(size=10))
})
if (logStep) {
slider <- sliderInput(
"logSparsity",
"logSparsity =",
min = min_logSparsity,
max = max_logSparsity,
step = logSparsity_step,
value = mean(c(min_logSparsity, max_logSparsity)))
} else {
slider <- sliderInput("sparsity",
"sparsity =",
min = minSparsity,
max = maxSparsity,
step = sparsity_step,
value = mean(c(minSparsity, maxSparsity)))
}
ui <- fluidPage(
#titlePanel("sparse quartet decomposition"),
slider, plotOutput("distPlot", height = "600px")
)
server <- function(input, output) {
output$distPlot <- renderPlot({
# find the nearest sparsity level to the current slider position
if (logStep) {
idx = which.min(abs(input$logSparsity - log_sparsity_levels))
} else {
idx = which.min(abs(input$sparsity - sparsity_levels))
}
plotsBySparsLvl[[idx]]
})
}
# Run the application
shinyApp(ui = ui, server = server)
}
# Script driver: load the fitted decomposition path, browse it
# interactively, then save a static plot for one sparsity level.
inferredComponents <- read_baba_path("fit_baba_path_results")
shiny_baba_path(inferredComponents, logStep=TRUE)
# Static plot at sparsity = 630 (value chosen by the author).
plot_baba_components(inferredComponents, 630) +
  theme(legend.position="bottom", text=element_text(size=16),
        title=element_text(size=10), axis.text=element_text(size=8), legend.text=element_text(size=10),
        legend.key.width=grid::unit(2, "line"))
ggsave("baba_path_raw.png")
|
c6238e2f1256974b61f9afee950b7d6f82d1f3bf
|
73b5d6d0399b6ca3e1eed0f8b95f2cd146d345b9
|
/plot_3.R
|
c53a61f55f89078300189cd0261a55fd60fb9df0
|
[] |
no_license
|
mtelesha/ExData_PeerAssessment2
|
b342761909aced48c0f78896bb2d1e0a0be7ab02
|
63105e2ff0c7a50a94d06cbaa10bef2cc2182e24
|
refs/heads/master
| 2021-01-18T12:45:53.893583
| 2015-03-22T07:59:54
| 2015-03-22T07:59:54
| 32,666,595
| 0
| 0
| null | 2015-03-22T07:35:34
| 2015-03-22T07:35:33
| null |
UTF-8
|
R
| false
| false
| 585
|
r
|
plot_3.R
|
# plot #3
# Baltimore City (fips 24510): yearly total PM2.5 emissions by source type.
# libraries
library(dplyr)
library(ggplot2)
options(scipen=999) # turn off scientific notation on axis labels
# load data files
nei <- readRDS("./data/summarySCC_PM25.rds")
scc <- readRDS("./data/Source_Classification_Code.rds")
# answer the question: total emissions per year and source type in Baltimore
question_3 <- nei %>%
  filter(fips == "24510") %>%
  group_by(year, type) %>%
  summarise(sum = sum(Emissions))
# create the plot: one line per source type
ggplot(data = question_3, aes(x = year, y = sum, group = type, color = type)) +
  geom_line(size = 1.2) +
  geom_point(size = 5)
# save the last plot
ggsave("plot_3.png", width = 4, height = 3)
|
3ef6ff1e16a6adbb343d551426c3abb3a79cff92
|
6694b8995f2d33a7b7daef7f7c999b9cc4be90e7
|
/CourseProject2/Plot6.R
|
f419454771588437afd6644b35f813dd24e5bb92
|
[] |
no_license
|
Fbarangan/Exploratory_Data_Analysis
|
312126c8717871a69aaa660ce95d3fd736ff840e
|
4f795e4837df9bbcc8f2f58069816013fe9ae1bb
|
refs/heads/master
| 2021-01-10T17:15:13.003304
| 2016-04-18T06:25:06
| 2016-04-18T06:25:06
| 54,862,035
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,790
|
r
|
Plot6.R
|
# File management and data download for Plot 6:
# compare ON-ROAD (motor vehicle) PM2.5 emissions over time between
# Baltimore City (fips 24510) and Los Angeles County (fips 06037).
library(dplyr)
library(ggplot2)
# get working directory (printed for the user's orientation)
getwd()
# file location - "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
# Create the download directory if it does not exist yet
if (!file.exists("PM25_Raw_Data")) {
  dir.create("PM25_Raw_Data")
}
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(fileUrl, destfile = "./PM25_Raw_Data/PeerAssessment.zip")
dateOfDownload <- date()
# Manual step: click the PeerAssessment.zip to unzip.
# 2 files are extracted:
#   Source_Classification_Code.rds
#   summarySCC_PM25.rds
# Make sure to go to the right directory
# Read .rds files
NEI <- readRDS ("./CourseProject2/PM25_Raw_Data/PeerAssessment/summarySCC_PM25.rds")
SCC <- readRDS ("./CourseProject2/PM25_Raw_Data/PeerAssessment/Source_Classification_Code.rds")
# Wrap the data frames for dplyr printing
NEI_ddply <- tbl_df(NEI)
SCC_ddply <- tbl_df(SCC)
# Plot 6
#Compare emissions from motor vehicle sources in Baltimore City with emissions from motor vehicle sources in Los Angeles County, California (fips == "06037"). Which city has seen greater changes over time in motor vehicle emissions?
LosAngeles_VS_Baltimore <- NEI_ddply %>%
  filter(fips %in% c("06037", "24510")) %>%
  filter(type == 'ON-ROAD') %>%
  group_by(year, fips) %>%
  summarise(Total = sum(Emissions )) %>%
  select(Year = year, fips, Emission = Total)
# Plot: one line per county, then copy the screen device to a PNG
plot_LosAngeles_VS_Baltimore <- qplot(Year, Emission, geom = "line",
                                      color = fips ,data= LosAngeles_VS_Baltimore) +
  ggtitle("Total Emission of PM2.5 in Baltimore Vs LA ;Type = On-ROAD by Year") +
  xlab("Year(s)") +
  ylab ("Total Emission (PM2.5)")
print(plot_LosAngeles_VS_Baltimore)
dev.copy(png, file = "plot6.png")
dev.off()
|
2961ed9e728480c11ff7f2ff92b8afa1f2c3640c
|
e4880de71470c0d58ebc1000cb89d0151d5aa6b1
|
/man/model_options.Rd
|
6b37e869c561ca4cd97a59af888a54b3c2c3c69d
|
[] |
no_license
|
bprucka/uttr
|
9c42a8e360c41c5402778d3ea1d9a5f9ae45c32e
|
68d02c8567997a8a03d91295005b51a28633c4eb
|
refs/heads/master
| 2021-09-22T09:44:47.307283
| 2018-09-07T15:11:17
| 2018-09-07T15:11:17
| 147,837,755
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 489
|
rd
|
model_options.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/General_Functions.R
\name{model_options}
\alias{model_options}
\title{Set options for a model}
\usage{
model_options(...)
}
\arguments{
\item{...}{A list of unquoted expressions setting the options in the form
of \code{option = value}.}
}
\value{
Returns a named list containing the model options specified
}
\description{
\code{model_options()} sets model specific options for use within \code{model_fit()}.
}
|
d3be8a6de46f3f7286468ac911223b93100e4bb1
|
28470bf5146137c8c7f374f23c503944af5743af
|
/tests/testthat.R
|
a4dd0ec817f74b4ec3cec0079acae00a89f7ff31
|
[
"MIT"
] |
permissive
|
C-Juliette/Geostatistic
|
f86b78695cdfe3c00f94a1b9666b149bcb57814e
|
9d5f02c7392d6240ca6f45683ded722c460600e8
|
refs/heads/main
| 2023-06-20T05:03:30.244606
| 2021-07-20T15:37:02
| 2021-07-20T15:37:02
| 374,126,970
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 64
|
r
|
testthat.R
|
# Standard testthat entry point: runs all tests under tests/testthat/
# for the MonPackage package during R CMD check.
library(testthat)
library(MonPackage)
test_check("MonPackage")
|
df0c227e5131ac417495de0d85d05ff3a91b0a0d
|
ebb314b086f96245ee9e776b8301b261bae78e70
|
/man/Zelig-poisson-bayes-class.Rd
|
6deeb94988ead48d143f02f306d9b058fd09220a
|
[] |
no_license
|
mbsabath/Zelig
|
65e153648ff89790e758b104ffb0de5c1e376c29
|
c90d171f11fde4042710353883f2ccb6e16e471e
|
refs/heads/master
| 2021-01-25T04:21:44.942660
| 2017-06-06T20:33:24
| 2017-06-06T20:33:24
| 93,430,501
| 0
| 1
| null | 2017-07-12T15:14:34
| 2017-06-05T17:39:14
|
R
|
UTF-8
|
R
| false
| true
| 336
|
rd
|
Zelig-poisson-bayes-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model-poisson-bayes.R
\docType{class}
\name{Zelig-poisson-bayes-class}
\alias{Zelig-poisson-bayes-class}
\alias{zpoissonbayes}
\title{Bayesian Poisson Regression}
\description{
Vignette: \url{http://docs.zeligproject.org/articles/zelig_poissonbayes.html}
}
|
5c12d2a299a70fe3ee88542510d3796ba19898ab
|
29585dff702209dd446c0ab52ceea046c58e384e
|
/qdm/R/samediff.R
|
6fd93ea276f1261365d53672ac9d3698e441168e
|
[] |
no_license
|
ingted/R-Examples
|
825440ce468ce608c4d73e2af4c0a0213b81c0fe
|
d0917dbaf698cb8bc0789db0c3ab07453016eab9
|
refs/heads/master
| 2020-04-14T12:29:22.336088
| 2016-07-21T14:01:14
| 2016-07-21T14:01:14
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,277
|
r
|
samediff.R
|
# samediff.R
#
# last mod: Oct/16/2014, FW
# Calculate discrimination probabilities, P("different"), from same-different
# judgments
# Calculate discrimination probabilities, P("different"), from
# same-different judgments.
#
# Args:
#   data - data frame of trials, one row per judgment
#   oa1  - name of the column holding the first stimulus (default "s1")
#   oa2  - name of the column holding the second stimulus (default "s2")
#   resp - name of the response column; values must be only "d"
#          (different) and "s" (same) (default "resp")
#
# Returns:
#   An object of class "psi": list(prob, ntrials, freq, x, y), where
#   prob[i, j] is the proportion of "d" responses for stimulus pair
#   (i, j), and x/y are the stimulus levels (numeric when possible).
psi <- function(data, oa1 = "s1", oa2 = "s2", resp = "resp"){
  ## Check data
  if (!(oa1 %in% names(data) && oa2 %in% names(data)))
    stop("Stimulus names need to be given to oa1 and/or oa2.")
  if (!resp %in% names(data))
    stop("response variable not defined.")
  # BUG FIX: the original read data$resp here (and below), silently
  # ignoring a non-default `resp` argument; use data[[resp]] instead.
  if (!all(sort(unique(data[[resp]])) == c("d", "s")))
    stop("response variable does not consist of 'd' and 's' answers only")
  ## Frequency of "different" responses per stimulus pair
  formula <- as.formula(paste("~", paste(oa1, oa2, sep=" + ")))
  freq <- as.matrix(unclass(xtabs(formula, data[data[[resp]] == "d",])))
  attr(freq, "call") <- NULL
  ## Probability "different" = freq / number of trials per pair
  n <- as.matrix(unclass(xtabs(formula, data)))
  attr(n, "call") <- NULL
  prob <- freq/n
  # Pairs with zero trials give 0/0 = NA; original behavior sets them
  # to probability 1 (preserved here).
  prob[which(is.na(prob))] <- 1
  # Use numeric stimulus levels when the dimnames parse as numbers,
  # otherwise keep them as character.
  x <- if(anyNA(suppressWarnings(
         nnam <- as.numeric(snam <- rownames(freq))))) snam else nnam
  y <- if(anyNA(suppressWarnings(
         nnam <- as.numeric(snam <- colnames(freq))))) snam else nnam
  retval <- list(prob=prob, ntrials=n, freq=freq, x=x, y=y)
  class(retval) <- "psi"
  retval
}
|
8c4b8fe1491fa846d7a44b1aac18bdf18560364e
|
cca264c6bb6daef2d22e749ec039eb5b386a9ef8
|
/exercicios/explorandoInputs/explorandoInputs.R
|
1256cb2c3320564be23db5536f14e01e740061c6
|
[] |
no_license
|
rodrigomotoyama/202006-dashboards
|
696b1289557bbe82b67023ffbeb81ab781f62a01
|
b1fb22fe4f50ed851c6592c2a984ab4d5c7a7674
|
refs/heads/master
| 2022-12-01T07:28:54.563402
| 2020-08-09T02:53:49
| 2020-08-09T02:53:49
| 286,156,044
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,386
|
r
|
explorandoInputs.R
|
library(shiny)
library(tidyverse)
library(readr)
library(lubridate)
dados <- read_rds("dados/ssp.rds")
col_crime <- dados %>%
names() %>%
.[6:28]
col_muni <- dados %>%
count(municipio_nome) %>%
select(municipio_nome) %>%
c()
ui <- fluidPage(
"Série temporal crimes", #Como eu faço pra inserir o nome do municipio aqui?
selectInput(
inputId = "crime",
label = "Selecione o crime",
choices = col_crime
),
selectInput(
inputId = "muni",
label = "Selecione o municipio",
choices = col_muni,
selected = "São Paulo",
multiple = T
),
dateInput(
inputId = "mes",
label = "Selecione o mês"
),
tableOutput(outputId = "tabela")
)
server <- function(input, output, session) {
dados <- read_rds("dados/ssp.rds")
col_crime <- dados %>%
names() %>%
.[6:28]
dados_tidy <- dados %>%
gather(tipo_crime, numero_casos, col_crime) %>%
filter(numero_casos != 0) %>%
mutate(data = ymd(paste(as.character(mes), as.character(ano), sep = "-")))
output$tabela <- renderTable({
# browser()
dados_tidy %>%
filter(tipo_crime == input$crime,
municipio_nome %in% input$muni,
data == input$mes
) %>%
group_by(data, municipio_nome) %>%
summarise(total_crimes = sum(numero_casos)) %>%
data.frame()
})
}
shinyApp(ui, server)
|
76a8536e806658803e11e1d302f79309f97e7335
|
5247d313d1637170b6bbc5e367aba46c88725efd
|
/man/tw_api_get_statuses_user_timeline.Rd
|
17506782c2f1d29d217712d83c2f759db20784b8
|
[] |
no_license
|
fentonmartin/twitterreport
|
dac5c512eea0831d1a84bef8d2f849eab2b12373
|
5ddb467b8650289322ae83e0525b4ff01fba0d1d
|
refs/heads/master
| 2021-08-22T04:25:01.834103
| 2017-11-29T07:47:43
| 2017-11-29T07:47:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 3,069
|
rd
|
tw_api_get_statuses_user_timeline.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/twitter_api.R
\name{tw_api_get_statuses_user_timeline}
\alias{tw_api_get_statuses_user_timeline}
\title{Gets status updates (tweets) from a given user}
\usage{
tw_api_get_statuses_user_timeline(screen_name = NULL, twitter_token,
user_id = NULL, since_id = NULL, count = 100, max_id = NULL,
exclude_replies = NULL, include_rts = NULL, quietly = FALSE, ...)
}
\arguments{
\item{screen_name}{of the user}
\item{twitter_token}{An object of class \link[httr:oauth1.0_token]{Token1.0} as
generated by \link{tw_gen_token}.}
\item{user_id}{The ID of the user for whom to return results for}
\item{since_id}{Returns results with an ID greater than (that is, more recent than) the specified ID}
\item{count}{Number of statuses to get}
\item{max_id}{Returns results with an ID less than (that is, older than) or equal to the specified ID}
\item{exclude_replies}{This parameter will prevent replies from appearing in the returned timeline}
\item{include_rts}{When set to false, the timeline will strip any native retweets}
\item{quietly}{Whether or not to show the 'success' message}
\item{...}{Additional arguments passed to \code{\link[=GET]{GET()}}}
}
\value{
A data.frame with tweets (if success), with the following columns:
\itemize{
\item \code{screen_name}
\item \code{in_reply_to_screen_name}
\item \code{user_id}
\item \code{created_at}
\item \code{id}
\item \code{text}
\item \code{source}
\item \code{truncated}
\item \code{retweet_count}
\item \code{favourites_count}
\item \code{favorited}
\item \code{retweeted}
\item \code{coordinates}
\item \code{source_name}
}
otherwise returns \code{NULL}.
}
\description{
Using the twitter API, gets the status updates of a given user (up to 3,200)
}
\details{
This function is designed to be applied to a large list of twitter
accounts, see the example below.
\subsection{From twitter}{Returns a collection of the most recent Tweets
posted by the user indicated by the screen_name or user_id parameters.
}
}
\examples{
\dontrun{
# List of twitter accounts
users <- c('MarsRovers', 'senatormenendez', 'sciencemagazine')
# Getting the twitts (first gen the token)
key <- tw_gen_token('myapp','key', 'secret')
tweets <- lapply(users, tw_api_get_statuses_user_timeline, twitter_token=key)
# Processing the data (and taking a look)
tweets <- do.call(rbind, tweets)
head(tweets)
}
}
\references{
Twitter REST API (GET statuses/user_timeline)
https://dev.twitter.com/rest/reference/get/statuses/user_timeline
}
\seealso{
\code{\link[=tw_extract]{tw_extract()}}
Other API functions: \code{\link{tw_api_get_followers_ids}},
\code{\link{tw_api_get_followers_list}},
\code{\link{tw_api_get_friends_ids}},
\code{\link{tw_api_get_search_tweets}},
\code{\link{tw_api_get_statuses_sample}},
\code{\link{tw_api_get_trends_place}},
\code{\link{tw_api_get_users_search}},
\code{\link{tw_api_get_users_show}},
\code{\link{tw_api_trends_available}},
\code{\link{tw_gen_token}}
}
\author{
George G. Vega Yon
}
\concept{API functions}
|
4a98f7c38bd66eef279ea7d3666097d8290a0e62
|
c0f8c06b1b28537dc2d86befa562f871a28900fe
|
/base/cl-abc.R
|
fbc77957fcbb629580f9c227fb278bbcdb0ae069
|
[] |
no_license
|
edwaltz/cl-abc
|
d4ec32a9e1122e8f1b0c62c96d94331d4bcf0e3b
|
1cda8403185f52042a83608d9ca9b1811d11c349
|
refs/heads/master
| 2021-01-20T23:31:39.096957
| 2013-06-04T00:15:11
| 2013-06-04T00:15:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,718
|
r
|
cl-abc.R
|
# Composite Likelihood ABC
library(abc)
library(MASS)
pmin <- -100
pmax <- 100
sim.prior <- function(data, num) {
  # Draw `num` points from a Gaussian kernel-density estimate of the
  # distribution of the rows of `data` (normal-reference bandwidth).
  #
  # Args:
  #   data - matrix whose rows are the observed points
  #   num  - number of points to simulate
  #
  # Returns:
  #   A num x ncol(data) matrix of simulated points.
  n.obs <- nrow(data)
  n.dim <- ncol(data)
  # Normal-reference-rule bandwidth matrix scaled from the sample covariance.
  bw <- var(data) * (4 / ((n.dim + 2) * n.obs))^(2 / (n.dim + 4))
  # Resample observed rows uniformly, then jitter each with the kernel.
  idx <- sample(n.obs, num, TRUE, rep(1/n.obs, n.obs))
  data[idx, ] + mvrnorm(num, rep(0, n.dim), bw)
}
clabc <- function(obs, prior, sim, h) {
  # Composite Likelihood ABC rejection step.
  #
  # Args:
  #   obs   - observed summary statistic (for one likelihood component)
  #   prior - current prior sample of the parameters, one draw per row
  #   sim   - simulated summary statistics, one row per prior draw
  #   h     - tolerance passed to abc() ("rejection" method)
  #
  # Returns:
  #   A list with:
  #     $par - accepted (unadjusted) parameter values from abc rejection
  #     $sim - a fresh sample of size nrow(prior) drawn from the KDE of
  #            $par, intended as the prior for the next component.
  #   NOTE(review): clabc.step() reads `ret$prior` from this result, a
  #   field this list does not contain -- presumably it wants `$sim`;
  #   confirm before relying on the iterated "pair" path.
  ret <- list()
  ret$par <- abc(obs, prior, sim, h, "rejection")$unadj.values
  ret$sim <- sim.prior(ret$par, nrow(prior))
  return(ret)
}
clabc.step <- function(num, p, obs, h, rlik, type="paire", ...) {
  # Run (composite likelihood) ABC for one observed summary statistic.
  #
  # Args:
  #   num  - number of samples drawn from the parameter prior
  #   p    - dimension of the parameter vector
  #   obs  - observed summary statistic
  #   h    - ABC tolerance threshold
  #   rlik - function generating simulated summaries from the likelihood;
  #          called as rlik(prior, ndim=d, ...)
  #   type - "full" (full likelihood) or "pair" (pairwise composite).
  #          NOTE(review): the historical default "paire" matches neither
  #          branch, so the default returns an empty list; kept for
  #          backward compatibility -- callers should pass type explicitly.
  #
  # Returns:
  #   The result of the final clabc() call: list with $par (accepted
  #   draws) and $sim (KDE resample of $par).
  op <- options(warn=(-1))          # suppress warnings, as before
  on.exit(options(op), add = TRUE)  # restore options even on error
  ptm.final <- proc.time()          # time record
  obs.val <- as.vector(obs)
  d <- length(obs.val)              # dimension of the summary statistic
  # Uniform prior on [pmin, pmax]^p (pmin/pmax are file-level constants
  # that shadow base::pmin/pmax).
  prior <- matrix(runif(num*p, pmin, pmax), nrow=num)
  ret <- list()
  if (type=="full") {
    sim <- rlik(prior, ndim=d, ...)
    ret <- clabc(obs, prior, sim, h)
    prior <- ret$sim  # BUG FIX: clabc() returns $sim, not $prior
  } else if (type=="pair") {
    order <- combn(d, 2)  # column pairs defining the composite likelihood
    for (ind in 1:(d*(d-1)/2)) {
      sim <- rlik(prior, ndim=d, ...)
      ret <- clabc(obs.val[order[, ind]], prior, sim[, order[, ind]], h)
      # BUG FIX: was `ret$prior`, which is always NULL, so every
      # iteration after the first ran with a NULL prior.
      prior <- ret$sim
    }
  }
  # Finalize the run and report elapsed time.
  cost.final <- proc.time()-ptm.final
  print(cost.final["elapsed"])
  return(ret)
}
adj.margin <- function(join, margin) {
  # Marginal adjustment for composite-likelihood ABC: keep the joint
  # dependence structure (ranks) of `join` but replace each column's
  # values with the sorted values of the matching column of `margin`.
  #
  # Args:
  #   join   - the unadjusted joint sample (matrix or data frame)
  #   margin - sample of the target marginal distributions, same
  #            number of columns as `join`
  #
  # Returns:
  #   The adjusted joint sample, same shape as `join`.
  adj <- join
  # seq_len() instead of 1:ncol(join): safe when join has zero columns
  # (1:0 would iterate over c(1, 0) and error).
  for (ind in seq_len(ncol(join))) {
    adj[order(join[, ind]), ind] <- sort(margin[, ind])
  }
  adj
}
|
45e784a8a78fc1962ce29cdc4b48ae4b58842d0b
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/7717_0/rinput.R
|
32ba048b460a9855931b18ea00b4ed29f9a1d4f3
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Unroot the phylogenetic tree stored in 7717_0.txt (Newick format)
# and write the unrooted tree back out for downstream codeml use.
library(ape)
testtree <- read.tree("7717_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="7717_0_unrooted.txt")
|
1cd27d016fcb26e1134f8d16bb6505f3d7b5e959
|
6fb453f3b45cad66751ee817f9b3463c89196972
|
/R/log_fc.R
|
4a8b3276ac0e928b6189d64d28c75b57b1715e1c
|
[] |
no_license
|
SamirRachidZaim/referenceNof1
|
838c74bdad9f99cc4144ed43368dc2b187403e26
|
65f63660a96022c202addef59888e2026faae24f
|
refs/heads/master
| 2022-12-31T22:02:44.858043
| 2020-10-20T15:16:46
| 2020-10-20T15:16:46
| 296,476,075
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 423
|
r
|
log_fc.R
|
#' Log fold change of paired stimulus-response samples
#'
#' \code{log_fc} computes the per-row log2 fold change of a paired
#' stimulus-response sample matrix, using a +1 pseudocount to guard
#' against \code{log2(0)}.
#'
#' @usage log_fc(pair.mat)
#'
#' @param pair.mat a two-column, stimulus-response paired sample matrix
#'   (column 1 = baseline/stimulus, column 2 = response)
#'
#' @return a numeric vector containing the log2 fold change of each row
log_fc <- function(pair.mat){
  # Log fold change is the difference of logs, log2((y+1)/(x+1)) =
  # log2(y+1) - log2(x+1). The previous quotient
  # log2(y+1)/log2(x+1) was a ratio of logs, not a log fold change.
  log2(pair.mat[,2]+1) - log2(pair.mat[,1]+1)
}
|
6528752f9fddc6d3a0c7b00a9aadb3efd6348d11
|
5a2cd2517a6738105447909a4e8ecda07bc28647
|
/man/BrailleRUsefulLinks.Rd
|
b7f579895f2591328b055d9ab57c67a6874414f8
|
[] |
no_license
|
dewarren/BrailleR
|
e2f2a948832d94661e4da37cf0c813fda83e7561
|
5517918d36e61aade29e57f4ce706137fc697fed
|
refs/heads/master
| 2020-12-02T06:29:52.684393
| 2017-10-02T01:41:41
| 2017-10-02T01:41:41
| 96,844,078
| 0
| 0
| null | 2017-07-11T02:56:05
| 2017-07-11T02:56:05
| null |
UTF-8
|
R
| false
| false
| 702
|
rd
|
BrailleRUsefulLinks.Rd
|
\name{BrailleRUsefulLinks}
\alias{BrailleRHome}
\alias{LURN}
\title{Open the BrailleR Project or Let's Use R Now home page in your browser}
\usage{
BrailleRHome()
LURN(BlindVersion = getOption("BrailleR.VI"))
}
\arguments{
\item{BlindVersion}{Use the version of Let's Use R Now tailored to an audience of blind users.}
}
\value{
Nothing. These functions are for opening a web page and nothing will result in the R session.
}
\description{
Visit the BrailleR Project home page for updates, instructions on how to join the mailing list which we call BlindRUG and get the latest downloads; or the Let's Use R Now (LURN) home page to read an online manual for using R.
}
\author{
A. Jonathan R. Godfrey
}
|
6cfce5084e1eb67bc1477b6bb8c51bc99775eed4
|
a35c23526ac659127a03058426a71b3dfee3e250
|
/HONDO/Saplings/scripts/hondo_saplings_2010.R
|
1be1484430418efa84d13541ce6a3dcb68920e3e
|
[] |
no_license
|
avhesketh/LDP_SEADYN
|
88914e1d61c41d8958ecfd7843d6fd3626e9434b
|
e826215b435c9eeb786cfe2f22388b023df0a21d
|
refs/heads/master
| 2023-04-15T21:26:37.191146
| 2022-05-27T15:30:30
| 2022-05-27T15:30:30
| 372,590,927
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,108
|
r
|
hondo_saplings_2010.R
|
### Extracting 2010 sapling data and tidying for depositing
### AVH November 2021
## TLDR; these data didn't end up in the final deposit because they don't actually look like usual
## sapling data.
library(tidyverse)
library(assertr)
# Read the raw CSV with explicit column names/types, replace "." with NA,
# and convert N/E coordinates to the S/W convention (5 m quadrat side).
data <- read_csv("./Hondo/Saplings/raw_data/2010_data/Hondo_Saplings_Stand7_2010.csv",
                 col_names = c("stand", "quad","tree_tag","species_code","base_coord_N_m",
                               "base_coord_S_m","base_coord_E_m","base_coord_W_m","DBH_cm",
                               "stem_height_m","tree_code","stem_lean_amt","stem_lean_direction", "comments"),
                 skip = 1, col_types = c("f","f","f", "n","n",
                                         "n","n","n","n","c","n",
                                         "c","c")) %>%
  mutate_all(~na_if(., ".")) %>%
  mutate_at(c("base_coord_N_m", "base_coord_W_m",
              "base_coord_S_m", "base_coord_E_m"), as.numeric) %>%
  mutate(base_coord_S_m = if_else(is.na(base_coord_N_m), base_coord_S_m,
                                  5-base_coord_N_m),
         base_coord_W_m = if_else(is.na(base_coord_E_m), base_coord_W_m,
                                  5-base_coord_E_m)) %>%
  select(-base_coord_N_m, -base_coord_E_m)
# Inspect the species codes present
levels(as.factor(data$species_code))
# QC validation: each verify() errors if any row violates the predicate
glimpse(data)
data %>% verify(stand == 7) %>% verify(substr(quad, 1,1) %in% 0:9) %>%
  verify(substr(quad,2,2) %in% c("A","B","C","D","E","F",
                                 "G","H","I","J")) %>%
  verify(is.numeric(c(tree_tag, base_coord_S_m, base_coord_W_m))) %>%
  verify(species_code %in% c("PIMA","LALA")) %>%
  verify(base_coord_S_m > 0 & base_coord_S_m < 5) %>%
  verify(base_coord_W_m > 0 & base_coord_W_m < 5) %>%
  verify(DBH_cm > 0) %>% verify(stem_height_m > 0) %>%
  verify(tree_code %in% c("LL","LD","DD")) %>%
  verify(stem_lean_amt > 0 & stem_lean_amt < 90) %>%
  verify(stem_lean_direction %in% c("N","S","E","W","NW","SW","NE","SE"))
## confusion: are these saplings (DBH was measured, which seems like not saplings, but data are titled saplings...)
|
512357258a1b39a9cbd08c691c61f18bf0814ab0
|
98d1a4a349a2a916cca89ba8eb3e20b3ee68c84b
|
/man/smooth_raster.Rd
|
00af95201cd664327730bda26e597248b8291116
|
[] |
no_license
|
edwindj/sdcSpatial
|
31b23f1c47dd2e90fd40fc822047e4c4d5358069
|
750d8ff7da14499a79ba9465e8e3ce7b92370aff
|
refs/heads/master
| 2023-08-02T19:24:56.369292
| 2023-07-28T13:43:44
| 2023-07-28T13:43:44
| 135,307,240
| 11
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,143
|
rd
|
smooth_raster.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/smooth_raster.R
\name{smooth_raster}
\alias{smooth_raster}
\title{Create kde density version of a raster}
\usage{
smooth_raster(
x,
bw = raster::res(x),
smooth_fact = 5,
keep_resolution = TRUE,
na.rm = TRUE,
pad = TRUE,
padValue = NA,
threshold = NULL,
type = c("Gauss", "circle", "rectangle"),
...
)
}
\arguments{
\item{x}{raster object}
\item{bw}{bandwidth}
\item{smooth_fact}{\code{integer}, disaggregate factor to have a
better smoothing}
\item{keep_resolution}{\code{logical}, should the returned map have the same
resolution as \code{x} or keep the disaggregated raster resulting from
\code{smooth_fact}?}
\item{na.rm}{should the \code{NA} value be removed from the raster?}
\item{pad}{should the data be padded?}
\item{padValue}{what should the padding value be?}
\item{threshold}{cells with a lower (weighted) value of this threshold will be removed.}
\item{type}{what is the type of smoothing (see \code{raster::focal()})}
\item{...}{passed through to \code{\link{focal}}.}
}
\description{
Create kde density version of a raster
}
|
2e400f04c2c9c850501302072452fbae24f06759
|
1e993cc32d8bf94c2aa653004a45b2f6b700c00a
|
/bin/GetLongestContig.R
|
6dc157a9a5245316c24313599bbaac7907e0c739
|
[
"MIT"
] |
permissive
|
skjq/Hannigan_CRCVirome_mBio_2018
|
337582b93e9718395529fa2f5d85f3a8ac5cd2b3
|
7247bbda750ee6ff05b3df853b8b2d0b07e8cd04
|
refs/heads/master
| 2022-12-21T12:24:19.418712
| 2018-10-11T13:12:47
| 2018-10-11T13:12:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,390
|
r
|
GetLongestContig.R
|
# GetLongestContig.R
# Geoffrey Hannigan
# Schloss Lab
# University of Michigan
#
# CLI script: join a contig-length table with a contig-cluster table and
# report the N longest contigs per cluster to a TSV file.
library("optparse")
library("ggplot2")
library("cowplot")
library("plyr")
# Command-line options
option_list <- list(
  make_option(c("-i", "--input"),
    type = "character",
    default = NULL,
    help = "Contig length table.",
    metavar = "character"),
  make_option(c("-t", "--toplength"),
    type = "integer",
    default = 1,
    help = "Amount of top lengths to report.",
    metavar = "character"),
  make_option(c("-c", "--clusters"),
    type = "character",
    default = NULL,
    help = "Contig cluster id table.",
    metavar = "character"),
  make_option(c("-o", "--out"),
    type = "character",
    default = NULL,
    help = "Output name.",
    metavar = "character")
)
opt_parser <- OptionParser(option_list=option_list);
opt <- parse_args(opt_parser);
# Virus
# NOTE(review): `head = FALSE` relies on partial matching to `header`.
inputfile <- read.delim(opt$input, head = FALSE, sep = "\t")
colnames(inputfile) <- c("ContigID", "Length")
clustersfile <- read.delim(opt$clusters, head = FALSE, sep = ",")
colnames(clustersfile) <- c("ContigID", "Cluster")
# Inner join on ContigID
mergeclust <- merge(inputfile, clustersfile, by = "ContigID")
# Get top lengths: per cluster, keep the opt$toplength longest contigs
topcontigsbylength <- ddply(mergeclust, "Cluster", function(x) head(x[order(x$Length, decreasing = TRUE) , ], opt$toplength))
write.table(
  x = topcontigsbylength,
  file = opt$out,
  quote = FALSE,
  sep = "\t",
  row.names = FALSE
)
|
b5ccef73e0b149430b28ae9a2c7f96af1c6372a2
|
9267a73131c01475cf20cf6719ce991653700941
|
/man/carstats.Rd
|
9addad2b21301edf5e0aef400af50e97dd5a1770
|
[] |
no_license
|
cran/CornerstoneR
|
40d8f0aa0e3ea3b99be2741f212ce4edfcaf7eb8
|
308bc720857013667a66bd6f2f9f1586d4ef0415
|
refs/heads/master
| 2021-07-03T21:11:06.083410
| 2020-08-28T10:30:33
| 2020-08-28T10:30:33
| 162,022,134
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 736
|
rd
|
carstats.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{carstats}
\alias{carstats}
\title{Data from carstats}
\format{
A \code{\link{data.table}} object with 406 observations and 9 variables.
The variables and their scale of measurement are as follows:
\itemize{
\item{Model: }{nominal}
\item{Origin: }{nominal}
\item{MPG: }{interval}
\item{Cylinders: }{ordinal}
\item{Displacement: }{interval}
\item{Horsepower: }{interval}
\item{Weight: }{interval}
\item{Acceleration: }{interval}
\item{Model.Year: }{interval}
}
}
\source{
Cornerstone sample dataset
}
\description{
Dataset of different cars and various values.
}
|
eb2b694fee29e3a77d3d35b58443d0ba48cf11f2
|
a36788ef0c3589f5ca89b909341ca79e958af036
|
/R/data_ODsample.R
|
f951cf7d1993faf73b97378dca7fd9d2acd1225d
|
[] |
no_license
|
ropensci/skynet
|
dfe786786a397a1ed75fe8e3d3da6e5cff1504b3
|
255d83ac1ed0b3e98980e5762014514ec5e6c6af
|
refs/heads/master
| 2022-11-07T00:27:45.699099
| 2022-10-25T13:15:29
| 2022-10-25T13:15:29
| 86,709,909
| 8
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 161
|
r
|
data_ODsample.R
|
#' Sample OD data
#'
#' Sample origin-destination (OD) data to use with SKYNET functions
#'
#' @format A dataframe with 500,000 observations and 19 variables
#' @name OD_Sample
#'
NULL
|
761b1eddac38fa39b9a4e7a07c9237ec96de4b7f
|
bb12ce1efd621cf2a4a942d7939bce44610aab3e
|
/R/spped-main-dir.R
|
f2cb7f1116a4a3836d527532a5a323263ae4f56a
|
[] |
no_license
|
dantonnoriega/sppedr
|
2eabd1616c961f79b8e91c12c8cc77bf096930a3
|
c377de137aaf0f9b8d78efb299013a9de162e6de
|
refs/heads/master
| 2021-01-21T14:24:25.837312
| 2017-08-16T23:01:11
| 2017-08-16T23:01:11
| 95,277,197
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,549
|
r
|
spped-main-dir.R
|
tools_main_dir <- function() {
  # Closure factory bundling get/set/has/clear helpers for the
  # SPPED_MAIN_DIR environment variable, persisted in ~/.Renviron.
  # Returned as a named list so the exported wrappers below can share
  # the same helpers.
  get = function() {
    # Return the configured main dir, or NULL (with a message) if unset.
    env <- Sys.getenv('SPPED_MAIN_DIR')
    if(identical(env, "")) {
      message("Couldn't find env var SPPED_MAIN_DIR. This the path to the main SPPED directory on your computer. Please set using set_main_dir().")
      return(NULL)
    } else env
  }
  set = function(path = NULL) {
    # Set the main dir: from `path` if given, otherwise prompt the user.
    if(!is.null(path)) {
      main_dir <- path
    } else {
      # have user input main_dir
      message("Please enter the path to your SPPED directory, and press enter:")
      main_dir <- readline(": ")
      main_dir <- normalizePath(main_dir)
    }
    stopifnot(dir.exists(main_dir))
    text <- paste0("SPPED_MAIN_DIR=",main_dir,"\n")
    env <- Sys.getenv('SPPED_MAIN_DIR')
    # check for existing SPPED_MAIN_DIR
    # NOTE(review): if the variable is set only in the session (not in
    # ~/.Renviron), grep() finds no line, renv[integer(0)] <- text is a
    # no-op, and the new value is not persisted -- confirm intended.
    if (!identical(env, "")) { # if found, replace line and rewrite
      renv <- readLines(file.path(normalizePath("~/"), ".Renviron"))
      loc <- grep("SPPED_MAIN_DIR", renv)
      renv[loc] <- text
      Sys.setenv(SPPED_MAIN_DIR = main_dir)
      writeLines(renv, file.path(normalizePath("~/"), ".Renviron"))
    } else { # if not found, append to file
      Sys.setenv(SPPED_MAIN_DIR = main_dir)
      cat(text, file=file.path(normalizePath("~/"), ".Renviron"), append=TRUE)
    }
  }
  has = function() {
    # TRUE when SPPED_MAIN_DIR is set (non-empty) in the session.
    env <- Sys.getenv('SPPED_MAIN_DIR')
    if(!identical(env, "")) TRUE else FALSE
  }
  clear = function() {
    # Remove SPPED_MAIN_DIR from the session and strip its line(s)
    # from ~/.Renviron.
    env <- Sys.getenv('SPPED_MAIN_DIR')
    # clear SPPED_MAIN_DIR variable
    if (!identical(env, "")) { # if found, replace line and rewrite
      renv <- readLines(file.path(normalizePath("~/"), ".Renviron"))
      indx <- grepl("SPPED_MAIN_DIR", renv)
      Sys.setenv(SPPED_MAIN_DIR = "")
      writeLines(renv[!indx], file.path(normalizePath("~/"), ".Renviron"))
    }
  }
  list(get = get, set = set, has = has, clear = clear)
}
#' Assign the tools function environment
#' @noRd
spped_main_dir <- tools_main_dir()
#' Clear the stored SPPED main_dir setting
#' @export
clear_main_dir <- spped_main_dir$clear
#' Set the SPPED main_dir
#' @export
set_main_dir <- spped_main_dir$set
#' Get the SPPED main_dir, erroring if it has not been set
#' @export
get_main_dir <- function() {
  if(!spped_main_dir$has()) stop("Path to SPPED main directory not found. Please set your the path to your SPPED main_dir using function 'set_main_dir()'.")
  spped_main_dir$get()
}
#' Make sure the SPPED main_dir string is not empty
#'
#' @return logical TRUE if main_dir is not empty
#'
#' @export
has_main_dir <- spped_main_dir$has
|
f8c2a5e48a468084e785f1e65ce7aeec9578fb6e
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/phonR/examples/plotVowels.Rd.R
|
b9e00fdfed671013d59feb4edcb28f3dabfdcd58
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,491
|
r
|
plotVowels.Rd.R
|
library(phonR)
### Name: Plot vowel formant frequencies
### Title: Plot vowel formant data and a variety of derivative measures.
### Aliases: plotVowels
### Keywords: device hplot
### ** Examples
# Monophthong example: F1/F2 vowel plot with per-group ellipses/polygons
data(indoVowels)
with(indo, plotVowels(f1, f2, vowel, group=gender, plot.means=TRUE,
    pch.means=vowel, ellipse.line=TRUE, poly.line=TRUE,
    poly.order=c('i','e','a','o','u'), var.col.by=vowel,
    var.sty.by=gender, pretty=TRUE, alpha.tokens=0.3,
    cex.means=2))
# simulate some diphthongs: random per-token formant deltas and slopes
f1delta <- sample(c(-10:-5, 5:15), nrow(indo), replace=TRUE)
f2delta <- sample(c(-15:-10, 20:30), nrow(indo), replace=TRUE)
f1coefs <- matrix(sample(c(2:5), nrow(indo) * 2, replace=TRUE),
    nrow=nrow(indo))
f2coefs <- matrix(sample(c(3:6), nrow(indo) * 2, replace=TRUE),
    nrow=nrow(indo))
# Two extra timepoints (a, b) per token built from the deltas above
indo <- within(indo, {
    f1a <- f1 + f1delta * f1coefs[,1]
    f2a <- f2 + f2delta * f2coefs[,1]
    f1b <- f1a + f1delta * f1coefs[,2]
    f2b <- f2a + f2delta * f2coefs[,2]
    })
# Diphthong example: three timepoints per token drawn with arrows
with(indo, plotVowels(cbind(f1, f1a, f1b), cbind(f2, f2a, f2b), vowel,
    group=gender, plot.tokens=TRUE, pch.tokens=NA,
    alpha.tokens=0.3, plot.means=TRUE, pch.means=vowel,
    var.col.by=vowel, var.sty.by=gender, pretty=TRUE,
    diph.arrows=TRUE, diph.args.tokens=list(lwd=0.8),
    diph.args.means=list(lwd=2)))
|
cb92dd916f959ca3214c478ed325368c56b56256
|
edd3571a86b3293dd3d97b7ef6c43386be58575c
|
/man/rmvnorm_cholesky.Rd
|
103063cfccccb667f385271dab78dfb0a8647d12
|
[] |
no_license
|
pierrejacob/PET
|
88ec8531ed13fb5ea27eb30037f063bb11b766fa
|
5057eb5d7c52f7b4fc18a258260a9a8557d6a72b
|
refs/heads/master
| 2021-01-19T16:42:17.937684
| 2018-05-02T22:19:17
| 2018-05-02T22:19:17
| 88,281,336
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,041
|
rd
|
rmvnorm_cholesky.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multivariatenormal.R
\name{rmvnorm_cholesky}
\alias{rmvnorm_cholesky}
\title{Generate multivariate Normals}
\usage{
rmvnorm_cholesky(n, mean, cholesky)
}
\arguments{
\item{n}{is the number of desired samples}
\item{mean}{is the mean vector}
\item{cholesky}{is the cholesky factor of the covariance matrix, obtained e.g. with \code{chol()}.}
}
\value{
a n x d matrix where d is both the length of \code{mean} and the dimension of \code{cholesky}. Each row contains a draw from
the desired multivariate Normal.
}
\description{
Function to generate draws from a multivariate Normal distribution, given Cholesky factor of the covariance.
}
\details{
This function does not check anything (i.e. that the given
covariance is PSD). Thus it is faster than functions in standard packages, but more risky.
}
\examples{
rmvnorm_cholesky(10, rep(0, 5), chol(diag(1, 5, 5)))
}
\seealso{
\code{\link{rmvnorm}}, \code{\link{dmvnorm}}, \code{\link{dmvnorm_cholesky_inverse}}
}
|
adbb4e82979286a6cda98f18d84ff57f50124013
|
4af4d4872f7bbe51e41db6e60889498a67a31249
|
/all_models.R
|
8dee7a0099537df8ecfb6694946b70bc3874f75b
|
[] |
no_license
|
mmhu/imaging-project
|
a230943834e0f0c03d59e49ada01e2573b10d943
|
bdaba26e67fbf8563b05bf9ec11721d08d1448fc
|
refs/heads/master
| 2021-01-25T07:49:03.918835
| 2017-07-11T19:26:33
| 2017-07-11T19:26:33
| 93,665,393
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,862
|
r
|
all_models.R
|
# (1)
# Setup: load libraries, read image-feature tables, build a 500/200
# train/test split, and fit a baseline weighted radial SVM.
# NOTE(review): sample() is not seeded, so the split differs each run.
library(knitr)
opts_chunk$set(cache = TRUE)
opts_knit$set(root.dir = "C:/Users/bensadis/Desktop/BDSI/imaging-project")
library(devtools)
library(ggbiplot)
library(ada)
library(randomForest)
library(e1071)
library(tidyverse)
library(mgcv)
library(gbm)
library(nnet)
library(ROCR)
nTrain <- 500
sampleidx <- sample(1:700,nTrain)
print("all features")
### get features ###
features <- read.csv("features.csv", header = TRUE)
features <- features[, -8] # get rid of glcm_correlation with all 1 values
firstorder <- read.csv("firstorder.csv", header = TRUE)
colors <- read.csv("colors.csv", header = TRUE)
colors <- colors[, 2:13]
circ <- read.csv("circularity.csv", header = TRUE) %>% as_vector
asym <- read.csv("asym.csv", header = TRUE)
asym <- asym[,2] %>% as_vector
truth <- read.csv("training_set_truth.csv", header = FALSE)
truth$V3 = as.factor(truth$V3)
truth = truth$V3
# Baseline feature set: first-order stats + colors + circularity
all.features <- cbind(firstorder, colors, circ)
all.features.scaled <- scale(all.features, center=TRUE, scale=TRUE)
all.features.scaled = as.data.frame(all.features.scaled)
train.x <- all.features[sampleidx,]
train.xs <- all.features.scaled[sampleidx,]
train.y <- truth[sampleidx]
test.x <- all.features[-sampleidx,]
test.xs <- all.features.scaled[-sampleidx,]
test.y <- truth[-sampleidx]
# Weighted radial SVM (malignant class upweighted 0.8 vs 0.2)
svm.modelr1 <- svm(train.y ~ ., data = train.xs, kernel = "radial", cost = 10^2, gamma = 10^-3, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.predr1 <- predict(svm.modelr1, newdata = test.xs, type = "class")
r1.tb <- table(test.y, svm.predr1)
#print(r1.tb)
# Balanced accuracy = mean of sensitivity and specificity
sens <- r1.tb[2,2] / (r1.tb[2,2] + r1.tb[2,1])
spec <- r1.tb[1,1] / (r1.tb[1,1] + r1.tb[1,2])
acc <- (sens + spec) / 2
#print(sens)
#print(spec)
print(acc)
# Repeat the radial-SVM fit 50 times, each round using a random subset of 20
# of the 42 texture features (plus all first-order / color / circularity /
# asymmetry features), to gauge how sensitive balanced accuracy is to the
# choice of texture features.
for (round in 1:50) {
### get features ###
features <- read.csv("features.csv", header = TRUE)
features <- features[, -8] # get rid of glcm_correlation with all 1 values
# Random subset of 20 texture features for this round.
feat.idx <- sample(42, 20)
print(feat.idx)
features <- features[, feat.idx]
firstorder <- read.csv("firstorder.csv", header = TRUE)
colors <- read.csv("colors.csv", header = TRUE)
colors <- colors[, 2:13]
circ <- read.csv("circularity.csv", header = TRUE) %>% as_vector
asym <- read.csv("asym.csv", header = TRUE)
asym <- asym[,2] %>% as_vector
truth <- read.csv("training_set_truth.csv", header = FALSE)
truth$V3 = as.factor(truth$V3)
truth = truth$V3
# Note: here the sampled texture features and asymmetry ARE included.
all.features <- cbind(firstorder, features, colors, circ, asym)
all.features.scaled <- scale(all.features, center=TRUE, scale=TRUE)
all.features.scaled = as.data.frame(all.features.scaled)
train.x <- all.features[sampleidx,]
train.xs <- all.features.scaled[sampleidx,]
train.y <- truth[sampleidx]
test.x <- all.features[-sampleidx,]
test.xs <- all.features.scaled[-sampleidx,]
test.y <- truth[-sampleidx]
svm.modelr1 <- svm(train.y ~ ., data = train.xs, kernel = "radial", cost = 10^2, gamma = 10^-3, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.predr1 <- predict(svm.modelr1, newdata = test.xs, type = "class")
r1.tb <- table(test.y, svm.predr1)
#print(r1.tb)
sens <- r1.tb[2,2] / (r1.tb[2,2] + r1.tb[2,1])
spec <- r1.tb[1,1] / (r1.tb[1,1] + r1.tb[1,2])
acc <- (sens + spec) / 2
#print(sens)
#print(spec)
# Per-round balanced accuracy for this random feature subset.
print(acc)
}
# Grid of candidate SVMs on the scaled training data: five radial-kernel
# settings (r1-r5) and five sigmoid-kernel settings (s1-s5), all with the
# same 0.2/0.8 benign/malignant class weighting.
svm.modelr1 <- svm(train.y ~ ., data = train.xs, kernel = "radial", cost = 10^2, gamma = 10^-3, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.modelr2 <- svm(train.y ~ ., data = train.xs, kernel = "radial", cost = 10^3*5, gamma = 10^-5*5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.modelr3 <- svm(train.y ~ ., data = train.xs, kernel = "radial", cost = 10^2*5, gamma = 10^-4*5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.modelr4 <- svm(train.y ~ ., data = train.xs, kernel = "radial", cost = 10^3*5, gamma = 10^-5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.modelr5 <- svm(train.y ~ ., data = train.xs, kernel = "radial", cost = 10^4, gamma = 10^-5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.models1 <- svm(train.y ~ ., data = train.xs, kernel = "sigmoid", coef0 = 10^-1, cost = 10^2, gamma = 10^-4*5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.models2 <- svm(train.y ~ ., data = train.xs, kernel = "sigmoid", coef0 = 10^-6, cost = 10^2, gamma = 10^-4*5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.models3 <- svm(train.y ~ ., data = train.xs, kernel = "sigmoid", coef0 = 10^0, cost = 10^4*5, gamma = 10^-5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.models4 <- svm(train.y ~ ., data = train.xs, kernel = "sigmoid", coef0 = 10^-3, cost = 10^2, gamma = 10^-4*5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.models5 <- svm(train.y ~ ., data = train.xs, kernel = "sigmoid", coef0 = 10^-6, cost = 10^3, gamma = 10^-5*5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
##################VVVVVVVVVVVVVVVVVVV CROSS VALIDATION VVVVVVVVVVVVVVVVVVV###################
# Repeat 10-fold cross-validation num_rounds times for four classifiers
# (SVM, neural net, AdaBoost, random forest); each fold holds out 70 of the
# 700 lesions. Per-round sensitivity, specificity and balanced accuracy are
# accumulated in the vectors below (denominators 135 malignant / 565 benign
# are the class counts in the full data set).
num_rounds <- 10
svm.sensitivity <- rep(0,num_rounds)
nnet.sensitivity <- rep(0,num_rounds)
ada.sensitivity <- rep(0,num_rounds)
rf.sensitivity <- rep(0,num_rounds)
svm.specificity <- rep(0,num_rounds)
nnet.specificity <- rep(0,num_rounds)
ada.specificity <- rep(0,num_rounds)
rf.specificity <- rep(0,num_rounds)
svm.avg_accuracy <- rep(0,num_rounds)
nnet.avg_accuracy <- rep(0,num_rounds)
ada.avg_accuracy <- rep(0,num_rounds)
rf.avg_accuracy <- rep(0,num_rounds)
for (n in 1:num_rounds) {
print(paste0("n: ", n))
# Fresh random permutation of the 700 indices defines this round's folds.
ss <- sample(700,replace=F)
### svm ###
print("svm")
svm.pred <- rep(0,700)
for (i in seq(1,700,by=70)) {
svm.cv <- svm(truth[-ss[i:(i+69)]] ~ ., data = all.features.scaled[-ss[i:(i+69)],], kernel = "radial", cost = 10^2*5, gamma = 10^-5*5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.pred[ss[i:(i+69)]] = predict(svm.cv, newdata = all.features.scaled[ss[i:(i+69)],])
}
svm.table = table(truth, svm.pred)
### neural net ###
print("nnet")
nnet.pred <- rep(0,700)
for (i in seq(1,700,by=70)) {
nnet.cv <- nnet(truth[-ss[i:(i+69)]] ~ ., data = all.features.scaled[-ss[i:(i+69)],], size = 5, decay = 1.0e-5, maxit = 1000, trace=FALSE)
nnet.pred[ss[i:(i+69)]] = predict(nnet.cv, newdata = all.features.scaled[ss[i:(i+69)],], type = "class")
}
nnet.table = table(truth, nnet.pred)
### adaboost ###
print("ada")
ada.pred <- rep(0,700)
for (i in seq(1,700,by=70)) {
ada.cv <- ada(truth[-ss[i:(i+69)]] ~ ., data = all.features.scaled[-ss[i:(i+69)],], loss = "e", type = "discrete", iter=50, nu=0.08, rpart.control(maxdepth = 4))
ada.pred[ss[i:(i+69)]] = predict(ada.cv, newdata = all.features.scaled[ss[i:(i+69)],])
}
ada.table = table(truth, ada.pred)
### random forest ###
print("rf")
rf.pred <- rep(0,700)
for (i in seq(1,700,by=70)) {
rf.cv <- randomForest(as.factor(truth[-ss[i:(i+69)]]) ~ ., data = all.features.scaled[-ss[i:(i+69)],], ntree = 4, mtry = 18, sampsize = 630, maxnodes = 20, classwt=(c("benign" = 0.2, "malignant" = 0.8)))
rf.pred[ss[i:(i+69)]] = predict(rf.cv, newdata = all.features.scaled[ss[i:(i+69)],])
}
rf.table = table(truth, rf.pred)
### concurrency tables ###
# Bare expressions: these only print when run interactively / under knitr.
svm.table
nnet.table
ada.table
rf.table
svm.sensitivity[[n]] = svm.table[2,2] / 135
nnet.sensitivity[[n]] = nnet.table[2,2] / 135
ada.sensitivity[[n]] = ada.table[2,2] / 135
rf.sensitivity[[n]] = rf.table[2,2] / 135
svm.specificity[[n]] = svm.table[1,1] / 565
nnet.specificity[[n]] = nnet.table[1,1] / 565
ada.specificity[[n]] = ada.table[1,1] / 565
rf.specificity[[n]] = rf.table[1,1] / 565
svm.avg_accuracy[[n]] = (svm.sensitivity[[n]] + svm.specificity[[n]]) / 2
nnet.avg_accuracy[[n]] = (nnet.sensitivity[[n]] + nnet.specificity[[n]]) / 2
ada.avg_accuracy[[n]] = (ada.sensitivity[[n]] + ada.specificity[[n]]) / 2
rf.avg_accuracy[[n]] = (rf.sensitivity[[n]] + rf.specificity[[n]]) / 2
}
# Average each metric over the num_rounds repetitions of cross-validation.
svm.sensitivity.mean <- mean(svm.sensitivity)
nnet.sensitivity.mean <- mean(nnet.sensitivity)
ada.sensitivity.mean <- mean(ada.sensitivity)
rf.sensitivity.mean <- mean(rf.sensitivity)
svm.specificity.mean <- mean(svm.specificity)
nnet.specificity.mean <- mean(nnet.specificity)
ada.specificity.mean <- mean(ada.specificity)
rf.specificity.mean <- mean(rf.specificity)
svm.avg_accuracy.mean <- mean(svm.avg_accuracy)
nnet.avg_accuracy.mean <- mean(nnet.avg_accuracy)
ada.avg_accuracy.mean <- mean(ada.avg_accuracy)
rf.avg_accuracy.mean <- mean(rf.avg_accuracy)
# Bare names below print only when run interactively / under knitr.
svm.sensitivity.mean
nnet.sensitivity.mean
ada.sensitivity.mean
rf.sensitivity.mean
svm.specificity.mean
nnet.specificity.mean
ada.specificity.mean
rf.specificity.mean
svm.avg_accuracy.mean
nnet.avg_accuracy.mean
ada.avg_accuracy.mean
rf.avg_accuracy.mean
##ROC curve##
# Repeated 10-fold CV for a sigmoid-kernel SVM, reusing the metric vectors
# declared above (so this overwrites the radial-SVM entries per round).
# NOTE(review): probability=TRUE is set on the fit, but predict() below is
# still used for class labels, not probabilities.
for (n in 1:num_rounds) {
print(paste0("n: ", n))
ss <- sample(700,replace=F)
### svm ###
print("svm")
svm.pred <- rep(0,700)
for (i in seq(1,700,by=70)) {
svm.cv <- svm(truth[-ss[i:(i+69)]] ~ ., data = all.features.scaled[-ss[i:(i+69)],], probability=TRUE,kernel = "sigmoid", cost = 10^2, coef0 = 10^-1, gamma = 10^-4*5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.pred[ss[i:(i+69)]] = predict(svm.cv, newdata = all.features.scaled[ss[i:(i+69)],])
}
svm.table = table(truth, svm.pred)
### concurrency tables ###
svm.table
svm.sensitivity[[n]] = svm.table[2,2] / 135
svm.specificity[[n]] = svm.table[1,1] / 565
svm.avg_accuracy[[n]] = (svm.sensitivity[[n]] + svm.specificity[[n]]) / 2
}
## ada ROC curve ##
# NOTE(review): this section contained two unresolved git merge-conflict hunks
# (<<<<<<< HEAD / ======= / >>>>>>> 550f2c1...), which made the file
# unparseable. Both hunks are resolved in favor of the 550f2c1 branch: the
# shared code below the first hunk already divides ada.pred by num_rounds, so
# the averaged repeated-CV version is the one the rest of the script expects,
# and the second hunk's which.max() replaces HEAD's hard-coded cutoff index 362.
num_rounds <- 100
# Accumulate class probabilities over num_rounds repeated 10-fold CV runs.
ada.pred <- matrix(data=0,nrow=700,ncol=2)
for (r in 1:num_rounds) {
  print(r)
  ss <- sample(700,replace=F)
  for (i in seq(1,700,by=70)) {
    ada.cv <- ada(truth[-ss[i:(i+69)]] ~ ., data = all.features[-ss[i:(i+69)],], loss = "e", type = "discrete", iter=50, nu=0.08, rpart.control(maxdepth = 4))
    ada.pred[ss[i:(i+69)],] = ada.pred[ss[i:(i+69)],] + predict(ada.cv, newdata = all.features[ss[i:(i+69)],],type="prob")
  }
}
# Average the accumulated probabilities, then build the ROC curve (ROCR).
ada.pred <- ada.pred / num_rounds
ada.predictions <- prediction(ada.pred[,2],labels=truth)
ada.perf <- performance(ada.predictions,"tpr","fpr")
plot(ada.perf,main="ROC Curve for AdaBoost",col=2,lwd=2)
abline(a=0,b=1,lwd=2,lty=2,col="gray")
# Balanced accuracy at every cutoff (135 malignant / 565 benign), and the
# probability threshold that maximizes it.
avg_accuracies <- (ada.predictions@tp[[1]]/135 + ada.predictions@tn[[1]]/565) / 2
threshold <- ada.predictions@cutoffs[[1]][which.max(avg_accuracies)]
###RandomForest Curve###
# Single 10-fold CV pass collecting out-of-fold class probabilities for the
# random forest, then an ROC curve via ROCR.
ss <- sample(700,replace=F)
rf.pred <- matrix(data=NA,nrow=700,ncol=2)
for (i in seq(1,700,by=70)) {
rf.cv <- randomForest(as.factor(truth[-ss[i:(i+69)]]) ~ ., data = all.features[-ss[i:(i+69)],], ntree = 4, mtry = 18, sampsize = 630, maxnodes = 20, classwt=(c("benign" = 0.2, "malignant" = 0.8)))
rf.pred[ss[i:(i+69)],] = predict(rf.cv, newdata = all.features[ss[i:(i+69)],],type="prob")
}
rf.predictions <- prediction(rf.pred[,2],labels=truth)
rf.perf <- performance(rf.predictions,"tpr","fpr")
plot(rf.perf,main="ROC Curve for Random Forest",col=2,lwd=2)
abline(a=0,b=1,lwd=2,lty=2,col="gray")
## svm ROC ##
# Out-of-fold class probabilities for the radial SVM (probability=TRUE on
# both fit and predict; probabilities are read from the "probabilities" attr).
svm.pred <- matrix(data=NA,nrow=700,ncol=2)
for (i in seq(1,700,by=70)) {
svm.cv <- svm(truth[-ss[i:(i+69)]] ~ ., data = all.features.scaled[-ss[i:(i+69)],], probability=TRUE, kernel = "radial", cost = 10^2*5, gamma = 10^-5*5, class.weights = c("benign" = 0.2, "malignant" = 0.8))
svm.pred.class <- predict(svm.cv, newdata = all.features.scaled[ss[i:(i+69)],], probability=TRUE)
svm.pred[ss[i:(i+69)],] <- attr(svm.pred.class,"probabilities")
}
# NOTE(review): svm.pred is a 700x2 probability matrix here, so this table()
# call looks like a leftover from the class-prediction version above.
svm.table = table(truth, svm.pred)
svm.predictions <- prediction(svm.pred[,2],labels=truth)
svm.perf <- performance(svm.predictions,"tpr","fpr")
plot(svm.perf,main="ROC Curve for SVM",col=2,lwd=2)
abline(a=0,b=1,lwd=2,lty=2,col="gray")
## neural network ROC ##
# Out-of-fold raw (continuous) outputs from the single-hidden-layer net.
ss <- sample(700,replace=F)
nnet.pred <- rep(0,700)
for (i in seq(1,700,by=70)) {
nnet.cv <- nnet(truth[-ss[i:(i+69)]] ~ ., data = all.features.scaled[-ss[i:(i+69)],], size = 5, decay = 1.0e-5, maxit = 1000, trace=FALSE)
nnet.pred[ss[i:(i+69)]] = predict(nnet.cv, newdata = all.features.scaled[ss[i:(i+69)],], type = "raw")
}
nn.predictions = prediction(nnet.pred,truth)
nn.perf = performance(nn.predictions,"tpr","fpr")
plot(nn.perf,main="ROC Curve for NN",col=2,lwd=2)
abline(a=0,b=1,lwd=2,lty=2,col="gray")
|
220e8ecca8e3b4effacc33e7502ff3a059b34e91
|
1858c69a9338db6cdce1c01f9660949b32601477
|
/inst/shiny/server.R
|
90a839ef92df3cc131ba7e166ddf079471a450af
|
[
"MIT"
] |
permissive
|
lianos/multiGSEA.shiny
|
c457793966bcad00b9abfd73acf77c6915d02d05
|
ef24e4dda17ec6a79928e9cf7da7da57b5ee57eb
|
refs/heads/develop
| 2021-06-04T16:24:57.860391
| 2020-09-11T15:37:09
| 2020-09-11T15:37:09
| 112,698,587
| 3
| 5
|
NOASSERTION
| 2020-02-13T00:11:19
| 2017-12-01T05:24:24
|
R
|
UTF-8
|
R
| false
| false
| 4,669
|
r
|
server.R
|
# Shiny server for the multiGSEA explorer: loads a MultiGSEAResult (either
# passed via explore() through an option, or uploaded by the user), then wires
# up the Overview, GSEA Results, and Differential Expression tabs via shiny
# modules. Reactive wiring is order-sensitive; code left byte-identical.
shinyServer(function(input, output, session) {
## If this application was invoked via explore(MultiGSEAResult), then
## getOption(EXPLORE_MULTIGSEA_RESULT='path/to/result.rds') was set that
## we can load, otherwise this will respond to a user upload.
mgc <- reactive({
## Are we here because the user uploaded something, or did the user ask
## to `explore(MultiGSEAResult)`? This implementation feels wrong, but ...
if (is.null(input$mgresult)) {
mg <- getOption('EXPLORE_MULTIGSEA_RESULT', NULL)
res <- failWith(NULL, MultiGSEAResultContainer(mg), silent=TRUE)
return(res)
}
## User uploaded a file
return(failWith(NULL, MultiGSEAResultContainer(input$mgresult$datapath)))
})
## Feature-level logFC statistics, sorted by decreasing logFC (data.table).
lfc <- reactive({
lfc <- req(mgc()$mg)
lfc <- logFC(lfc, as.dt=TRUE)
lfc[order(logFC, decreasing=TRUE)]
})
## Module holding the user's method/FDR filter selections.
gs_result_filter <- callModule(mgResultFilter, 'mg_result_filter', mgc)
## Overview Tab ==============================================================
output$gseaMethodSummary <- renderUI({
obj <- failWith(NULL, expr=mgc(), silent=TRUE)
if (!is(obj, 'MultiGSEAResultContainer')) {
tags$p(style="font-weight: bold; color: red",
"Upload the MultiGSEAResult object to initialize the application")
} else {
tagList(
tags$h4("GSEA Analyses Overview"),
summaryHTMLTable.multiGSEA(mgc()$mg, mgc()$methods,
gs_result_filter()$fdr(),
p.col='padj.by.collection')
)
}
})
## GSEA Results Tab ==========================================================
gs_viewer <- callModule(geneSetContrastView, 'geneset_viewer',
mgc, maxOptions=500, server=TRUE)
## A table of GSEA statistics/results for the given method and fdr threshold
## The table is wired to the gs_viewer so that row clicks can signal updates
## to the contrast viewer
gs_table_browser <- callModule(mgTableBrowser, 'mg_table_browser', mgc,
method=gs_result_filter()$method,
fdr=gs_result_filter()$fdr,
server=TRUE)
## clicks on gsea result table update the contrast view
observeEvent(gs_table_browser$selected(), {
.mgc <- req(mgc())
geneset <- req(gs_table_browser$selected())
updateActiveGeneSetInContrastView(session, gs_viewer, geneset, .mgc)
})
## A table of other genesets that brushed genes in the contrast viewer
## belong to. This table is also wired to the contrast viewer, so that
## a click on a row of the table will update the contrast view, too.
other_genesets_gsea <- callModule(mgGeneSetSummaryByGene,
'other_genesets_gsea',
mgc, features=gs_viewer()$selected,
method=gs_result_filter()$method,
fdr=gs_result_filter()$fdr)
## DEBUG: Can we add a DT row click listner to the `other_genesets_gsea` so
## that it updates the `gs_viewer`? My first shot at doing sends the
## application into a tailspin, my best guess is because the selection is
## still active in the interactive boxp/density plot.
## Differential Gene Expression Tab ==========================================
gene.volcano <- callModule(mgVolcano, 'dge_volcano', mgc,
width=400, height=350)
## Feature-level stats table; restricted to the brushed volcano selection
## when one exists.
output$dge_volcano_genestats <- DT::renderDataTable({
res.all <- req(lfc())
res <- res.all[, list(symbol, feature_id, logFC, pval, padj)]
selected <- gene.volcano()
# browser()
if (!is.null(selected)) {
res <- subset(res, feature_id %in% selected$feature_id)
}
renderFeatureStatsDataTable(res, filter='top', feature.link.fn=ncbi.entrez.link)
})
## Respond to user click to download differential expression statistics
output$download_dge_stats <- downloadHandler(
filename=function() "multiGSEA-feature-level-statistics.csv",
content=function(file) write.csv(lfc(), file, row.names=FALSE))
## A table of other genesets that brushed genes in the contrast viewer
## belong to. This table is also wired to the contrast viewer, so that
## a click on a row of the table will update the contrast view, too.
other_genesets_volcano <- callModule(mgGeneSetSummaryByGene,
'other_genesets_volcano',
mgc, features=gene.volcano,
method=gs_result_filter()$method,
fdr=gs_result_filter()$fdr)
})
|
703bdb36fef37bada52498a83bb7f6bbd5f2592c
|
027a491b02278271f0c3bb5173b76ec14bea3b08
|
/preliminary_study/plot.R
|
08e9e2a7ad3a387c4c5a9d89fdc253a4a3718bd2
|
[] |
no_license
|
thegcamilo/AIArt_MoralStanding
|
d3c670162092c3002459e7f30b550fdc4d9f6ea6
|
04df5c3428c93c0a7826d34dcf861130e5c988c2
|
refs/heads/master
| 2023-05-08T04:48:18.815719
| 2021-05-18T03:43:53
| 2021-05-18T03:43:53
| 354,767,548
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,289
|
r
|
plot.R
|
# Stacked bar chart of per-image judgements (Human / AI Program / Undecided)
# for the AI-art preliminary study: images are ranked by the entropy of the
# Human-vs-AI vote split, and the top 10 most ambiguous images are plotted.
library(ggplot2)
library(dplyr)
library(patchwork)
library(here)
setwd(paste(here(), "preliminary_study", sep="/"))
getwd()
df.tmp <- read.csv("data/AIArt_judgements.csv")
df <- read.csv("data/unmelted.csv")
# Count of images where "Undecided" is the most common judgement
# (computed and summed only; not used further below).
udc.max <- (df$Undecided > df$AI.Program) & (df$Undecided > df$Human)
sum(udc.max)
# n.valid = decided votes only; n = all votes including Undecided.
df <- df %>%
rowwise %>%
mutate(n.valid=AI.Program + Human) %>%
mutate(n=AI.Program + Human + Undecided)
# Entropy of the (Human, AI) vote counts for one row; higher = more ambiguous.
rowwise.entropy <- function(row) {
tmp <- c(row["AI.Program"], row["Human"])
return (entropy::entropy(tmp))
}
entropy.results <- apply(df, 1, rowwise.entropy)
df$entropy <- entropy.results
valid <- df %>%
arrange(desc(entropy), desc(n.valid))
### use this to select how many you want to visualize
valid <- valid[1:10, ]
# Long format for stacked bars; factor levels fix the stacking order.
melted.df <- reshape2::melt(valid, measure.vars=c("Human", "AI.Program", "Undecided"), variable.name="img_judge")
melted.df$img_judge <- factor(melted.df$img_judge,
levels = c("AI.Program", "Undecided", "Human"),
labels = c("AI Program", "Undecided", "Human"))
melted.df <- melted.df %>%
mutate(perc = value/n)
melted.df <- melted.df %>%
arrange(img_judge, desc(entropy)) %>%
mutate(img_n = factor(img_n, unique(img_n)))
melted.df$perc <- melted.df$perc * 100
melted.df$n_ordered <- 1:nrow(melted.df)
plot <- ggplot(melted.df, aes(x=img_n, y=perc, fill=img_judge)) +
geom_bar(stat="identity", width=0.6) +
# geom_hline(yintercept = c(60, 40), size=0.7) +
theme_bw() +
theme(
legend.position = "bottom"
) +
scale_fill_manual(
values=c("dodgerblue3", "gray", "firebrick3")
) +
ylab("Percentage (%)") +
xlab("Image #") +
labs(fill="") +
theme(
axis.text.x = element_text(size=10.5),
legend.text = element_text(size=14),
axis.text.y = element_text(size=14),
axis.title.y = element_text(size=14, margin=margin(0, 8, 0, 0)),
axis.title.x = element_text(size=14, margin=margin(4, 0, 0, 0)),
legend.margin = margin(0, 0, 0, 0),
legend.box.margin = margin(0, 0, 0, 0),
plot.margin = margin(1, 2, 1, 2)
) +
geom_rect(xmin=0.5, xmax=10.5, ymin=-1, ymax=101, fill='transparent', color='black', size=1.2) +
scale_x_discrete(labels=1:58) +
plot_annotation(tag_levels = "A") &
theme(plot.tag = element_text(size = 16)) ; plot
|
cfb3df5c05f6d7c8b0b4e8fee52d47f21c9927d8
|
bc73dbb3ba557c4e9f31976f395053325c9be018
|
/sanity_checks2.R
|
54581ed7bc5d7d49086033c262f1ecff1825bf19
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
ldecicco-USGS/passive_tox
|
c7fe0ccbe4227265ceab17a1e3fe9f3db329cf09
|
9321ef259cfeeb6888b870ae7aa97a018e21d5cc
|
refs/heads/master
| 2021-06-26T00:40:03.326522
| 2020-11-20T21:30:13
| 2020-11-20T21:30:13
| 135,317,394
| 1
| 2
| null | 2020-07-21T19:50:00
| 2018-05-29T15:35:27
|
R
|
UTF-8
|
R
| false
| false
| 6,138
|
r
|
sanity_checks2.R
|
# Sanity checks / supplemental-table update for the passive sampler study:
# corrects CAS numbers in Table S2, then attaches ToxCast assay counts per
# chemical. Requires the PASSIVE_PATH environment variable.
library(tidyverse)
library(toxEval)
library(openxlsx)
source(file = "read_chemicalSummary.R")
tox_list <- create_toxEval(file.path(Sys.getenv("PASSIVE_PATH"),
                                     "data","data_for_git_repo","clean",
                                     "passive.xlsx"))
path_to_data <- Sys.getenv("PASSIVE_PATH")
wb <- loadWorkbook(file.path(path_to_data, "Supplemental", "Table S2 - Chemical list+conc ranges.xlsx"))
df <- readWorkbook(wb, startRow = 5, check.names = TRUE)
# Cross-check of Class assignments between the spreadsheet and tox_list
# (result not used further below).
df_joined <- df %>%
  select(Class, Analyte, CAS) %>%
  filter(Analyte != "") %>%
  full_join(tox_list$chem_info, by = "CAS") %>%
  select(CAS, Class.x, Class.y)
# CAS corrections: "Dave" holds the wrong CAS currently in the sheet,
# "Laura" the corrected value to substitute.
changes = data.frame(
  chems = c("Dibenzo[a,h]anthracene", "Bupropion", "Citalopram",
            "Duloxetine", "Methadone", " Propranolol",
            "Sertraline", "tramadol", " Tris(1-chloro-2-propyl)phosphate (TCPP)"),
  Dave = c("53-07-3", "34911-55-2", "59729-33-8",
           "116539-59-4", "76-99-3", "525-66-6",
           "79617-96-2", "27203-92-5", "26248-87-3"),
  Laura = c("53-70-3", "34841-39-9", "219861-08-2",
            "136434-34-9", "1095-90-5", "318-98-9",
            "79559-97-0", "36282-47-0", "13674-84-5"),
  stringsAsFactors = FALSE)
# write.csv(changes, "change_cas.csv", row.names = FALSE)
df <- df %>%
  filter(!is.na(Analyte))
# Substitute corrected CAS numbers where a row matches a "Dave" value.
new_CAS <- df %>%
  select(CAS) %>%
  left_join(changes, by=c("CAS"="Dave")) %>%
  mutate(CAS = ifelse(!is.na(Laura), Laura, CAS)) %>%
  filter(!is.na(CAS))
orig_cas <- df$CAS
df$CAS <- new_CAS$CAS
# writeData(wb, sheet = "Table S2",
#           startRow = 6, startCol = 3,
#           x = df$CAS)
# changedCAS <- createStyle(fgFill = "steelblue2")
#
# addStyle(wb, sheet = "Table S2",
#          style = changedCAS,
#          cols = 3, gridExpand = FALSE,
#          rows = 5 + which(!orig_cas == new_CAS))
ALL_TOX_DATA <- readRDS(file.path(Sys.getenv("PASSIVE_PATH"),
                                  "data","data_for_git_repo","raw",
                                  "all_tox.rds"))
chem_info <- tox_list$chem_info
# Per-chemical counts of all ToxCast endpoints and endpoints with hits.
ALL_TOX_DATA_in_study <- ALL_TOX_DATA %>%
  select(CAS = casn, endPoint=aenm, modl_acc, flags, hitc) %>%
  filter(CAS %in% chem_info$CAS) %>%
  group_by(CAS) %>%
  summarize(`Total ToxCast assays` = length(unique(endPoint)),
            `Assays with hits` = length(unique(endPoint[hitc == 1])))
# Endpoints that survived filtering into chemicalSummary (from the sourced
# read_chemicalSummary.R).
assays_left <- chemicalSummary %>%
  select(CAS, endPoint) %>%
  distinct() %>%
  group_by(CAS) %>%
  summarize(`Assays in study` = length(unique(endPoint)))
# NOTE(review): trailing comma in the first left_join() supplies an empty
# positional argument; it happens to be harmless here but should be removed.
chem_info <- chem_info %>%
  left_join(ALL_TOX_DATA_in_study, by="CAS", ) %>%
  left_join(assays_left, by="CAS") %>%
  mutate(`Total ToxCast assays` = ifelse(is.na(`Total ToxCast assays`), 0, `Total ToxCast assays`),
         `Assays with hits` = ifelse(is.na(`Assays with hits`) &
                                       `Total ToxCast assays` != 0, 0,
                                     `Assays with hits`),
         `Assays in study` = ifelse(is.na(`Assays in study`) &
                                      `Assays with hits` != 0, 0,
                                    `Assays in study`))
# Format numbers to 2 significant digits for table output.
# Non-finite values (NA/NaN/Inf) and exact zeros are rendered as `nd_text`
# ("not detected"); everything else is rounded to 2 significant digits and
# printed in fixed notation with any padding spaces stripped.
format_2 <- function(x, nd_text = "ND"){
  is_nd <- !is.finite(x) | x == 0
  rendered <- formatC(signif(x, digits = 2), digits = 2, format = "fg")
  out <- ifelse(is_nd, nd_text, rendered)
  gsub(" ", "", out, fixed = TRUE)
}
# Assemble the per-chemical statistics block for Table S2 and write it back
# into the workbook. Values are multiplied by 1000 before formatting
# (presumably a unit conversion, e.g. ug/L -> ng/L — TODO confirm).
df_tox <- df %>%
  select(CAS, Analyte) %>%
  left_join(chem_info, by="CAS") %>%
  select(-Analyte, -chnm, -Class) %>%
  mutate(`2010_MDL` = format_2(1000*`2010_MDL`, nd_text = "--"),
         `2010_MQL` = format_2(1000*`2010_MQL`, nd_text = "--"),
         `2014_MDL` = format_2(1000*`2014_MDL`, nd_text = "--"),
         `2014_MQL` = format_2(1000*`2014_MQL`, nd_text = "--"))
# writeData(wb, sheet = "Table S2",
#           startRow = 5, startCol = 24,
#           x = df_tox)
# Per-chemical / per-sampling-year min/max/mean/median of detected values
# (Value > 0), detection counts (n_dets = rows without a remark comment),
# pivoted wide by Sample Date, then combined with the assay counts above.
chem_stats <- tox_list$chem_data %>%
  mutate(Value = 1000 * Value) %>%
  group_by(CAS) %>%
  mutate(tots_mean = format_2(mean(Value[Value > 0])),
         tots_median = format_2(median(Value[Value > 0]))) %>%
  group_by(CAS, `Sample Date`, tots_mean, tots_median) %>%
  summarise(min = min(Value[Value > 0]),
            max = max(Value),
            mean = mean(Value[Value > 0]),
            median = median(Value[Value > 0]),
            n_dets = sum(is.na(comment)),
            samples = n()) %>%
  ungroup() %>%
  mutate(max = format_2(max),
         min = format_2(min),
         mean = format_2(mean),
         median = format_2(median),
         n_dets = format_2(n_dets, nd_text = "0")) %>%
  pivot_wider(id_cols = c("CAS", "tots_mean", "tots_median"),
              names_from = `Sample Date`, values_fill = list(mean = "--",
                                                             min = "--",
                                                             max = "--",
                                                             median = "--",
                                                             samples = "0",
                                                             n_dets = "--"),
              values_from = c("mean", "min", "max", "median", "n_dets", "samples")) %>%
  arrange(match(CAS, df$CAS)) %>%
  left_join(df_tox, by = "CAS") %>%
  rowwise() %>%
  mutate(min_tots = format_2(min(c(as.numeric(min_2010), as.numeric(min_2014)), na.rm = TRUE)),
         max_tots = format_2(max(c(as.numeric(max_2010), as.numeric(max_2014)), na.rm = TRUE))) %>%
  select(CAS,
         `Total ToxCast assays`, `Assays with hits`, `Assays in study`,
         min_2010, max_2010, median_2010, mean_2010, n_dets_2010, samples_2010, `2010_MDL`, `2010_MQL`,
         min_2014, max_2014, median_2014, mean_2014, n_dets_2014, samples_2014, `2014_MDL`, `2014_MQL`,
         min_tots, max_tots, tots_median, tots_mean, sites_det, sites_tested)
# Write the stats (without the CAS key column) into the sheet; a local
# test.xlsx copy is saved alongside the final supplemental workbook.
writeData(wb, sheet = "Table S2",
          startRow = 6, startCol =6, colNames = FALSE,
          x = select(chem_stats, -CAS))
saveWorkbook(wb, file = "test.xlsx", overwrite = TRUE)
saveWorkbook(wb, file = file.path(path_to_data, "Supplemental", "Table2_Combo.xlsx"), overwrite = TRUE)
|
91c5a58072c85cc1f5899ba29578821eaabbfcdc
|
b12382e16a602599d0725b5446bb6e5680da3e66
|
/R/Newton-Raphson/NLS_NR/Scripts/main_simulation.R
|
a15a05b0eb4c7e17d42b918e605c8c991d1d1d7a
|
[] |
no_license
|
hhnguyen2/Summer-2016
|
edd9de12fff00d5af3c058c30a7d07e71ba4289b
|
bccd6810ce3c49f8a0925b1954cff7643eb9648c
|
refs/heads/master
| 2021-01-20T17:20:31.080061
| 2016-08-04T20:11:56
| 2016-08-04T20:11:56
| 60,541,681
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,013
|
r
|
main_simulation.R
|
## CONDUCT SIMULATION
# Single simulation run for the NLS Newton-Raphson study: generates data,
# estimates gamma and alpha by optim / Newton-Raphson / glm, then computes
# the weighted-least-squares coefficients B three equivalent ways.
# Helper functions (gen_sim.data, newtonRaphson, opt_fr, ...) are defined
# elsewhere in this project.
# Set initial conditions and initialize variables
init_gamma <- rep(0,5)
init_alpha <- rep(0,5)
init_B <- rep(0,2)
# Set truth
my.gamma <- c(0.25,-0.25,0.10,-0.45,0.75) # my.gamma: logistic parameters for Z
alph <- c(1,0,0,0,0) # alph: logistic parameters for A|Z,X
mu <- c(1.62,0,0.58,-1.54,-0.65,0.55,1.26) # mu: logistic parameters for A, delta, x
#psi <- 0.6
#psi <- c(0.25,0.17,0.13,0.38,-0.16) # psi: used to coerce Pr.A.zx between (0,1)
# NOTE(review): both definitions of psi are commented out, yet psi is passed
# to gen_sim.data() below — as written this errors unless psi exists in the
# calling environment. Confirm which psi definition is intended.
####################
## BEGIN SIMULATION
### Set initial seed
i <- 550
set.seed(-23018349 - i)
## Generate simulation data
sim.data <- gen_sim.data(my.gamma,alph,mu,psi)
xi <- extract_xi(sim.data)
## Approximate gamma_hat by fitting logit Pr(z|X) = gamma'x
## (three estimators compared: optim, Newton-Raphson, glm; NR is used)
gamma_opt <- optim(init_gamma,opt_fr,data=sim.data)$par
gamma_nr <- newtonRaphson(init_gamma,sim.data,usem1 = FALSE)
gamma_glm <- as.numeric(glm(zi~x.1+x.2+x.3+x.4,data=sim.data,family=binomial)$coefficients)
gamma_hat <- gamma_nr
## Generate f.zx & W into preallocated spot in sim.data
sim.data$f.hat.zx <- gen_f.hat.zx(gamma_hat,sim.data)
sim.data$W <- gen_W(sim.data)
## Fit E[W|X] = {exp(alpha'x) - 1} / {exp(alpha'x) + 1} for each i
# Approximate alpha_hat (Newton-Raphson estimate is the one used)
alpha_opt <- optim(init_alpha,opt_grr,data=sim.data)$par
alpha_nr <- newtonRaphson(init_alpha,sim.data,usem1 = TRUE)
alpha_hat <- alpha_nr
## Generate E[W|X],R, M for each person i
sim.data$E.Wx <- gen_E.Wx(alpha_hat,sim.data)
sim.data$R <- sim.data$Yi / sim.data$E.Wx
sim.data$M <- 1 / (sim.data$f.hat.zx)
## Define Z_bar, M_bar, R_bar
Z_bar <- cbind(sim.data$ones,sim.data$zi)
M_bar <- diag(sim.data$M)
R_bar <- sim.data$R
## Compute B_bar three ways: closed-form WLS, lm(), and optim()
# NOTE(review): "weight =" relies on partial matching of lm's "weights" arg.
B_closed <- solve(t(Z_bar) %*% M_bar %*% Z_bar) %*% (t(Z_bar) %*% M_bar %*% R_bar)
B_lm <- lm(R~1+zi, weight = 1/f.hat.zx, data = sim.data)$coefficients
B_opt <- optim(init_B,opt_hrrr,data=sim.data)$par
## Output
## Diag — bare names print only when run interactively / sourced with echo
my.gamma
gamma_nr
gamma_glm
gamma_opt
alph
alpha_nr
alpha_opt
B_closed
B_lm
B_opt
mu
## END SIMULATION
###################
|
2dd5972b4706fb4c488908f51a0fa4cf41bbac4f
|
6ed875a510b7db9f5d44852b3e836cf78ff91d15
|
/home/data/minap_rscript.r
|
569878325e6a936a131c88f4829b23ddd869a58f
|
[] |
no_license
|
maya70/qualdemo
|
1547ba7b63ea7451286559014d76cc0031fe6800
|
80688db47ec4d2ea6aea54c69aa2621fdce38104
|
refs/heads/master
| 2020-06-22T18:35:06.328545
| 2019-07-19T13:06:21
| 2019-07-19T13:06:21
| 197,774,229
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,747
|
r
|
minap_rscript.r
|
# read raw admission data from its current location
# MINAP admission preprocessing: reads the raw audit CSV, normalizes all
# date-formatted columns to ISO form, derives a P2Y12 discharge flag, and
# writes one CSV per admission year plus an index of available years.
library(readr)
library(lubridate)
library(parsedate)
source_file_path <- "C:/Users/scsmel/Dropbox/Leeds/Qualdash related/Data/"
dest_file_path <- "C:/Bitnami/wampstack-7.1.13-1/apache2/htdocs/Qualdashv1/home/data/minap_admission/"
#dateFormat <- "%d-%m-%y %H:%M"
dateFormat <- "%d/%m/%Y %H:%M"
audit_filename <- "minap_dummy.csv"
# NOTE(review): "source" shadows base::source for the rest of this script.
source = paste(source_file_path, audit_filename, sep='')
madmission <- read_csv(source)
# get years in data
admdate <- as.Date(madmission$`3.06 ArrivalAtHospital`, format=dateFormat)
adyear <- year(admdate)
madmission <- cbind(madmission, adyear)
# Select all columns with Date data type
# Heuristic: a column counts as a date column if AT LEAST ONE value parses
# under dateFormat (not all values), since partially-filled columns exist.
allDates <- lapply(madmission, function(x) !all(is.na(as.Date(as.character(x),format=dateFormat))))
df <- as.data.frame(allDates)
colnames(df) <- colnames(madmission)
dateFields <- df[which(df==TRUE)]
# Unify date formats to ISO format
# NOTE(review): this stores POSIXlt values in a data frame, which is
# generally discouraged (POSIXct is the usual choice) — confirm downstream
# consumers rely on the current output before changing.
for(col in colnames(madmission)){
if(col %in% colnames(dateFields)){
vector <- madmission[col]
temp <- lapply(vector, function(x) as.POSIXlt(x, format=dateFormat))
madmission[col] <- temp
}
}
# Derived columns
# P2Y12 = discharged on a thienopyridine OR ticagrelor (NA if either is NA
# and the other is not 1).
v427 <- madmission$`4.27 DischargedOnThieno` == 1
v431 <- madmission$`4.31 DischargedOnTicagrelor` == 1
madmission$P2Y12 <- as.numeric(v431 | v427)
# break it into separate files for individual years
# and store the new files in the MINAP admissions folder under document root
for(year in unique(adyear)){
tmp = subset(madmission, adyear == year)
fn = paste(dest_file_path, gsub(' ','', year), '.csv', sep='' )
write.csv(tmp, fn, row.names = FALSE)
}
# Index file listing every admission year that was written out.
yfn = paste(dest_file_path, 'avail_years.csv', sep='' )
write.csv(unique(adyear), yfn, row.names = FALSE)
|
404eae9494adfd4c729a9767dd2aaed000f837d4
|
1651d18a23bb6e822aa73fac319111a786afe8a4
|
/man/clusterExtractp.Rd
|
b5571ad8ab5d4ab412999e9d7e589b85a932b00d
|
[] |
no_license
|
fomotis/cyanoFilter
|
ac1ba3b95225bc67b3130019160101a79435816c
|
5c358c86a9e22670b84d80ac73135afbdf2457e9
|
refs/heads/master
| 2021-08-01T21:38:38.323652
| 2021-07-29T14:01:03
| 2021-07-29T14:01:03
| 150,075,530
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,903
|
rd
|
clusterExtractp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cluster_extract.R
\name{clusterExtractp}
\alias{clusterExtractp}
\title{takes a flowframe, name of cluster column and extracts part of flowframe
that makes up proportion.}
\usage{
clusterExtractp(flowfile, cluster_var = "Clusters", proportion = 1)
}
\arguments{
\item{flowfile}{flowframe after debris are removed.}
\item{cluster_var}{column name in expression matrix containing the
cluster indicators}
\item{proportion}{value between 0 and 1 indicating percentage of the total
particles wanted}
}
\value{
a list containing \itemize{
\item \strong{particles_per_cluster}
\item \strong{clusters_proportion}
\item \strong{flowfile_proportion}
}
}
\description{
takes a flowframe, name of cluster column and extracts part of flowframe
that makes up proportion.
}
\examples{
flowfile_path <- system.file("extdata", "B4_18_1.fcs",
package = "cyanoFilter",
mustWork = TRUE)
flowfile <- flowCore::read.FCS(flowfile_path, alter.names = TRUE,
transformation = FALSE, emptyValue = FALSE,
dataset = 1)
flowfile_nona <- cyanoFilter::noNA(x = flowfile)
flowfile_noneg <- cyanoFilter::noNeg(x = flowfile_nona)
flowfile_logtrans <- cyanoFilter::lnTrans(x = flowfile_noneg,
c('SSC.W', 'TIME'))
cells_nonmargin <- cyanoFilter::cellMargin(flowframe = flowfile_logtrans,
Channel = 'SSC.W',
type = 'estimate', y_toplot = "FSC.HLin")
fin <- phytoFilter(flowfile = reducedFlowframe(cells_nonmargin),
pig_channels = c("RED.B.HLin", "YEL.B.HLin", "RED.R.HLin"),
com_channels = c("FSC.HLin", "SSC.HLin"))
clusterExtractp(flowfile = reducedFlowframe(fin),
cluster_var = "Clusters",
proportion = 0.80)
}
|
534128f6b927a1f9547baa9a2576607efe2eb76a
|
c35961807afd74625a5285071ae8db15254ca59d
|
/Player performance/playereffects.R
|
c084a25e5be323f869a5f6800f98033c19dda008
|
[] |
no_license
|
miketar27/hockey
|
0cfde5a84dcba2af788a4bbed06ec0aa598276c3
|
f4c2db836f08f11168478c41804ccf39e6c2a33d
|
refs/heads/master
| 2021-01-12T04:51:25.252345
| 2013-11-22T02:51:57
| 2013-11-22T02:52:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,346
|
r
|
playereffects.R
|
## set the working directory (user specific)
setwd("/Users/apple/Dropbox/Sen/hockey/player\ effect")
# load the 'gamlr' package
library(gamlr)
library(Matrix)
# Read in the data set
load("../shot\ and\ goal\ comparison/results/hockey_goals.rda")
## Build up the design matrices and response vectors
x <- cBind(config,onice)
# y=1 for Home goals and y=0 for Away goals
y <- as.numeric(goal$who == "HOME")
# declare lists to store the by-year design matrices and response vectors
x_byyear <- list()
y_byyear <- list()
j = 1
# parse the big design matrix in terms of season
for (i in levels(factor(goal$season))){
ind <- which(goal$season == i)
x_byyear[[j]] <- x[ind,]
y_byyear[[j]] <- y[ind]
j <- j+1
}
# gamlr (lasso) regression on the entire career
fit_career <- gamlr(x=x, y=y, family="binomial", standardize=FALSE, gamma=0,
verb=FALSE, free=1:ncol(config))
# coefficients which give the minimum AIC
# pull out the coefficients for players
beta_career <- coef(fit_career, which.min(AIC(fit_career)))[-c(1:8),]
# gamlr (lasso) regression on each season
# gives an error of infinite likelihood for i=10
beta_season <- list()
for (i in 1:length(x_byyear)){
fit_season <- gamlr(x=x_byyear[[i]], y=y_byyear[[i]], family="binomial", standardize=FALSE, gamma=0,
verb=FALSE, free=1:ncol(config))
beta_season[[i]] <- coef(fit_season, which.min(AIC(fit_season)))[-c(1:8),]
}
# beta matrix with dim=nplayers*(1+nseasons), 1 here represents career
beta <- Matrix(cbind(beta_career,do.call("cbind", beta_season)), sparse=TRUE)
colnames(beta) <- c("career",levels(factor(goal$season)))
# output the beta matrix to a file
# rows with all zero entries are deleted
beta_summary <- summary(beta) # including the row, col, val of nonzero elements
beta_df <- data.frame(player=rownames(beta)[beta_summary$i], season=colnames(beta)[beta_summary$j],
effect=beta_summary$x)
write.table(beta_df,row.names=FALSE,sep="|",file="playereffect.txt")
# read it back to check
beta_df_check <- read.table("playereffect.txt",colClasses=c("factor","factor","numeric"),header=TRUE,sep="|")
beta_check <- sparseMatrix(i=as.numeric(beta_df_check$player),j=as.numeric(beta_df_check$season),
x=beta_df_check$effect,dimnames=list(levels(beta_df_check$player),levels(beta_df_check$season)))
|
64d287cf576e454a77b13e9c412c384698e7b536
|
e1028dc1fbccf8988375330dffc3cf7c7dbde24a
|
/man/convertIncome2incomeGroup.Rd
|
430d8e6a96e8e43eccfee0977b80d4e6db1b3eec
|
[] |
no_license
|
n8thangreen/STIecoPredict
|
aab10d94b24e825273f91d67ba6d425854109df5
|
48369489baf9f8ea152f929758fdb625b6bab7fc
|
refs/heads/master
| 2020-12-09T17:55:11.140024
| 2020-06-02T14:25:18
| 2020-06-02T14:25:18
| 37,931,678
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 387
|
rd
|
convertIncome2incomeGroup.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Convert2Groups.R
\name{convertIncome2incomeGroup}
\alias{convertIncome2incomeGroup}
\title{Convert Income to Income Group}
\usage{
convertIncome2incomeGroup(income)
}
\arguments{
\item{income}{(integer)}
}
\value{
character strings of categories
}
\description{
Convert Income to Income Group
}
\examples{
}
|
adfcac6ef06e66770d626564efb8afbabe3e3cd7
|
8d96a11ce8eb0687f69070d7d8d511632d289ec1
|
/R Data Visualisations - Tasmin.R
|
5a06360010b45aa6d5e60f93fafd794982e73957
|
[] |
no_license
|
TasminG/HLTWK10Tasmin
|
12fd4506682a7830fcc35fc4abf385285d341653
|
3851ab7448b1b2ab08060979e54a81f59a53bee1
|
refs/heads/main
| 2023-08-23T22:15:15.863784
| 2021-10-07T19:47:59
| 2021-10-07T19:47:59
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 537
|
r
|
R Data Visualisations - Tasmin.R
|
install.packages("ggplot2")
library("ggplot2")
#Using the ggplot in-built data sets in RStudio and the qplot function
#create data visualization using your preferred data set.
str(mpg)
qplot(displ, hwy, color = class, data = mpg)
plot(mtcars$mpg, xlab = "Number of cars", ylab = "Miles per Gallon", col = "red")
plot(mtcars$hp,mtcars$mpg, xlab = "HorsePower", ylab = "Miles per Gallon", type = "h", col = "red")
table(mpg$manufacturer)
ggplot(data=mpg) + geom_bar(mapping=aes(x=manufacturer,fill=manufacturer))
|
7ff6329d42e8014250033f4310b80b7de29f882a
|
2bd3c511663349c7b493d57a9f9bbe228f8a9852
|
/.Rproj.user/4B7E6015/sources/s-BDF32986/38BC7E9A-contents
|
c8d5a0771aabfed64cb80e1acad0bb2dc516b262
|
[] |
no_license
|
dndufufu/rmytools
|
29ed0f90703b103eaf756ec210173ca7003ac6bc
|
2e7e06137739408a152e1587471f98a837ad8bbc
|
refs/heads/master
| 2023-07-11T05:33:52.309637
| 2021-08-13T00:53:26
| 2021-08-13T00:53:26
| 395,356,705
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,010
|
38BC7E9A-contents
|
#' Using a dataframe specific column to plot a pie figure
#'
#' @param data a dataframe or tibble
#' @param ncol specific column, factor
#'
#' @return a figure as ggplot2 object
#' @export
#'
#' @examples
#' my_pie(mtcars,ncol=2)
#' my_pie(diamonds,ncol=3)
#' my_pie(diamonds,ncol=3)+guides(fill="none")
#'
my_pie <- function(data,ncol){
plotdat <- as.data.frame(table(data[,ncol])) %>% dplyr::arrange(-Freq)
plotdat$Label <- paste(plotdat$Var1, paste0("(",round(((plotdat$Freq/sum(plotdat$Freq))*100),2),"%)"))
p <- ggplot(plotdat, aes (x="", y = Freq, fill = factor(Var1))) +
geom_col(position = 'stack', width = 1) +
geom_text_repel(aes(label = Label, x = 1.3),size=5,
position = position_stack(vjust = 0.5)) +
theme_classic() +
theme(plot.title = element_text(hjust=0.5),
axis.line = element_blank(),
axis.text = element_blank(),
axis.ticks = element_blank()) +
labs(fill = "Category",x = NULL,y = NULL) +
coord_polar("y")
}
|
|
9faff5f69b649856ac4e64ea641a40bbe49129b8
|
d7506b0faaef0363c0c28d401b736b83d6db83c3
|
/R/modify_pal.R
|
6463b258fa3718abea4d284139cdeb3fe14b9cf0
|
[
"MIT"
] |
permissive
|
GenChangHSU/PalCreatoR
|
9cff9754dc20c56377188db9c2d171051f60bf89
|
c6b97c276f3cca58fad66529cfaefc7ed7b3c8ec
|
refs/heads/master
| 2023-04-01T23:32:20.095353
| 2021-04-01T15:13:38
| 2021-04-01T15:13:38
| 322,634,068
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,551
|
r
|
modify_pal.R
|
#' @title Modify alpha transparency of the colors in the palette
#'
#' @description \code{modify_pal} modifies alpha transparency of the colors in the palette.
#'
#' @param pal a vector of hexadecimal color codes (not necessarily returned from \code{\link[PalCreatoR:create_pal]{create_pal}).
#' @param alpha a single number or a vector of numbers between 0 and 1. These values define the degree
#' of transparency of the colors in the palette. If \code{alpha} is a single number, the transparency of
#' all the colors in the palette will be set to that value; if \code{alpha} is a vector of numbers, the
#' transparency of the colors in the palette will be set to the corresponding alpha values.
#' Also note that if the vector lengths of \code{pal} and \code{alpha} differ, extra elements in the longer
#' vector will be omitted to match the length of the shorter one. See 'Details' section for more information
#' on the concept of alpha transparency.
#' @param show.pal logical. Whether to display the modified palette or not. Default to \code{TRUE}.
#' @param title a character string giving the title of the displayed palette.
#' @param ... additional arguments passed to \code{\link[ggplot2:theme]{ggplot2::theme}}.
#'
#' @details An alpha value defines the "transparency", or "opacity" of the color. A value of 0 means completely
#' transparent (i.e., the background will completely “show through”); a value of 1 means completely opaque
#' (i.e., none of the background will “show through”). In short, the lower the alpha value is, the lower "amount"
#' of the color will be.
#'
#' @return A vector of hexadecimal color codes with two additional digits defining the degree of transparency.
#'
#' @importFrom magrittr "%>%"
#'
#' @examples \dontrun{
#' library(PalCreatoR)
#' image_path <- system.file("Mountain.JPG", package = "PalCreatoR")
#'
#' My_pal <- create_pal(image = image_path,
#' n = 5,
#' resize = 0.1,
#' method = "kmeans",
#' colorblind = FALSE,
#' sort = "value",
#' show.pal = TRUE,
#' title = "My Palette")
#'
#' My_new_pal <- modify_pal(pal = My_pal,
#' alpha = c(0.2, 0.4, 0.6, 0.8, 1.0),
#' show.pal = TRUE,
#' title = "My New Palette")
#' print(My_new_pal)}
modify_pal <- function(pal,
alpha,
show.pal = TRUE,
title = "",
...) {
# Error messages -------------------------------------------------------------------------
# 1. pal argument
color_check <- sapply(pal, function(X){
grepl(pattern = "^#[0-9A-Fa-f]{6}$", x = X)
})
if (any(color_check == F)) {
stop('One or more incorrect hex color codes passed in the "pal" argument!')
}
# 2. alpha argument
if (any(!alpha <= 1) || any(!alpha >= 0)) {
stop('One or more incorrect values passed in the "alpha" argument!')
}
# 3. show.pal argument
if (is.logical(show.pal) == F) {
stop('Argument passed to "show.pal" is not logical!')
}
# Function body -----------------------------------------------------------
# 1. Check the lengths of the pal and alpha vectors
if (length(pal) != length(alpha) &&
length(pal) != 1 && length(alpha) != 1) {
l_pal <- length(pal)
l_alpha <- length(alpha)
n_shorter <- min(l_pal, l_alpha)
df <- data.frame(hex = pal[1:n_shorter], alpha = alpha[1:n_shorter])
warning(
'The lengths of "pal"" and "alpha" differ; extra elements in the longer vector
are omitted to match the length of the shorter one!'
)
} else {
df <- data.frame(hex = pal, alpha = alpha)
}
# 2. Get the hex codes with the additional two alpha digits
hex_codes <- purrr::map2(
.x = df$hex,
.y = df$alpha,
.f = function(x, y) {
rgb_val <- col2rgb(x, alpha = F) %>% as.vector()
hex_code <-
rgb(
r = rgb_val[1],
g = rgb_val[2],
b = rgb_val[3],
alpha = y * 255,
maxColorValue = 255
)
return(hex_code)
}
) %>% unlist()
# 3. Visualize the palette
n <- length(hex_codes)
if (show.pal == T) {
if (n <= 10) {
Pal_df <- hex_codes %>%
data.frame(Hex_code = .) %>%
dplyr::mutate(x = rep(1, n),
y = 10:(10 - n + 1))
}
if (n > 10 & n %% 10 != 0) {
q <- n %/% 10
m <- n %% 10
Pal_df <- hex_codes %>%
data.frame(Hex_code = .) %>%
dplyr::mutate(x = c(rep(1:q, each = 10), rep(q + 1, m)),
y = c(rep(10:1, q), 10:(10 - m + 1)))
}
if (n > 10 & n %% 10 == 0) {
q <- n %/% 10
Pal_df <- hex_codes %>%
data.frame(Hex_code = .) %>%
dplyr::mutate(x = c(rep(1:q, each = 10)),
y = c(rep(10:1, q)))
}
p <- ggplot2::ggplot(Pal_df, ggplot2::aes(x = x, y = y)) +
ggplot2::geom_tile(ggplot2::aes(fill = Hex_code)) +
ggplot2::geom_label(ggplot2::aes(label = Hex_code),
fill = "grey",
size = 4) +
ggplot2::scale_fill_identity() +
ggplot2::theme_void() +
ggplot2::labs(title = title) +
ggplot2::theme(plot.title = ggplot2::element_text(hjust = 0.5, size = 18)) +
ggplot2::theme(...)
print(p)
}
# 9. Return the palette vector
return(hex_codes)
}
|
917e15af4bfed45994f53ef4b5abf6ca3d0206f4
|
86b16636c31254eb27c63574f369e9d1b6c73c52
|
/R/mod_marcacao_ponto.R
|
c0c52989c8a4ecd877842512000e6cdd08bdedea
|
[
"MIT"
] |
permissive
|
leandro-vento/Ponto
|
cee8f122fd225e2466559b79323ba122e6a589a2
|
870269bf23918a058a2f5c1bb450c298538b25b3
|
refs/heads/master
| 2022-12-07T16:33:12.269521
| 2020-08-20T14:57:54
| 2020-08-20T14:57:54
| 288,650,351
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,479
|
r
|
mod_marcacao_ponto.R
|
#' marcacao_ponto UI Function
#'
#' @description A shiny Module.
#'
#' @param id,input,output,session Internal parameters for {shiny}.
#'
#' @noRd
#'
#' @importFrom shiny NS tagList
mod_marcacao_ponto_ui <- function(id){
ns <- NS(id)
tabItem(tabName = "marcacao_ponto", align = "center",
fluidRow(
column(8,
box(width = 12,
solidHeader = TRUE,
status = "primary",
title = "Ponto",
align = "center",
fluidRow(
column(1, align = "left",
uiOutput(ns("botao_start_stop"))
),
column(4,
htmlOutput(ns("horas_dia"))
),
column(5,
htmlOutput(ns("horas_mes"))
),
column(2, align = "right",
actionButton(ns("editar_ponto"), label = NULL, icon = icon("edit")),
actionButton(ns("excluir_ponto"), label = NULL, icon = icon("times"))
)
),
fluidRow(
column(12,
DT::dataTableOutput(ns("lista_ponto"))
)
)
)
),
column(4,
box(width = 12,
solidHeader = TRUE,
status = "primary",
title = "Status",
align = "center",
fluidRow(
column(12, align = "left",
actionButton(ns("atualizar_lista_online_offline"), label = NULL, icon = icon("sync"))
)
),
fluidRow(
column(12,
DT::dataTableOutput(ns("lista_online_offline"))
)
)
)
)
)
)
}
#' marcacao_ponto Server Function
#'
#' @noRd
mod_marcacao_ponto_server <- function(input, output, session, usuario, con){
ns <- session$ns
con <- con
# Função que converte decimal para formato hora para exibição----
converter_decimal_para_hora <- function(tempo){
horas <- as.numeric(as.integer(tempo))
saldo1 <- tempo - horas
minutos <- as.numeric(as.integer(saldo1*60))
saldo2 <- saldo1 - (minutos/60)
segundos <- as.numeric(as.integer(saldo2*3600))
convertido <- paste0(if(nchar(horas)==1){0}, horas, ":", if(nchar(minutos)==1){0}, minutos, ":", if(nchar(segundos)==1){0}, segundos)
convertido
}
# Função para atualizar as horas realizadas no dia----
horas_dia_refresh <- function(){
output$horas_dia <- renderText({
FuncionarioLogado <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Funcionarios WHERE Login='", usuario$usuario(), "'"))
if(nrow(FuncionarioLogado) > 0){
PontoFuncionario <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Ponto WHERE Login='", usuario$usuario(), "' AND Data=DATE('now', 'localtime')"))
horas_dia <- sum(PontoFuncionario$SaldoNumero, na.rm = TRUE)
horas_dia <- converter_decimal_para_hora(horas_dia)
paste0("<font color=\"#000000\"><b>Horas no dia: ", horas_dia, "</b></font>")
}else{
paste0("")
}
})
}
horas_dia_refresh()
# Função para atualizar as horas realizadas no mês----
horas_mes_refresh <- function(){
output$horas_mes <- renderText({
FuncionarioLogado <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Funcionarios WHERE Login='", usuario$usuario(), "'"))
if(nrow(FuncionarioLogado) > 0){
PontoFuncionario <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Ponto WHERE Login='", usuario$usuario(), "' AND strftime('%M/%Y', Data)=strftime('%M/%Y', DATE('now', 'localtime'))"))
horas_mes <- sum(PontoFuncionario$SaldoNumero, na.rm = TRUE)
horas_mes <- converter_decimal_para_hora(horas_mes)
paste0("<font color=\"#000000\"><b>Horas no mês: ", horas_mes, "</b></font>")
}else{
paste0("")
}
})
}
horas_mes_refresh()
# Função que exibe a lista dos funcionários que estão online e offline----
lista_online_offline_refresh <- function(){
output$lista_online_offline <- DT::renderDataTable(DT::datatable({
ListaFuncionarios <- RSQLite::dbGetQuery(conn = con, "SELECT * FROM Funcionarios")
ListaPonto <- RSQLite::dbGetQuery(conn = con, paste0("SELECT DISTINCT Login, Status FROM Ponto WHERE Status='online'"))
ListaFuncionarios <- dplyr::select(ListaFuncionarios, Login, Nome, HorasDiarias, Equipe)
ListaFuncionarios <- dplyr::right_join(ListaPonto, ListaFuncionarios, by = "Login")
ListaFuncionarios <- dplyr::select(ListaFuncionarios, Nome, Status)
ListaFuncionarios$Status <- ifelse(is.na(ListaFuncionarios$Status), "offline", ListaFuncionarios$Status)
ListaFuncionarios
}, options = list(pageLength = 27, dom = "tp", scrollX = TRUE, scrollY = TRUE), colnames = c("Nome", "Status"), rownames = FALSE, selection = "single") %>%
DT::formatStyle("Status", target = 'row',
color = "#FFFFFF",
backgroundColor = DT::styleEqual(c("online", "offline"), c("green", "red")))
)
}
lista_online_offline_refresh()
# Evento para atualizar a lista online-offline----
observeEvent(input$atualizar_lista_online_offline, {
lista_online_offline_refresh()
})
# Verificação para renderizar o botão de play ou stop----
observe({
FuncionarioLogado <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Ponto WHERE Login='", usuario$usuario(), "' AND Data=DATE('now', 'localtime') AND Status='online'"))
if(nrow(FuncionarioLogado) > 0){
output$botao_start_stop <- renderUI({
actionButton(ns("adicionar_ponto"), label = NULL, icon = icon("stop"))
})
}else{
output$botao_start_stop <- renderUI({
actionButton(ns("adicionar_ponto"), label = NULL, icon = icon("play"))
})
}
})
# Função que atualiza a lista de ponto do funcionário----
lista_ponto_refresh <- function(){
output$lista_ponto <- DT::renderDataTable(DT::datatable({
ListaPonto <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Ponto WHERE Login='", usuario$usuario(), "' AND Data=DATE('now', 'localtime')"))
ListaPonto <- dplyr::select(ListaPonto, Entrada, Saida, SaldoHora)
ListaPonto
}, options = list(pageLength = 27, dom = "tp", scrollX = TRUE, scrollY = TRUE), colnames = c("Entrada", "Saida", "Saldo"), rownames = FALSE, selection = "single"))
}
# Função que exibe a lista de ponto se o usuário está logado----
observe({
FuncionarioLogado <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Funcionarios WHERE Login='", usuario$usuario(), "'"))
if(nrow(FuncionarioLogado) > 0){
lista_ponto_refresh()
}
})
# Evento para adicionar uma nova marcação no ponto----
observeEvent(input$adicionar_ponto, {
FuncionarioLogado <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Funcionarios WHERE Login='", usuario$usuario(), "'"))
if(nrow(FuncionarioLogado) > 0){
ListaPontoFuncionarioData <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Ponto WHERE Login='", usuario$usuario(), "' AND Data=DATE('now', 'localtime')"))
# Inserir nova Entrada
if(nrow(ListaPontoFuncionarioData) == 0){
RSQLite::dbSendQuery(con, paste0("INSERT INTO Ponto (Login, Data, Entrada, Status) VALUES ('", usuario$usuario(), "', DATE('now', 'localtime'), TIME('now', 'localtime'), 'online')"))
output$botao_start_stop <- renderUI({
actionButton(ns("adicionar_ponto"), label = NULL, icon = icon("stop"))
})
lista_online_offline_refresh()
}
# Se o funcionario marcou o ponto no dia
else if(nrow(ListaPontoFuncionarioData) > 0){
ListaPontoFuncionarioDataEntradaSaida <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Ponto WHERE Login='", usuario$usuario(), "' AND Data=DATE('now', 'localtime') AND Status='online'"))
# Inserir Saída
if(nrow(ListaPontoFuncionarioDataEntradaSaida) > 0){
RSQLite::dbSendQuery(con, paste0("UPDATE Ponto SET Saida = TIME('now', 'localtime'), Status = 'offline', SaldoNumero = (CAST((strftime('%s', TIME('now', 'localtime'))-strftime('%s', Entrada)) AS REAL)/3600), SaldoHora = '' WHERE Login='", usuario$usuario(), "' AND Data=DATE('now', 'localtime') AND Saida IS NULL"))
SaldoNumero <- RSQLite::dbGetQuery(conn = con, paste0("SELECT * FROM Ponto WHERE Login='", usuario$usuario(), "' AND Data=DATE('now', 'localtime') AND Status='offline' AND SaldoHora=''"))
SaldoHora <- converter_decimal_para_hora(SaldoNumero$SaldoNumero)
RSQLite::dbSendQuery(con, paste0("UPDATE Ponto SET SaldoHora = '", SaldoHora, "' WHERE Login='", usuario$usuario(), "' AND Data=DATE('now', 'localtime') AND SaldoHora = ''"))
output$botao_start_stop <- renderUI({
actionButton(ns("adicionar_ponto"), label = NULL, icon = icon("play"))
})
lista_online_offline_refresh()
horas_dia_refresh()
horas_mes_refresh()
}
# Inserir nova Entrada
else{
RSQLite::dbSendQuery(con, paste0("INSERT INTO Ponto (Login, Data, Entrada, Status) VALUES ('", usuario$usuario(), "', DATE('now', 'localtime'), TIME('now', 'localtime'), 'online')"))
output$botao_start_stop <- renderUI({
actionButton(ns("adicionar_ponto"), label = NULL, icon = icon("stop"))
})
lista_online_offline_refresh()
}
}
}
lista_ponto_refresh()
})
# Evento para Editar a marcação no ponto----
observeEvent(input$editar_ponto, {
})
# Evento para Excluir a marcação no ponto----
observeEvent(input$excluir_ponto, {
if(length(input$lista_ponto_rows_selected) > 0){
showModal(
tags$div(id="modal1",
modalDialog(size = "m",
title = NULL,
box(width = 12,
status = "primary",
solidHeader = TRUE,
align = "center",
title = "Tem certeza que deseja excluir essa marcação?",
fluidRow(
column(6, align = "center",
actionButton(ns("excluir_marcacao_sim"), label = h4("SIM"))
),
column(6, align = "center",
actionButton(ns("excluir_marcacao_nao"), label = h4("NÃO"))
)
)
),
easyClose = TRUE,
footer = NULL
)
)
)
}
})
}
## To be copied in the UI
# mod_marcacao_ponto_ui("marcacao_ponto_ui_1")
## To be copied in the server
# callModule(mod_marcacao_ponto_server, "marcacao_ponto_ui_1")
|
e3e63df316ac0ebccdcf03f0f91d21d2f2d43470
|
91da7c1d3fa3a1db0eb324587a5701354b2bbfc7
|
/homework_files/homework_1/homework1.R
|
9c9ff749fe902a99298922d18c7c90842a3f6e69
|
[] |
no_license
|
zidan40o0/R_workshop
|
6ab721d023f0830fd0dc6d1dc0a72e9fc1d3c3fb
|
1a682e44fa471fd7d443f93c3aa6a39258636768
|
refs/heads/main
| 2023-03-27T17:26:22.153974
| 2021-03-11T13:05:43
| 2021-03-11T13:05:43
| 330,746,056
| 0
| 1
| null | 2021-02-18T18:40:41
| 2021-01-18T17:56:38
|
R
|
UTF-8
|
R
| false
| false
| 1,459
|
r
|
homework1.R
|
#Homework Assignment#
# 1- Make a new project
# 2- Pull the main git branch into your computer
# 3- Create a new branch in your repo and name it myname_homework1_sol
# 4- You see the file named "supermarket_sales - Sheet1.csv" in R, rename it according to the good practice we discussed then
# import this file into a dataframe.
# 5- What is the class of this data and what is its disk size?
# 6- Does this data have any factor variables?
# 7- How many rows and columns it has?
# 8- Give basic statistics on the data at hand
# 9- Does it have any empty values? If so, where?
# 10- Explore the grepl() function, what does it do? (Please use the built-in documentation system by typing ?grepl() )
#
# Bonus 1:
# Give the summary statistics of the first 100 rows for the unit_price column. (default ordering)
#
# Bonus 2:
# Illustrate at least three different methods to get data only from the seventh column
#
# Extra Bonus 3:
# Keep only the rows that have Rating lower than the median
#
# Final Task:
# Save, Commit and Push your script to your remote github repo with the comment "Done!".
#
#
# Please remember to write proper comments and documentation when working on your scripts.
#
# I will correct everything on next week's session.
#
# (this is the link for how to clone my remote repo to your own : https://stackoverflow.com/questions/18200248/cloning-a-repo-from-someone-elses-github-and-pushing-it-to-a-repo-on-my-github)
|
e91261a630afaf23fa757450bc71d0dc5bae748c
|
8fa881542cc1f5bcd4029063f8c5096e42dbf62b
|
/TP4.R
|
bcb1c0d6b00aca876cf5db9bbe25d30aec6efee8
|
[] |
no_license
|
suyiqing/sy09_tp
|
3704e26e84b299bef89e79c4a044adeff5515920
|
8c0a38b1aa7fdb41c00e6573a508fa23e4d96861
|
refs/heads/master
| 2020-03-14T06:09:07.643661
| 2018-04-29T08:31:36
| 2018-04-29T08:31:36
| 131,478,066
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,388
|
r
|
TP4.R
|
notes <- read.csv("donnees/sy02-p2016.csv", na.strings="", header=T)
notes$nom <- factor(notes$nom, levels=notes$nom)
notes$correcteur.median <- factor(notes$correcteur.median, levels=c("Cor1","Cor2","Cor3","Cor4","Cor5","Cor6","Cor7","Cor8"))
notes$correcteur.final <- factor(notes$correcteur.final, levels=c("Cor1","Cor2","Cor3","Cor4","Cor5","Cor6","Cor7","Cor8"))
notes$niveau <- factor(notes$niveau, ordered=T)
notes$resultat <- factor(notes$resultat, levels=c("F","Fx","E","D","C","B","A"), ordered=T)
# valeur manquante
notes[which(is.na(notes$dernier.diplome.obtenu)),]
notes[which(is.na(notes$note.median)),]
notes[which(is.na(notes$correcteur.median)),] # lie avec note.median
notes[which(is.na(notes$note.final)),] # un peu lie avec median
notes[which(is.na(notes$correcteur.final)),] # lie avec final
notes[which(is.na(notes$note.totale)),] # lie avec median et final
notes[which(is.na(notes$resultat)),] # res lie avec note totale
plot(notes)
notes
boxplot(resultat~dernier.diplome.obtenu, data = notes)
boxplot(resultat~specialite, data = notes)
boxplot(resultat~niveau, data = notes)
boxplot(note.median~correcteur.median, data = notes)
boxplot(note.final~correcteur.final, data = notes)
# median
# aggregate : Splits the data into subsets, computes summary statistics for each, and returns the result in a convenient form
moy.median <- aggregate(note.median~correcteur.median, data=notes, FUN=mean)
names(moy.median) <- c("correcteur","moy.median")
std.median <- aggregate(note.median~correcteur.median, data=notes, FUN=sd)
names(std.median) <- c("correcteur","std.median")
# merge : Merge two data frames by common columns or row names, or do other versions of database join operations.
median <- merge(moy.median, std.median)
# final
moy.final <- aggregate(note.final~correcteur.final, data=notes, FUN=mean)
names(moy.final) <- c("correcteur","moy.final")
std.final <- aggregate(note.final~correcteur.final, data=notes, FUN=sd)
names(std.final) <- c("correcteur","std.final")
final <- merge(moy.final, std.final)
# correcteurs
correcteurs <- merge(median, final, all=T)
corr.acp <- correcteurs[-c(2,3),]
mf1 <- as.matrix(corr.acp[,2:5])
mf <- scale(mf1, scale = FALSE)
covariance <- cov(mf)
inertieTotal <- sum(eigen(covariance)$values)
eigen(covariance)$values[1]/inertieTotal
# composants pricipals
vecteurP <- eigen(covariance)$vectors
# coord dans ACP
corrACP <- mf1 %*% vecteurP
# question 2
plot(corrACP[,c(2,1)], pch=20, asp=1)
# comparer avec jai calcule dans tp2, un peu diff
var(corr.acp[,2])
print(inertie(corr.acp[,2:5]))
var(corr.acp[,2])/inertie(corr.acp[,2:5])
inertie <- function(donnee)
{
res <- 0
p <- dim(donnee)[2]
for(j in 1:p)
{
tmp <- donnee[,j]
moy <- mean(donnee[,j])
res <- res + sum((tmp - moy)^2)
}
#ici le poids est 1/n
res <- 1/length(tmp) * res
}
# question 3 p43
correlation <- cor(mf1, corrACP)
#D <- diag(1/(sqrt((n-1)/n)*apply(X, 2, sd))) %*% U %*% sqrt(L)
plot(-1:1,-1:1,type="n",xlab="Axe 1",ylab="Axe 2")
points(x = correlation[,1], y = correlation[,2])
text(correlation[,c(1,2)], row.names(correlation), pos=4);
abline(h=0)
abline(v=0)
curve(sqrt(1-x^2),-1,1,add=TRUE)
curve(-sqrt(1-x^2),-1,1,add=TRUE)
# question 4
corrACP[,1] %*% t(vecteurP[,1])
# q5 = q2
corr2.acp <- correcteurs[c(2,3),]
corr2.acp[1,4] <- mean(corr.acp[,4])
corr2.acp[1,5] <- mean(corr.acp[,5])
corr2.acp[2,2] <- mean(corr.acp[,2])
corr2.acp[2,3] <- mean(corr.acp[,3])
mf2 <- as.matrix(corr2.acp[,2:5])
for(j in 1:4)
{
#apply(mf2[,j], mf2[,j] - mean(mf[,j]))
mf2[,j] <- mf2[,j] - mean(corr.acp[,j+1])
}
corrACP2 <- mf2 %*% vecteurP
plot(corrACP[,c(2,1)], pch=20, asp=1)
points(x = corrACP2[,1], y = corrACP2[,2])
plot(corrACP[,c(3,1)], pch=20, asp=1)
points(x = corrACP2[,1], y = corrACP2[,3])
# Crab
library(MASS)
data(crabs)
crabsquant <- crabs[,4:8]
summary(crabs)
summary(crabsquant)
boxplot(crabsquant)
class(crabsquant)
crabsquant_c <- scale(crabsquant, scale = FALSE)
cov_crab <- cov(crabsquant_c)
eigen(cov_crab)$values
cor(crabsquant_c)
plot(crabsquant_c)
# composants pricipals
vp_crab <- eigen(cov_crab)$vectors
# coord dans ACP
crabACP <- crabsquant_c %*% vp_crab
plot(crabACP[,c(2,1)], pch=20, asp=1)
plot(crabACP[,c(2,1)], col=c("red", "blue")[crabs$sp], pch=c(21,24)[crabs$sp])
# Pima
Pima <- read.csv("donnees/Pima.csv", header = T)
Pima$z <- factor(Pima$z)
plot(Pima)
|
1d50359e709ed29106374cc40baa1712d91fa22b
|
2b850f9fdfa54159f05553050ad21600e6a58246
|
/R/plot.obscor.R
|
1edf6d8f1247d46b0f848fc920c1138bff783431
|
[] |
no_license
|
cran/palaeoSig
|
f1dece921733d50a2915d47bd188ecdc32392ced
|
29f25c31bf18651a5d50928ebe61f2b247f22962
|
refs/heads/master
| 2023-03-15T03:17:27.916002
| 2023-03-10T08:30:02
| 2023-03-10T08:30:02
| 17,698,185
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,227
|
r
|
plot.obscor.R
|
#' @describeIn obs.cor Plots for obscor object
#' @param x An obscor object.
#' @param xlab X-axis label if the default is unsatisfactory.
#' @param ylab Y-axis label if the default is unsatisfactory.
#' @param f Scale factor for the abundances, the maximum cex of points for the
#' which=1 plot.
#' @param which Which type of plot. which = 1 gives a plot of RDA scores against
#' species optima. which = 2 gives a histogram showing the null distribution of
#' correlations between RDA scores and species optima, together with the
#' observed correlation.
#' @param variable_names Name of environmental variable (only 1 currently) for
#' the label on the observed correlation with which = 2
#' @param abun Which species weighting required for plots. See details
#' @param p_val P value to draw a line vertical line at (with which=2)
#' @importFrom graphics plot hist abline box
#' @importFrom stats quantile
#' @method plot obscor
#' @export
plot.obscor <- function(x, xlab, ylab, f = 5, which = 1,
variable_names = "env",
abun = "abun.calib", p_val = 0.05, ...) {
weightings <- c(
"abun.fos", "abun.calib", "abun.joint", "n2.fos",
"n2.calib", "n2.joint", "unweighted"
)
w <- pmatch(abun, weightings)
if (is.na(w)) {
stop("Unknown abundance weighting")
}
w <- weightings[w]
if (which == 1) {
if (missing(xlab)) {
xlab <- "WA optima"
}
if (missing(ylab)) {
ylab <- "RDA scores"
}
if (w == "unweighted") {
a <- rep(1, nrow(x$ob$x))
} else {
a <- x$ob$x[[w]]
a <- a / max(a) * f
}
plot(
x = x$ob$x$Optima, y = x$ob$x$RDA1, cex = a,
xlab = xlab, ylab = ylab, ...
)
} else if (which == 2) {
if (missing(xlab)) {
xlab <- ifelse(w == "unweighted", "Correlation", "Weighted correlation")
}
sim <- x$sim[[w]]
ob <- x$ob$res[w]
hist(sim,
xlim = range(c(sim, ob)), xlab = xlab,
col = "grey80", border = NA, ...
)
abline(v = ob, col = 1)
abline(v = quantile(sim, prob = 1 - p_val), col = 2, lty = 3)
text(ob, par()$usr[4] * 0.9, label = variable_names, srt = 90, pos = 2)
box()
} else {
stop("which==what")
}
}
#' @describeIn obs.cor Identify species on obs.cor plot
#' @param labels Labels for the points in identify. By default, the species
#' names from intersection of colnames(spp) and colnames(fos) are used.
#' @param \dots Other arguments to plot or identify
#' @importFrom graphics identify
#'
identify.obscor <- function(x, labels, ...) {
if (missing(labels)) {
labels <- rownames(x$ob$x)
}
identify(x$ob$x[, 1:2], labels = labels, ...)
}
#' @describeIn obs.cor autoplot for obscor object
#' @param object An obscor object.
#' @param top Proportion of the figure below the environmental name labels.
#' @param nbins integer giving number of bins for the histogram
#' @importFrom ggplot2 autoplot ggplot geom_point scale_size_area
#' @importFrom stats quantile
#' @importFrom rlang .data
#' @importFrom magrittr %>%
#' @importFrom dplyr mutate
#' @method autoplot obscor
#' @export
autoplot.obscor <- function(object, which = 1, variable_names = "env",
abun = "abun.calib", p_val = 0.05,
nbins = 20, top = 0.7, ...) {
weightings <- c(
"abun.fos", "abun.calib", "abun.joint", "n2.fos",
"n2.calib", "n2.joint", "unweighted"
)
w <- pmatch(abun, weightings)
if (is.na(w)) {
stop("Unknown abundance weighting")
}
abun <- weightings[w]
if (which == 1) {
object$ob$x %>%
mutate(unweighted = 1) %>%
ggplot(aes(x = .data$Optima, y = .data$RDA1, size = .data[[abun]])) +
geom_point(alpha = 0.3) +
scale_size_area() +
labs(x = "WA optima", y = "RDA scores", size = "Abundance")
} else if (which == 2) {
xlab <- ifelse(w == "unweighted", "Correlation", "Weighted correlation")
x_fort <- fortify_palaeosig(
sim = object$sim[, abun],
variable_names = variable_names,
p_val = p_val,
nbins = nbins,
top = top,
EX = object$ob$res[abun]
)
autoplot_sig(x_fort, xlab = xlab, xmin = NA_real_)
} else {
stop("Unknown plot")
}
}
|
166b1a2a6c982c9f5c23bf5c5b1299fcd8325279
|
fe1b2831b88fd1bfb8831fef64251e6f0f1c88b1
|
/NSC_R_workshop_series_I/NSC-R Workshop_08_Notes.R
|
c06f1d8d3a39f0586b1720cfe3e29e3ba4feb671
|
[] |
no_license
|
Jonkman1/NSC_R
|
dafac864ec7ce1eb70c859c8cbf34c155ed5a21a
|
7a94fabce76c783b71d37dc1602de7535b88b4b5
|
refs/heads/main
| 2023-08-25T21:35:20.988088
| 2021-09-14T19:06:11
| 2021-09-14T19:06:11
| 403,142,724
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,881
|
r
|
NSC-R Workshop_08_Notes.R
|
# NSC-R Workshop #8 Notes
# Regression modeling in R
# R version 4.0.0
# WB July 20, 2020
# These are the libaries that you need for the functions
# in this script
library(tidyverse)
library(broom) # tidy, augment and glance
library(car) # vif (variance inflation factors)
library(MASS) # negative binomial regression model
# Here is a (fictive) tiny dataset on juvenile delinquency
# (age, gender, number of crimes)
mydata <- tibble(
age = c( 14, 14, 14, 14, 14, 14, 15, 15, 15, 16,
16, 16, 16, 17, 17, 17, 17, 18, 18, 18),
sex = c( "F","M","F","M","F","M","F","F","M","M",
"F","M","F","M","M","F","F","F","M","M"),
crime = c( 0, 1, 0, 0, 2, 4, 0, 3, 6, 5,
3, 8, 6, 9, 7, 0, 1, 2, 1, 4)
)
# Display dataset
mydata
## # A tibble: 20 x 3
## age sex crime
## <dbl> <chr> <dbl>
## 1 14 F 0
## 2 14 M 1
## 3 14 F 0
## 4 14 M 0
## 5 14 F 2
## 6 14 M 4
## 7 15 F 0
## 8 15 F 3
## 9 15 M 6
## 10 16 M 5
## 11 16 F 3
## 12 16 M 8
## 13 16 F 6
## 14 17 M 9
## 15 17 M 7
## 16 17 F 0
## 17 17 F 1
## 18 18 F 2
## 19 18 M 1
## 20 18 M 4
# Let us first look at the age-crime relationship using a scatterplot
ggplot(data=mydata, mapping=aes(x=age, y=crime)) +
geom_point()
# The general setup for an OLS model estimation function is:
#
# RESULT <- lm(EQUATION, DATA, ...)
# where
# RESULT = the objct in which all results (estimates, standard errors, R2)
# are stored
# FORMULA = DEPVAR ~ INDEPVARS (the regression equation)
# where DEPVAR is the dependent (Y) variables and
# DEPVARS are the independent (X) variables (X1 + X2 + X3 ...)
# DATA = tibble (dataframe) where the data are stored
my_linearmodel_1 <- lm(formula = crime ~ age,
data=mydata)
# If you just type the name of the results, you get very little.
# Just an echo of the function call, the names of the X variables
# and their estimated unstandardized coefficients :
my_linearmodel_1
## Call:
## lm(formula = crime ~ age, data = mydata)
##
## Coefficients:
## (Intercept) age
## -5.7653 0.5629
# Beware, the is a lot more in 'my_linearmodel_1' than this
# Look here:
str(my_linearmodel_1)
## (output not shown, WB)
# To get more details, we often use summary()
# This gives us coefficients, standard errors, T values, and p values.
# In addition, it provides descriptives of the residuals, and summary
# statstics of the model (such as R squared)
summary(my_linearmodel_1)
## Call:
## lm(formula = crime ~ age, data = mydata)
##
## Residuals:
## Min 1Q Median 3Q Max
## -3.8036 -2.1778 -0.3036 2.1036 5.1964
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -5.7653 6.9849 -0.825 0.420
## age 0.5629 0.4416 1.275 0.219
##
## Residual standard error: 2.854 on 18 degrees of freedom
## Multiple R-squared: 0.08278, Adjusted R-squared: 0.03182
## F-statistic: 1.624 on 1 and 18 DF, p-value: 0.2187
# Looking a the scatterplot once more, and trying to graph the relationship
# with a 'smoother', we suspect a curvilinear (inverse U-shaped) relation
ggplot(data=mydata, mapping=aes(x=age, y=crime)) +
geom_point() +
geom_smooth()
# Maybe we should model the age-relation in a more flexible way, creating
# dummy indicators for age categories 15,16,17 and 18, with age 14 as the
# reference category.
# An easy way to achive this is by telling R that it should treat 'age' as
# a categorical (=nominal) variable.
# (a factor is a special type of vector/variable in R that we have not
# discussed in the NSC-R workshop)
# Note that I store the results of this model in a new object
# called 'my-linearmodel_2'
my_linearmodel_2 <- lm(formula= crime ~ as.factor(age), data=mydata)
summary(my_linearmodel_2)
## Call:
## lm(formula = crime ~ as.factor(age), data = mydata)
##
## Residuals:
## Min 1Q Median 3Q Max
## -4.250 -1.208 -0.250 1.875 4.750
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) 1.167 1.092 1.069 0.3021
## as.factor(age)15 1.833 1.891 0.970 0.3476
## as.factor(age)16 4.333 1.726 2.511 0.0240 *
## as.factor(age)17 3.083 1.726 1.786 0.0943 .
## as.factor(age)18 1.167 1.891 0.617 0.5465
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 2.674 on 15 degrees of freedom
## Multiple R-squared: 0.3288, Adjusted R-squared: 0.1499
## F-statistic: 1.837 on 4 and 15 DF, p-value: 0.1742
# Include us sex as a second variable
# Let us first do the scatterplot for for boys (M) and girls (F)
# separately in the same graph.
# It appears girls commit less crime
ggplot(data=mydata, mapping=aes(x=age, y=crime, color=sex)) +
geom_point()
# Here is how we specify the model (note the "+" between the X variables)
my_linearmodel_3 <- lm(formula= crime ~ age + sex,
data=mydata)
summary(my_linearmodel_3)
# Note how R indicates a string variable (variable name + value)
## Call:
## lm(formula = crime ~ age + sex, data = mydata)
##
## Residuals:
## Min 1Q Median 3Q Max
## -4.4814 -1.4350 -0.2168 1.6654 4.1131
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -5.5901 6.2715 -0.891 0.3852
## age 0.4673 0.3987 1.172 0.2573
## sexM 2.6598 1.1520 2.309 0.0338 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Residual standard error: 2.562 on 17 degrees of freedom
## Multiple R-squared: 0.3017, Adjusted R-squared: 0.2196
## F-statistic: 3.673 on 2 and 17 DF, p-value: 0.04722
# Interactions (main effects + interaction)
my_linearmodel_4 <- lm(formula= crime ~ age + sex + age:sex , data=mydata)
summary(my_linearmodel_4)
## Call:
## lm(formula = crime ~ age + sex + age:sex, data = mydata)
##
## Residuals:
## Min 1Q Median 3Q Max
## -4.8297 -1.6150 0.0553 1.5433 4.1957
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -2.3696 9.5789 -0.247 0.808
## age 0.2609 0.6117 0.426 0.675
## sexM -3.1981 12.9791 -0.246 0.809
## age:sexM 0.3723 0.8215 0.453 0.656
##
## Residual standard error: 2.624 on 16 degrees of freedom
## Multiple R-squared: 0.3106, Adjusted R-squared: 0.1813
## F-statistic: 2.403 on 3 and 16 DF, p-value: 0.1056
# A shorter notation is as follows:
# (* means: include vars on the left, vars on the right, and their
# interactions)
my_linearmodel_5 <- lm(formula= crime ~ age*sex , data=mydata)
summary(my_linearmodel_5)
# (output omitted)
# How to access the model results?
# (other than by printing them on screen)
summary(my_linearmodel_1)
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -5.7653 6.9849 -0.825 0.420
## age 0.5629 0.4416 1.275 0.219
##
## (...)
# the coefficients() functions returns the mdel coefficients (estimates)
coefficients(my_linearmodel_1)
## (Intercept) age
## -5.7652695 0.5628743
# the confint() function return the confidence intervals around the
# coefficients (by default 95%, but you can change it)
confint(my_linearmodel_1, level=0.95)
## 2.5 % 97.5 %
## (Intercept) -20.4400589 8.909520
## age -0.3649626 1.490711
confint(my_linearmodel_1, level=0.90)
## 5 % 95 %
## (Intercept) -17.8775791 6.347040
## age -0.2029458 1.328694
# For some, you need to know a bit more of the model
# (standard error is square root of variance, and
# variance is on diaginal of the variance-covariance
# matrix, remember :-)
# standard errors:
sqrt(diag(vcov(my_linearmodel_1)))
## (Intercept) age
## 6.9849281 0.4416332
# The predicted values for every point in the data
fitted(my_linearmodel_1)
## 1 2 3 4 5 6 7 8
## 2.114970 2.114970 2.114970 2.114970 2.114970 2.114970 2.677844 2.677844
## 9 10 11 12 13 14 15 16
## 2.677844 3.240719 3.240719 3.240719 3.240719 3.803593 3.803593 3.803593
## 17 18 19 20
## 3.803593 4.366467 4.366467 4.366467
# or like this (vector into tibble)
tibble(predictions = fitted(my_linearmodel_1))
# The residuals for every point in the data
residuals(my_linearmodel_1)
## 1 2 3 4 5 6 7
## -2.1149701 -1.1149701 -2.1149701 -2.1149701 -0.1149701 1.8850299 -2.6778443
## 8 9 10 11 12 13 14
## 0.3221557 3.3221557 1.7592814 -0.2407186 4.7592814 2.7592814 5.1964072
## 15 16 17 18 19 20
## 3.1964072 -3.8035928 -2.8035928 -2.3664671 -3.3664671 -0.3664671
# BROOM PACKAGE (tidy your regression results)
# The broom package contain three functions
# (note N = nr of cases, K = number of variables including the constant)
# (1) tidy() returns results per variable (a K-row tibble)
# (2) augment() returns results per case (a N-row tibble)
# (3) glance() returns results per model (a 1-row tibble)
# NOTE: I am often confused between glimpse() and glance(). In the English
# language they are more related than in the R language
# tidy() function organizes common regression output
# (estimate, se, T-value, p-value) in a tibble
# In other words: the regression table in your paper
tidy_mylinearmodel_3 <- tidy(x=my_linearmodel_3)
tidy_mylinearmodel_3
## # A tibble: 3 x 5
## term estimate std.error statistic p.value
## <chr> <dbl> <dbl> <dbl> <dbl>
## 1 (Intercept) -5.59 6.27 -0.891 0.385
## 2 age 0.467 0.399 1.17 0.257
## 3 sexM 2.66 1.15 2.31 0.0338
# You may want to add a 90% confidence interval:
tidy_mylinearmodel_3CI <- tidy(x=my_linearmodel_3,
conf.int=TRUE, conf.level=.90)
tidy_mylinearmodel_3CI
## # A tibble: 3 x 7
## term estimate std.error statistic p.value conf.low conf.high
## <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 (Intercept) -5.59 6.27 -0.891 0.385 -16.5 5.32
## 2 age 0.467 0.399 1.17 0.257 -0.226 1.16
## 3 sexM 2.66 1.15 2.31 0.0338 0.656 4.66
# augment() adds model-baseed information about each case to the dataset
# typically, you will be interested in predicted (fitted) values and in
# residuals. augment() will provide these in ".fitted" and ".resid"
# respectively. It will also aoutput five other variables, including
# sigma and Cook's distance
#
aug_mydata_linearmodel_1 <- augment(x=my_linearmodel_1,
data=mydata)
aug_mydata_linearmodel_1
## # A tibble: 20 x 10
## age sex crime .fitted .se.fit .resid .hat .sigma .cooksd .std.resid
## <dbl> <chr> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
## 1 14 F 0 2.11 1.00 -2.11 0.123 2.88 0.0441 -0.792
## 2 14 M 1 2.11 1.00 -1.11 0.123 2.92 0.0123 -0.417
## 3 14 F 0 2.11 1.00 -2.11 0.123 2.88 0.0441 -0.792
## 4 14 M 0 2.11 1.00 -2.11 0.123 2.88 0.0441 -0.792
## 5 14 F 2 2.11 1.00 -0.115 0.123 2.94 0.0001 -0.0430
## 6 14 M 4 2.11 1.00 1.89 0.123 2.90 0.0350 0.706
## 7 15 F 0 2.68 0.719 -2.68 0.0635 2.86 0.0319 -0.970
## 8 15 F 3 2.68 0.719 0.322 0.0635 2.94 0.00046 0.117
## 9 15 M 6 2.68 0.719 3.32 0.0635 2.82 0.0490 1.20
## 10 16 M 5 3.24 0.648 1.76 0.0515 2.90 0.0109 0.633
## 11 16 F 3 3.24 0.648 -0.241 0.0515 2.94 0.00020 -0.0866
## 12 16 M 8 3.24 0.648 4.76 0.0515 2.69 0.0796 1.71
## 13 16 F 6 3.24 0.648 2.76 0.0515 2.85 0.0268 0.993
## 14 17 M 9 3.80 0.844 5.20 0.0874 2.62 0.174 1.91
## 15 17 M 7 3.80 0.844 3.20 0.0874 2.82 0.0659 1.17
## 16 17 F 0 3.80 0.844 -3.80 0.0874 2.77 0.0933 -1.40
## 17 17 F 1 3.80 0.844 -2.80 0.0874 2.85 0.0507 -1.03
## 18 18 F 2 4.37 1.18 -2.37 0.171 2.87 0.0857 -0.911
## 19 18 M 1 4.37 1.18 -3.37 0.171 2.80 0.174 -1.30
## 20 18 M 4 4.37 1.18 -0.366 0.171 2.93 0.00206 -0.141
# You can now easily plot the points (as points)
# and the predicted values (as a line)
ggplot(data=aug_mydata_linearmodel_1) +
geom_point(mapping=aes(x=age, y=crime)) +
geom_line(mapping=aes(x=age, y=.fitted, color="red"))
# augment data second model (age as a nominal variable)
aug_mydata_linearmodel_2 <- augment(x=my_linearmodel_2,
data=mydata)
# plot points and predictions
ggplot(data=aug_mydata_linearmodel_2) +
geom_point(mapping=aes(x=age, y=crime)) +
geom_line(mapping=aes(x=age, y=.fitted, color="red"))
# or plot the residuals
ggplot(data=aug_mydata_linearmodel_2) +
geom_point(mapping=aes(x=age, y=.resid))
# model statistics
modelstat_my_linearmodel_2 <- glance(x=my_linearmodel_2)
modelstat_my_linearmodel_2
## # A tibble: 1 x 11
## r.squared adj.r.squared sigma statistic p.value df logLik AIC BIC
## <dbl> <dbl> <dbl> <dbl> <dbl> <int> <dbl> <dbl> <dbl>
## 1 0.329 0.150 2.67 1.84 0.174 5 -45.2 102. 108.
## # ... with 2 more variables: deviance <dbl>, df.residual <int>
# GENERALIZED LINEAR MODELS
#
# Large family of models that include
# - logistic regression (binary = dichotomous = 0/1 dependent variable)
# - Poisson regression (count = 0-N dependent variable)
# - negative binomial regression (count = 0-N dependent variable)
# and many more
#
# all estimated with the glm() function
# Let us first use the general linear model to estimate the
# ordinary linear model:
my_generallinearmodel_1 <- glm(formula= crime ~ age, data=mydata,
family=gaussian(link="identity"))
summary(my_generallinearmodel_1)
## Call:
## glm(formula = crime ~ age, family = "gaussian", data = mydata)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -3.8036 -2.1778 -0.3036 2.1036 5.1964
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -5.7653 6.9849 -0.825 0.420
## age 0.5629 0.4416 1.275 0.219
##
## (Dispersion parameter for gaussian family taken to be 8.142914)
##
## Null deviance: 159.80 on 19 degrees of freedom
## Residual deviance: 146.57 on 18 degrees of freedom
## AIC: 102.59
##
## Number of Fisher Scoring iterations: 2
# compare glm and lm: same estimates
summary(my_linearmodel_1)
## Call:
## lm(formula = crime ~ age, data = mydata)
##
## Residuals:
## Min 1Q Median 3Q Max
## -3.8036 -2.1778 -0.3036 2.1036 5.1964
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -5.7653 6.9849 -0.825 0.420
## age 0.5629 0.4416 1.275 0.219
##
## Residual standard error: 2.854 on 18 degrees of freedom
## Multiple R-squared: 0.08278, Adjusted R-squared: 0.03182
## F-statistic: 1.624 on 1 and 18 DF, p-value: 0.2187
summary(my_generallinearmodel_1)
## Call:
## glm(formula = crime ~ age, family = "gaussian", data = mydata)
##
## Deviance Residuals:
## Min 1Q Median 3Q Max
## -3.8036 -2.1778 -0.3036 2.1036 5.1964
##
## Coefficients:
## Estimate Std. Error t value Pr(>|t|)
## (Intercept) -5.7653 6.9849 -0.825 0.420
## age 0.5629 0.4416 1.275 0.219
##
## (Dispersion parameter for gaussian family taken to be 8.142914)
##
## Null deviance: 159.80 on 19 degrees of freedom
## Residual deviance: 146.57 on 18 degrees of freedom
## AIC: 102.59
##
## Number of Fisher Scoring iterations: 2
## LOGIT REGRESSION = LOGISTIC REGRESSION
# We create a binary dependent variable: crime versus no crime
mydata$anycrime <- mydata$crime > 0
my_logit_1 <- glm(formula= anycrime ~ age,
data=mydata,
family=binomial(link="logit"))
summary(my_logit_1)
my_logit_2 <- glm(formula= anycrime ~ age + sex,
data=mydata,
family=binomial(link="logit"))
summary(my_logit_2)
# Direct acces to model outcomes
coefficients(my_logit_2)
fitted(my_logit_2)
residuals(my_logit_2)
# But the broom package also works here
# Table of estimates
estimates_my_logit_2 <- tidy(x=my_logit_2, conf.int=TRUE, conf.level=.90)
estimates_my_logit_2
# You can request also exp(coefficient)
# (note confidence interval is also exponentiated)
exp_estimates_my_logit_2 <- tidy(x=my_logit_2, exponentiate=TRUE,
conf.int=TRUE, conf.level=.90)
exp_estimates_my_logit_2
# You can plot coefficients with ggplot()
exp_estimates_my_logit_2 %>%
filter(term != "(Intercept)") %>% # we do not want to plot the constant
ggplot() +
geom_point(mapping=aes(x=estimate, y=term))
# Add predictions
augmented_my_logit_2 <- augment(x=my_logit_2, data=mydata)
# Plot predicted p
# (p = exp(B) / (exp(B) + 1)
augmented_my_logit_2$predprob <- exp(augmented_my_logit_2$.fitted) /
(exp(augmented_my_logit_2$.fitted) + 1)
ggplot(data=augmented_my_logit_2) +
geom_line(mapping=aes(x=age, y=predprob, color=sex))
# Model statistics (note they are different from those reported by
# glance of a lm() model)
modelstat_my_logit_2 <- glance(x=my_logit_2)
modelstat_my_logit_2
# Poisson model for count data
my_Poisson_1 <- glm(formula= crime ~ age + sex,
data=mydata,
family=poisson(link="log"))
summary(my_Poisson_1)
estimates_myPoisson_1 <- tidy(x=my_Poisson_1, exponentiate=TRUE,
conf.int=TRUE, conf.level=.90)
augmented_myPoisson_1 <- augment(x=my_Poisson_1, data=mydata)
modelstat_myPoisson_1 <- glance(x=my_Poisson_1)
# negative binomial model for count data
library(MASS)
my_negbin_1 <- glm.nb(formula= crime ~ age + sex,
data=mydata,
link=log)
summary(my_negbin_1)
estimates_my_negbin_1 <- tidy(x=my_negbin_1, exponentiate=TRUE,
conf.int=TRUE, conf.level=.90)
augmented_my_negbin_1 <- augment(x=my_negbin_1, data=mydata)
modelstat_my_negbin_1 <- glance(x=my_negbin_1)
# Collineary check: function vif() from the 'car' package
vif(my_linearmodel_3)
## age sex
## 1.010896 1.010896
|
3f6ed4ae43e8ed72a6310cf399331a9de85692f3
|
5e67544bd977d277ea24050d1aafa1c9bed9cf86
|
/explore/tl_followers.R
|
a690aa93bf7fcfc2ba2f243e51c9ba17b1da0028
|
[] |
no_license
|
balachia/Currency
|
8d08a1c11a6472e7b019c641afb64ad90c7e1b7b
|
ff46e4b042eb176cb7787ba524c52d21303cd5ce
|
refs/heads/master
| 2021-01-17T04:46:31.851629
| 2016-07-14T22:45:06
| 2016-07-14T22:45:06
| 15,240,711
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,041
|
r
|
tl_followers.R
|
library(data.table)
rm(list=ls())
setwd('~/Data/forex/tables-2014/Rds')
ba <- readRDS('BrokerAccount.Rds')
ba <- ba[,list(brokerAccount_id=id, user_id, baseCurrency)]
setkey(ba,brokerAccount_id)
# make fpt
fpt <- readRDS('ForexPositionTracker.Rds')
fpt[,c('version','correlationId','position_id','original_id','residual_id',
'followedClose','brokerOrderId','stopEnterPrice','rollovers',
'dollarPnlExclusions','pipsPnl','closeExecutionCount',
'followedTrade_id','openBrokerTradePositionId','closeBrokerTradePositionId',
'openPersistTime','closePersistTime') := NULL]
fpt <- fpt[status=='CLOSED']
gc()
setnames(fpt, c('openPrice','openTime','closePrice','closeTime',
'dollarPnl','tradePnl','openBalance'),
c('openprice','opendate','closeprice','closedate',
'dollarpnl','tradepnl','openbalance'))
fpt[,longtr := 2 * as.numeric(direction=='LONG') - 1]
fpt[,clopdiff := closeprice - openprice]
fpt[,pctreturn := (clopdiff / openprice) * longtr]
fpt <- fpt[!is.na(pctreturn)]
# split out follower trades
fpt.f <- fpt[!is.na(openCause_id) | !is.na(closeCause_id)]
fpt <- fpt[is.na(openCause_id) & is.na(closeCause_id)]
fpt <- fpt[closureReason == '']
# merge in user ids
setkey(fpt,brokerAccount_id)
fpt <- merge(fpt,ba,all.x=TRUE)
setkey(fpt.f,brokerAccount_id)
fpt.f <- merge(fpt.f,ba,all.x=TRUE)
# merge in trade leader ids
ref.trades <- fpt[,list(closeCause_id = id, tl_id = user_id, tl_baid = brokerAccount_id)]
setkey(ref.trades,closeCause_id)
setkey(fpt.f,closeCause_id)
fpt.f <- merge(fpt.f, ref.trades, all.x=TRUE)
fpt.f <- fpt.f[!is.na(tl_id)]
tl.assoc <- fpt.f[,list(.N), by=list(user_id,tl_id,tl_baid)]
u.assocs <- tl.assoc[,list(tls = length(unique(tl_id))),by=user_id]
t.assocs <- tl.assoc[,list(us = length(unique(user_id))), by=tl_id]
# export trade leader ids
setwd('~/Data/forex')
saveRDS(tl.assoc[,list(tl.user=1),by=tl_id],
'Rds/trade-leader-users.Rds')
saveRDS(tl.assoc[,list(tl.ba=1),by=tl_baid],
'Rds/trade-leader-brokeraccounts.Rds')
|
3cc449aabf0a124f444f3f96855a1ceeab341578
|
1ef2ca8618fbf2b0fcdb062e957af9429b3fd9da
|
/man/CacheIndividualTrees.Rd
|
705035fb013a0adb440848dfcf1ceada512398da
|
[] |
no_license
|
jwiggi18/HistoryOfEarth
|
c4d27692161a0e293a56d9221629b43ddd2a1da8
|
8f136cc9aa2e2f292d24774153eb0cfe231905e5
|
refs/heads/master
| 2020-04-15T21:55:28.707830
| 2019-07-17T19:07:02
| 2019-07-17T19:07:02
| 165,052,095
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 459
|
rd
|
CacheIndividualTrees.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/historyofearth.R
\name{CacheIndividualTrees}
\alias{CacheIndividualTrees}
\title{Cache trees for website}
\usage{
CacheIndividualTrees(taxa = GetTaxa(), age_df = GetAgeDF(),
height = 800, width = 900)
}
\arguments{
\item{taxa}{Vector of taxa}
\item{age_df}{From GetAgeDF}
\item{height}{Height in pixels}
\item{width}{Width in pixels}
}
\description{
Cache trees for website
}
|
0af4eb48cbf7aaf97186f85276748360bb9c95b9
|
ff59003d5f85134185f059ab3c3ce5f15df3414d
|
/MakePlots.R
|
b589aa5745c7802bd2dc69b507b76c2730dd5c71
|
[] |
no_license
|
yannicao/Reprojection
|
9842a8d07fb0326681db717757f9526888818c2a
|
cc3051396b2b5a5b795b4b968082814b3c849125
|
refs/heads/master
| 2021-01-10T01:22:14.859357
| 2016-02-22T05:10:04
| 2016-02-22T05:10:04
| 51,965,004
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,243
|
r
|
MakePlots.R
|
library(geosphere)
library(RColorBrewer)
source("WRF_functions.R")
# Do some plots for the article
#
plotPolyT = function(latMin, latMax, long=0, col="red2", ...) {
lats = seq(latMin,latMax,.1)
latsT = transform2sphere(lats)
diff = distHaversine(cbind(long,lats),cbind(long,latsT)) / 1000 # Convert to km
polygon(c(long,diff,long),c(latMin,lats,latMax),col=col,...)
}
latMax.d03 = 42.69
latMin.d03 = 40.97
latMax.d02 = 44.61
latMin.d02 = 38.93
latMax.d01 = 50.23
latMin.d01 = 32.90
lats = 20:60
latsT = transform2sphere(lats)
diff = distHaversine(cbind(0,lats),cbind(0,latsT)) / 1000 # Convert to km
cols = brewer.pal(3,"OrRd")
pdf("WRFLatError.pdf",width=6,height=6)
plot(diff,lats,xlab="Error (km)",ylab="Latitude (deg)",pch=19,type="l")
plotPolyT(latMin.d01, latMax.d01, col=cols[1],border=NA,density=30)
plotPolyT(latMin.d02, latMax.d02, col=cols[2],border=NA,density=40)
plotPolyT(latMin.d03, latMax.d03, col=cols[3],border=NA)
lines(diff,lats)
legend('topright', legend=c("d01","d02","d03"),fill=cols,bty="n")
dev.off()
# A cool website with nice great circles
#
# https://flowingdata.com/2011/05/11/how-to-map-connections-with-great-circles/
|
4415f4f3af41c4ed040eb34938b1e6e81de6c15a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/influence.SEM/examples/sem.fitres.Rd.R
|
5e3868b2994a09ef655bb7c364d62c2696fe9457
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 553
|
r
|
sem.fitres.Rd.R
|
library(influence.SEM)
### Name: sem.fitres
### Title: Fitted values and residuals
### Aliases: sem.fitres obs.fitres lat.fitres
### Keywords: utility
### ** Examples
data("PDII")
model <- "
F1 =~ y1+y2+y3+y4
"
fit0 <- sem(model, data=PDII)
out <- sem.fitres(fit0)
head(out)
par(mfrow=c(2,2))
plot(e.y1~hat.y1,data=out)
plot(e.y2~hat.y2,data=out)
plot(e.y3~hat.y3,data=out)
plot(e.y4~hat.y4,data=out)
qqnorm(out$e.y1); qqline(out$e.y1)
qqnorm(out$e.y2); qqline(out$e.y2)
qqnorm(out$e.y3); qqline(out$e.y3)
qqnorm(out$e.y4); qqline(out$e.y4)
|
0d38cf849710714e24740ce62054a6fc51d39a20
|
600ce8eed536ade87d512c4c7fa0047ca84047a0
|
/Preparation/prepareModel.R
|
3693ff9f20eadf37a5c93ecc5d95ef7b8cf5494e
|
[] |
no_license
|
ravi-addanki/predictNextWord
|
1623120590167fe40b982c2213cf6772d0bb109c
|
1e4266d1b71c816bdc3f3b974038c3b45fb1d447
|
refs/heads/master
| 2021-01-01T22:39:35.028481
| 2020-02-18T15:51:49
| 2020-02-18T15:51:49
| 239,374,668
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,010
|
r
|
prepareModel.R
|
## Prepare Model
# Builds the text corpus used for next-word prediction: loads the three
# en_US text files (blogs, news, twitter), cleans them, and splits each
# into a ~2% dev sample and a 98% training remainder.
options(expressions = 5e5)
options(java.parameters = "- Xmx4g")  # extra heap for RWeka's JVM
library(tm)
library(SnowballC)
library(RWeka)
library(tm.plugin.webmining)
library(stringi)
library(stringr)
library("dplyr")
time_from <-Sys.time()
txtFileDir <- paste0(getwd(),"/data/final/en_US")
# Corpus with ovid[[1]]=blogs, ovid[[2]]=news, ovid[[3]]=twitter
# (DirSource reads files in alphabetical order).
(ovid <- Corpus(DirSource(txtFileDir),readerControl = list(
  reader=readPlain,language="en_US",load=TRUE)))
## PreProcess
# Drop non-ASCII characters, numbers, extra whitespace; lower-case everything.
rmNonEng <- function(x4) removeNonASCII(x4, fields = c("Content", "Heading", "Description"),from="UTF-8",to="ASCII//TRANSLIT")
ovid <- tm_map(ovid,FUN=rmNonEng)
ovid <-tm_map(ovid,FUN=removeNumbers)
ovid <-tm_map(ovid,FUN=stripWhitespace)
ovid <-tm_map(ovid,content_transformer(tolower))
#ovid <-tm_map(ovid,removeWords, stopwords("english"))
#ovid <-tm_map(ovid,removeWords, c("can", "may", "upon", "shall", "will","must", ""))
# Replace sentence-ending periods with the sentinel token "STOP" so that
# n-grams never span a sentence boundary; then strip other punctuation.
ovid <-tm_map(ovid,content_transformer(function(x) gsub(x, pattern = "\\.", replacement = " STOP ")))
ovid <-tm_map(ovid,FUN=stripWhitespace)
ovid <-tm_map(ovid,FUN=removePunctuation)
blogCnt <- length(ovid[[1]]$content)
newsCnt <- length(ovid[[2]]$content)
twitterCnt <- length(ovid[[3]]$content)
# Reproducible 2% line sample per source for the dev set.
set.seed(2020-02-02)
blogSmp <- sample(1:blogCnt,blogCnt*.02,replace=FALSE)
newsSmp <- sample(1:newsCnt,newsCnt*.02,replace=FALSE)
twitterSmp <- sample(1:twitterCnt,twitterCnt*.02,replace=FALSE)
ovidDev <- ovid
ovidDev[[1]]$content <- ovidDev[[1]]$content[blogSmp]
ovidDev[[2]]$content <- ovidDev[[2]]$content[newsSmp]
# NOTE: the author knowingly reuses newsSmp for the twitter element here
# (twitterSmp goes unused); see the original comments below.
ovidDev[[3]]$content <- ovidDev[[3]]$content[newsSmp] # Well I know this is mistake
# But, it does not matter
# ovidDev[[3]]$content <- ovidDev[[3]]$content[twitterSmp]
# Stopword list with punctuation stripped to match the cleaned corpus.
aStops <- removePunctuation(stopwords("english"))
# Training corpus = everything NOT in the dev sample.
ovid[[1]]$content <- ovid[[1]]$content[-blogSmp]
ovid[[2]]$content <- ovid[[2]]$content[-newsSmp]
ovid[[3]]$content <- ovid[[3]]$content[-newsSmp] # Well I know this is mistake
# But, it does not matter
# ovid[[3]]$content <- ovid[[3]]$content[-twitterSmp]
ovid2 <- ovid
ovid2 <-tm_map(ovid2,removeWords, aStops)
ovidDev <-tm_map(ovidDev,removeWords, aStops)
# Repeatedly strip 1-2 character tokens; iterating 5 times catches short
# tokens that become adjacent after a previous removal pass.
for(iIter in 1:5) {
ovid2 <-tm_map(ovid2,content_transformer(function(x) gsub(x, pattern = " [[:alnum:]]{1,2} ", replacement = " ")))
ovid2 <-tm_map(ovid2,content_transformer(function(x) gsub(x, pattern = "^[[:alnum:]]{1,2} ", replacement = "")))
ovid2 <-tm_map(ovid2,content_transformer(function(x) gsub(x, pattern = " [[:alnum:]]{1,2}$", replacement = "")))
ovidDev <-tm_map(ovidDev,content_transformer(function(x) gsub(x, pattern = " [[:alnum:]]{1,2} ", replacement = " ")))
ovidDev <-tm_map(ovidDev,content_transformer(function(x) gsub(x, pattern = "^[[:alnum:]]{1,2} ", replacement = "")))
ovidDev <-tm_map(ovidDev,content_transformer(function(x) gsub(x, pattern = " [[:alnum:]]{1,2}$", replacement = "")))
}
# Drop news lines containing the BEL control character (\07), which breaks
# downstream tokenization.
a <- ovid2[[2]]$content
b <- grep("\07",a)
if(length(b) > 0) a <- a[-b]
ovid2[[2]]$content <- a
time_to <-Sys.time()
time_to - time_from
# Write each cleaned training corpus to disk, then tokenize into n-grams.
# NOTE(review): rmUnk2 and w1 are not defined anywhere in this script —
# presumably they come from a companion script that replaces rare/unknown
# words; confirm they are sourced before running this section.
ovid <- ovid2[1:1]
ovid <- tm_map(ovid,content_transformer(rmUnk2),w1)
wLines <- ovid[[1]]$content
con1 <- file("data/trainSamp/en_US/blog.txt",open="w")
writeLines(wLines,con1)
close(con1)
ovid <- ovid2[2:2]
ovid <- tm_map(ovid,content_transformer(rmUnk2),w1)
wLines <- ovid[[1]]$content
con1 <- file("data/trainSamp/en_US/news.txt",open="w")
writeLines(wLines,con1)
close(con1)
ovid <- ovid2[3:3]
ovid <- tm_map(ovid,content_transformer(rmUnk2),w1)
wLines <- ovid[[1]]$content
con1 <- file("data/trainSamp/en_US/twitter.txt",open="w")
writeLines(wLines,con1)
close(con1)
twoWordsDT <- NULL
threeWordsDT <- NULL
oneWordDT <- NULL
# Re-read the three written files as one combined training corpus.
txtFileDir <- paste0(getwd(),"/data/trainSamp/en_US")
(ovid <- Corpus(DirSource(txtFileDir),readerControl = list(
  reader=readPlain,language="en_US",load=TRUE)))
# make tokens for super set.
# Unigram and bigram term-document matrices over the combined corpus;
# frequencies are summed across documents and singletons dropped (bigrams).
Tokens1 <-TermDocumentMatrix(ovid,control=list(tolower=FALSE,tokenize=function(x){
  NGramTokenizer(x=x,control = Weka_control(min = 1, max = 1))
}))
Tokens2 <-TermDocumentMatrix(ovid,control=list(tolower=FALSE,tokenize=function(x){
  NGramTokenizer(x=x,control = Weka_control(min = 2, max = 2))
}))
freq1 <- rowSums(as.matrix(Tokens1))
freq2 <- rowSums(as.matrix(Tokens2))
freq2 <- freq2[freq2>1]
save(list=c("freq1","freq2"),file="freq12.Rdt")
# 3-/4-grams are memory-heavy, so they are computed one source at a time
# (blog, news, twitter), saved to freq3x/freq4x files, and freed via gc().
Tokens1 <- Tokens2 <-Tokens3 <- freq1 <- freq2 <- NULL
ovid <- ovid2[1]
freq3 <- Tokens3 <- freq4 <- Tokens4 <- NULL
gc()
Tokens4 <-TermDocumentMatrix(ovid,control=list(tolower=FALSE,tokenize=function(x){
  NGramTokenizer(x=x,control = Weka_control(min = 4, max = 4))
}))
freq4 <- rowSums(as.matrix(Tokens4))
freq4 <- freq4[freq4>1]
save(list=c("freq4"),file="freq41.Rdt")
freq3 <- Tokens3 <- freq4 <- Tokens4 <- NULL
gc()
Tokens3 <-TermDocumentMatrix(ovid,control=list(tolower=FALSE,tokenize=function(x){
  NGramTokenizer(x=x,control = Weka_control(min = 3, max = 3))
}))
freq3 <- rowSums(as.matrix(Tokens3))
freq3 <- freq3[freq3>1]
save(list=c("freq3"),file="freq31.Rdt")
# Same extraction for the second source (news).
ovid <- ovid2[2]
freq3 <- Tokens3 <- freq4 <- Tokens4 <- NULL
gc()
Tokens4 <-TermDocumentMatrix(ovid,control=list(tolower=FALSE,tokenize=function(x){
  NGramTokenizer(x=x,control = Weka_control(min = 4, max = 4))
}))
freq4 <- rowSums(as.matrix(Tokens4))
freq4 <- freq4[freq4>1]
save(list=c("freq4"),file="freq42.Rdt")
freq3 <- Tokens3 <- freq4 <- Tokens4 <- NULL
gc()
Tokens3 <-TermDocumentMatrix(ovid,control=list(tolower=FALSE,tokenize=function(x){
  NGramTokenizer(x=x,control = Weka_control(min = 3, max = 3))
}))
freq3 <- rowSums(as.matrix(Tokens3))
freq3 <- freq3[freq3>1]
save(list=c("freq3"),file="freq32.Rdt")
# Same extraction for the third source (twitter).
ovid <- ovid2[3]
freq3 <- Tokens3 <- freq4 <- Tokens4 <- NULL
gc()
Tokens4 <-TermDocumentMatrix(ovid,control=list(tolower=FALSE,tokenize=function(x){
  NGramTokenizer(x=x,control = Weka_control(min = 4, max = 4))
}))
freq4 <- rowSums(as.matrix(Tokens4))
freq4 <- freq4[freq4>1]
save(list=c("freq4"),file="freq43.Rdt")
freq3 <- Tokens3 <- freq4 <- Tokens4 <- NULL
gc()
Tokens3 <-TermDocumentMatrix(ovid,control=list(tolower=FALSE,tokenize=function(x){
  NGramTokenizer(x=x,control = Weka_control(min = 3, max = 3))
}))
freq3 <- rowSums(as.matrix(Tokens3))
freq3 <- freq3[freq3>1]
save(list=c("freq3"),file="freq33.Rdt")
freq3 <- Tokens3 <- freq4 <- Tokens4 <- NULL
gc()
# New prepare 3
# Reload the saved frequency vectors, merge the per-source 3-/4-gram tables,
# and build the skip-gram pair tables used by the prediction model.
# Rows containing the sentence sentinel "STOP" are filtered out so pairs
# never cross sentence boundaries.
#freq11 <- subset(freq1,!grepl("STOP",names(freq1)))
#freq21 <- subset(fre21,!grepl("STOP",names(freq2)))
#freq31 <- subset(freq3,!grepl("STOP", names(freq3)) )
library(data.table)
library("dplyr")
load("freq12.Rdt")
twoWordsDT <- as.data.table(freq2)
oneWordDT <- as.data.table(freq1)
twoWordsDT$name <- names(freq2)
oneWordDT$name <- names(freq1)
twoWordsDT <- subset(twoWordsDT,!grepl("STOP",twoWordsDT$name))
oneWordDT <- subset(oneWordDT,!grepl("STOP",oneWordDT$name))
# Per-source tables: suffix 1 = blog, 2 = news, 3 = twitter.
load("freq41.Rdt")
load("freq31.Rdt")
fourWordsDT1 <- as.data.table(freq4)
threeWordsDT1 <- as.data.table(freq3)
fourWordsDT1$name <- names(freq4)
threeWordsDT1$name <- names(freq3)
threeWordsDT1 <- subset(threeWordsDT1,!grepl("STOP",threeWordsDT1$name))
load("freq32.Rdt")
load("freq42.Rdt")
fourWordsDT2 <- as.data.table(freq4)
threeWordsDT2 <- as.data.table(freq3)
fourWordsDT2$name <- names(freq4)
threeWordsDT2$name <- names(freq3)
threeWordsDT2 <- subset(threeWordsDT2,!grepl("STOP",threeWordsDT2$name))
load("freq33.Rdt")
load("freq43.Rdt")
fourWordsDT3 <- as.data.table(freq4)
threeWordsDT3 <- as.data.table(freq3)
fourWordsDT3$name <- names(freq4)
threeWordsDT3$name <- names(freq3)
threeWordsDT3 <- subset(threeWordsDT3,!grepl("STOP",threeWordsDT3$name))
library("dplyr")
# Union the three per-source tables and sum frequencies per n-gram.
threeWordsDT <- threeWordsDT1 %>% funion(threeWordsDT2,all = TRUE) %>%
  funion(threeWordsDT3,all = TRUE) %>%
  group_by(name) %>% summarize(freq3 = sum(freq3)) %>%
  as.data.table()
fourWordsDT <- fourWordsDT1 %>% funion(fourWordsDT2,all = TRUE) %>%
  funion(fourWordsDT3,all = TRUE) %>%
  group_by(name) %>% summarize(freq4 = sum(freq4)) %>%
  as.data.table()
# Split each n-gram name into word columns.
twoWordsDT$first <- sapply(strsplit(twoWordsDT$name," "),'[[',1)
twoWordsDT$second <- sapply(strsplit(twoWordsDT$name," "),'[[',2)
threeWordsDT$first <- sapply(strsplit(threeWordsDT$name," "),'[[',1)
threeWordsDT$second <- sapply(strsplit(threeWordsDT$name," "),'[[',2)
threeWordsDT$third <- sapply(strsplit(threeWordsDT$name," "),'[[',3)
fourWordsDT$first <- sapply(strsplit(fourWordsDT$name," "),'[[',1)
fourWordsDT$second <- sapply(strsplit(fourWordsDT$name," "),'[[',2)
fourWordsDT$third <- sapply(strsplit(fourWordsDT$name," "),'[[',3)
fourWordsDT$fourth <- sapply(strsplit(fourWordsDT$name," "),'[[',4)
fourWordsDT3 <-fourWordsDT2 <-fourWordsDT1 <- NULL
threeWordsDT3 <-threeWordsDT2 <-threeWordsDT1 <- NULL
freq4 <-freq3 <- freq2 <- freq1 <- NULL
# twoPairDT: (first, second) word pairs, augmented with skip-gram pairs
# harvested from the 4-gram table (words 1&3, 1&4, 2&4), with guards that
# avoid counting the same pair twice or spanning a STOP token.
twoPairDT <- twoWordsDT[,c("first","second","freq2")]
twoPairDT1 <- fourWordsDT[fourWordsDT$second != fourWordsDT$third
                          & fourWordsDT$second != "STOP"
                          ,c("first","third","freq4")]
colnames(twoPairDT1)<-colnames(twoPairDT)
twoPairDT <- funion( twoPairDT, twoPairDT1, all = TRUE)
twoPairDT1 <- fourWordsDT[fourWordsDT$second != fourWordsDT$fourth
                          & fourWordsDT$second != "STOP"
                          & fourWordsDT$third != "STOP"
                          & fourWordsDT$third != fourWordsDT$fourth
                          ,c("first","fourth","freq4")]
colnames(twoPairDT1)<-colnames(twoPairDT)
twoPairDT <- funion( twoPairDT, twoPairDT1, all = TRUE)
twoPairDT1 <- fourWordsDT[fourWordsDT$second != fourWordsDT$first
                          & fourWordsDT$third != "STOP"
                          & fourWordsDT$third != fourWordsDT$fourth
                          ,c("second","fourth","freq4")]
colnames(twoPairDT1)<-colnames(twoPairDT)
twoPairDT <- funion( twoPairDT, twoPairDT1, all = TRUE)
twoPairDT <- twoPairDT[twoPairDT$first != "STOP",]
twoPairDT <- twoPairDT[twoPairDT$second != "STOP",]
# threePairDT: (first, second, third) word triples plus skip-gram triples
# derived from the 4-gram table.
threePairDT <- as.data.table(threeWordsDT[,c("first","second","third","freq3")])
threePairDT1 <- fourWordsDT[fourWordsDT$third != fourWordsDT$fourth
                            & fourWordsDT$third != "STOP"
                            ,c("first","second","fourth","freq4")]
colnames(threePairDT1)<-colnames(threePairDT)
threePairDT <- funion( threePairDT, threePairDT1, all = TRUE)
threePairDT1 <- fourWordsDT[fourWordsDT$second != fourWordsDT$third
                            & fourWordsDT$second != "STOP"
                            ,c("first","third","fourth","freq4")]
colnames(threePairDT1)<-colnames(threePairDT)
threePairDT <- funion( threePairDT, threePairDT1, all = TRUE)
threePairDT <- threePairDT[threePairDT$first != "STOP",]
threePairDT <- threePairDT[threePairDT$second != "STOP",]
threePairDT <- threePairDT[threePairDT$third != "STOP",]
# Collapse duplicate pairs/triples introduced by the unions.
threePairDT <- threePairDT %>% group_by(first,second,third) %>%
  summarize(freq3 = sum(freq3)) %>% as.data.table()
twoPairDT <- twoPairDT %>% group_by(first,second) %>%
  summarize(freq2 = sum(freq2)) %>% as.data.table()
twoPairDT1 <- threePairDT1 <- NULL
|
a9f89804cb0ac6d0441da9fe55cba0334a901ca1
|
226e1acd1b811900c0ee019fdbaa50da81cf7a8f
|
/Code/R_scripts/Figure_generation/Volcano_Hist.R
|
12577e55b1ddcd78ac745c032252c0c3e7e53845
|
[] |
no_license
|
Harrison5692/Bioinformatics_work
|
7b2a02aa0452f543bc4e057f7272baaf89d43680
|
25315cf3867d6d869e0952d72ed82645c8509102
|
refs/heads/master
| 2022-11-16T00:25:49.033210
| 2020-07-13T06:01:09
| 2020-07-13T06:01:09
| 279,223,690
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,207
|
r
|
Volcano_Hist.R
|
## Volcano plots and p-value/FDR histograms for three DESeq2 contrasts.
## Each input data frame must provide columns: logfc, pvalue, fdr.
##
## Fixes vs. the original script:
##  * `abline(h=(df$pvalue=1.3))` ASSIGNED 1.3 into the whole stat column
##    (destroying the data) as a side effect of drawing the threshold line;
##    the line is now drawn at the constant -log10(0.05).
##  * `df[is.na(df)] <- 'black'` stamped 'black' into every NA cell of the
##    data frame, coercing numeric columns to character; NA-significance
##    points are now simply left black when computing colors.
##  * FDR histograms were titled "P-values", and one volcano x-axis label
##    was copy-pasted from the wrong contrast.

# Point colors for a volcano plot: blue/red for |log2FC| > 1 with
# stat < .05 (up/down), black otherwise. which() drops NA stats safely.
volcano_colors <- function(df, stat) {
  color <- rep("black", nrow(df))
  color[which(df$logfc > 1 & df[[stat]] < 0.05)] <- "blue"
  color[which(df$logfc < -1 & df[[stat]] < 0.05)] <- "red"
  color
}

# Draw one volcano plot for a contrast. `stat` is "pvalue" or "fdr";
# `stat_label` is the y-axis label text ("P_Value" or "FDR").
plot_volcano <- function(df, stat, contrast, stat_label, ylim = NULL) {
  plot(df$logfc, -log10(df[[stat]]),
       pch = 20, main = paste(contrast, "Volcano Plot"),
       xlim = c(-3, 3), ylim = ylim,
       ylab = paste("-log10", stat_label),
       xlab = paste("Log2 fold change", contrast),
       col = volcano_colors(df, stat))
  # Significance thresholds: stat = .05 (horizontal), |log2FC| = 1 (vertical).
  abline(h = -log10(0.05), v = c(-1, 1), col = "blue")
}

# P-value and FDR histograms at coarse (75) and fine (20) bin counts.
plot_histograms <- function(df, contrast) {
  for (breaks in c(75, 20)) {
    hist(df$pvalue, main = paste(contrast, "P-values"), xlab = "Range", breaks = breaks)
    hist(df$fdr, xlim = c(0, 1), main = paste(contrast, "FDR"), xlab = "Range", breaks = breaks)
  }
}

#=============================
#=====DMSO_vs_TGFB============
#=============================
DMSO_TGFB_DESeq2 <- DMSO_vs_TGFb
plot_volcano(DMSO_TGFB_DESeq2, "pvalue", "DMSO vs TGFb", "P_Value")
plot_volcano(DMSO_TGFB_DESeq2, "fdr", "DMSO vs TGFb", "FDR")
plot_histograms(DMSO_TGFB_DESeq2, "DMSO vs TGFb")

#=============================
#=====TGFB_vs_A485============
#=============================
TGFB_A485 <- `de_file(1).tab`
TGFB_A485_DESeq2 <- TGFB_A485
plot_volcano(TGFB_A485_DESeq2, "pvalue", "TGFb vs A485", "P_Value")
plot_volcano(TGFB_A485_DESeq2, "fdr", "TGFB vs A485", "FDR", ylim = c(0, 2))
plot_histograms(TGFB_A485_DESeq2, "TGFB vs A485")

#=============================
#=====TGFB_vs_PFCBP1==========
#=============================
TGFB_PFCBP1 <- TGFB_PFCBP1_DESeq2
plot_volcano(TGFB_PFCBP1, "pvalue", "TGFB vs PFCBP1", "P_Value")
plot_volcano(TGFB_PFCBP1, "fdr", "TGFB vs PFCBP1", "FDR", ylim = c(0, 2))
plot_histograms(TGFB_PFCBP1, "TGFB vs PFCBP1")
|
4cd4f4ae1f99f9707b0c7742434cff0efa6dbdc2
|
b45845ee528ed22b433c5e876c87d088ce5ac860
|
/perfomix_check_list.R
|
5cc7355645f530d69a078fb80f9fc2c5b1c0146c
|
[] |
no_license
|
martinEcarnot/vrac
|
00445d290377b68c812173c20761094654d5878e
|
47bdfc47fd15454e30fdcf777c6d461a95cb38dc
|
refs/heads/master
| 2022-12-21T10:01:16.702031
| 2022-12-13T08:07:24
| 2022-12-13T08:07:24
| 232,131,113
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 453
|
r
|
perfomix_check_list.R
|
f="D:/2021/perfomix/perfomixspectro_prioritary-mixtures_long_ME.csv"
l=read.table(f,sep=";", header = T)
## IHS
# i20=which(l$season=="2019-2020")
i20=which(l$season=="2020-2021")
# l20=paste0(l[i20,"xy"],'-',l[i20,"id"],'*','sp.gz')
l20=paste0('*',l[i20,"xy"],'*','sp.gz')
for (i in 1:length(i20)) {
# print(l20[i])
c=Sys.glob(file.path("D:/2021/perfomix/Recolte_2021/CHS",l20[i]))
if (length(c)<1) {cat(paste0(substr(l20[i],2,7),"\n"))}
}
|
b0f8caaa4417b0d75011db4cd31d9697d4a08e40
|
0084280ad5d1400c280c110c402d3018b7a129af
|
/R/timing/pyclone_cluster_ccf.R
|
d46d3d1e7d4ca5c681b1093162ceef6ce4eb464b
|
[
"MIT"
] |
permissive
|
fpbarthel/GLASS
|
457626861206a5b6a6f1c9541a5a7c032a55987a
|
333d5d01477e49bb2cf87be459d4161d4cde4483
|
refs/heads/master
| 2022-09-22T00:45:41.045137
| 2020-06-01T19:12:30
| 2020-06-01T19:12:47
| 131,726,642
| 24
| 10
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,388
|
r
|
pyclone_cluster_ccf.R
|
# Pyclone cluster cancer-cell-fraction (CCF) figures for the GLASS cohort.
library(DBI)
library(tidyverse)
library(ggplot2)
# Connect to the GLASS v2 database through the preconfigured ODBC DSN.
con <- DBI::dbConnect(odbc::odbc(), "GLASSv2")
res <- dbGetQuery(con, read_file("sql/pyclone/pyclone_cluster_stats2.sql"))
# Flag clusters containing at least one known driver mutation; plot_id
# uniquely identifies a (case, cluster) line on the ladder plot.
tmp <- res %>%
  mutate(known_driver = ifelse(is.na(num_drivers), "No known driver", "Known driver mutation"),
         plot_id = paste(case_barcode,cluster_id))
#wheel("#B4464B",7)
#test = tmp %>% count(case_barcode, gene_symbol)
# Ladder plot: mean CCF per cluster, primary vs recurrent, by subtype.
g1 <- ggplot(tmp, aes(x=sample_type, y=mean, group=plot_id, color = known_driver)) +
  geom_point() +
  geom_line(na.rm = TRUE) +
  facet_wrap(~idh_codel_subtype) +
  scale_color_manual(values = c("#2FB3CA","#F1564F")) +
  theme_bw(base_size = 12) +
  labs(x = "Sample Type", y = "Cancer Cell Fraction", color = "Gene Symbol")
g1
pdf("figures/pyclone_cluster_ccf_paired_ladderplot.pdf", width=12, height = 8)
g1
dev.off()
# Number of distinct pyclone clusters per case, compared across subtypes.
tmp <- res %>%
  group_by(case_barcode, idh_codel_subtype) %>%
  summarize(n_cluster = n_distinct(cluster_id)) %>%
  ungroup()
g2 <- ggplot(tmp, aes(x=idh_codel_subtype, y = n_cluster)) + geom_boxplot()
g2
# Paired Wilcoxon signed-rank comparison of the "P" (primary) vs "R"
# (recurrent) values in df$mean, returned as a one-row data frame with
# the group sizes, per-group medians, and the test p-value/statistic.
test_subgroup <- function(df) {
  primary_vals <- df$mean[df$sample_type == "P"]
  recurrent_vals <- df$mean[df$sample_type == "R"]
  wres <- wilcox.test(df$mean ~ df$sample_type, paired = TRUE)
  data.frame(
    n = nrow(df),
    median_a = median(primary_vals),
    median_b = median(recurrent_vals),
    wilcox_p = wres$p.value,
    wilcox_v = wres$statistic
  )
}
# Run the paired test once per subtype. (The original accidentally issued
# this identical statement twice in a row; the duplicate is removed.)
tmp %>% group_by(idh_codel_subtype) %>% do(test_subgroup(.))
### PLOT barplot of clonal/subclobal
# Reshape per-mutation clonality calls (primary = clonality_a, recurrent =
# clonality_b) into long form and plot clonal/subclonal counts by subtype.
tmp <- res %>% filter(rank==1) %>%
  select(case_barcode, idh_codel_subtype, gene_symbol, clonality_a, clonality_b) %>%
  gather(c(clonality_a, clonality_b), key = "sample_type", value = "clonality") %>%
  mutate(sample_type = factor(sample_type, levels = c("clonality_a", "clonality_b"), labels = c("P","R")),
         plot_id = paste(case_barcode,gene_symbol),
         # Keep a readable label only for a short list of known driver genes.
         gene_label = factor(case_when(gene_symbol == "TP53" ~ "TP53",
                                       gene_symbol == "IDH1" ~ "IDH1",
                                       gene_symbol == "ATRX" ~ "ATRX",
                                       gene_symbol == "PTEN" ~ "PTEN",
                                       gene_symbol == "PIK3CA" ~ "PIK3CA",
                                       gene_symbol == "NF1" ~ "NF1",
                                       gene_symbol == "EGFR" ~ "EGFR",
                                       TRUE ~ NA_character_))) %>%
  filter(complete.cases(clonality))
g2 <- ggplot(tmp, aes(x=clonality, fill = sample_type)) +
  geom_bar(position = "dodge") +
  facet_wrap(~idh_codel_subtype, scales = "free_y") +
  labs(x = "Clonality", y = "Number of Mutations", fill = "Sample Type") +
  theme_bw(base_size = 12) +
  scale_fill_manual(values = c("#B47846", "#4682B4"))
pdf("figures/shared_frac_clonality.pdf", width=12, height = 8)
g2
dev.off()
# Paired view: combine the primary and recurrent calls of each mutation
# into a single "a-b" category (e.g. "C-S") and count per subtype.
tmp <- res %>% filter(rank==1) %>%
  select(case_barcode, idh_codel_subtype, gene_symbol, clonality_a, clonality_b) %>%
  mutate(clonality = sprintf("%s-%s", clonality_a, clonality_b)) %>%
  filter(complete.cases(clonality_a, clonality_b))
g3 <- ggplot(tmp, aes(x=1, fill=clonality)) +
  geom_bar() +
  facet_wrap(~idh_codel_subtype, scales = "free") +
  labs(x = "Subtype", y = "Number of Mutations", fill = "Clonality") +
  theme_bw(base_size = 12)
g3
pdf("figures/shared_frac_clonality_paired.pdf", width=12, height = 8)
g3
dev.off()
|
dc1ed469ed810c86e61bbae459d253300a2e5ab9
|
617042ee3e269b7a444bc2cb43b58e596da48bc3
|
/plot4.R
|
233e1cfe40abcf24adff5455ba74f87f3a1c4b24
|
[] |
no_license
|
marimit1/ExData_Plotting1
|
8229fa111d36b9f37bc317eb5f4cf425272070fb
|
d0b22a8332c3b3aa5079a57f9791908b4c722f84
|
refs/heads/master
| 2021-05-11T02:37:01.829896
| 2018-02-05T00:08:14
| 2018-02-05T18:20:41
| 118,368,136
| 0
| 0
| null | 2018-01-21T19:49:34
| 2018-01-21T19:49:33
| null |
UTF-8
|
R
| false
| false
| 1,950
|
r
|
plot4.R
|
#### plot4
# Course project script: downloads the UCI household power consumption data,
# extracts two days of observations, and draws a 2x2 panel of time series.
### set working directory
# NOTE(review): machine-specific path; will fail on other machines.
setwd("./R_specialization/ExData_Plotting1")
### load libraries
library(data.table)
library(sqldf)
library(lubridate)
### read data
# download data
dataset_url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(dataset_url, destfile = "household_power_consumption.zip")
# unzip data set
unzip(zipfile="household_power_consumption.zip",exdir="household_power_consumption")
# read data for dates 1/2/2007 and 2/2/2007
# sqldf filters rows at read time so the full 2M-row file never enters memory.
DT <- read.csv.sql(file = "./household_power_consumption/household_power_consumption.txt",
sql = "select * from file where Date = '1/2/2007' or Date = '2/2/2007'",
header = TRUE, sep = ";")
## work with dates
# transform date
DT$Date <- dmy(DT$Date)
# make date_time column
DT$date_time <- ymd_hms(paste(DT$Date, DT$Time))
# change local system to work with English names of weekdays
Sys.setlocale("LC_TIME", "English")
DT$weekday <- wday(DT$Date, label = TRUE)
### make plot4
# make plot
# 2x2 panel: active power, voltage, sub-metering (3 series + legend),
# reactive power — all against date_time.
par(mfrow = c(2,2), mar = c(4,4,1,1))
with(DT, {
plot(date_time, Global_active_power, type ="l",
xlab = " ", ylab = "Global Active Power")
plot(date_time, Voltage, type ="l",
xlab = "daytime", ylab = "Voltage")
plot(date_time, Sub_metering_1, type = "n", xlab = " ", ylab = "Energy sub metering")
lines(date_time, Sub_metering_1, col = "black")
lines(date_time, Sub_metering_2, col = "red")
lines(date_time, Sub_metering_3, col = "blue")
legend("topright", lty = c("solid", "solid", "solid"), lwd = c(1,1,1), col = c("black","red","blue"),
legend = c("Sub_metering_1","Sub_metering_2", "Sub_metering_3"),
cex = 0.8, bty = "n", inset = c(0.1,0))
plot(date_time, Global_reactive_power, type ="l",
xlab = "daytime")
})
# save plot
# dev.copy re-renders the on-screen device into a 480x480 PNG.
dev.copy(png, file = "plot4.png", width = 480, height = 480)
dev.off()
|
43d6cb3781fe19ccda2ed0a95e859025e0aaad5d
|
7ae34b4e174fc64bd602f4bd3dbb40c0f10e3fa9
|
/pkg/gMCP/inst/unitTests/runit.gMCP.R
|
d65ec315fc3c9130569456a792a44f2c83cf3efc
|
[] |
no_license
|
kornl/gMCP
|
ff53468562d58d1530f2f2f613b503d382f11d6b
|
e3e82ac4eab1e3b4d4ca769c3aa372d8bb1aa8c7
|
refs/heads/master
| 2021-01-15T09:32:47.828717
| 2020-03-22T20:44:36
| 2020-03-22T20:44:36
| 10,665,881
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,847
|
r
|
runit.gMCP.R
|
# RUnit test: the Simes-based graphical procedure on a 4-hypothesis graph
# (H1->H3, H2->H4, H3->H2, H4->H1; initial weights 1/2, 1/2, 0, 0) must
# reject exactly the expected hypothesis sets for two p-value vectors.
test.Simes <- function() {
  transitions <- matrix(0, nrow = 4, ncol = 4)
  transitions[1, 3] <- 1
  transitions[2, 4] <- 1
  transitions[3, 2] <- 1
  transitions[4, 1] <- 1
  graph <- matrix2graph(transitions, c(1/2, 1/2, 0, 0))
  alpha <- 0.05
  res1 <- gMCP(graph, pvalues = c(0.01, 0.005, 0.01, 0.5), test = "Simes", alpha = alpha)
  res2 <- gMCP(graph, pvalues = c(0.01, 0.005, 0.015, 0.022), test = "Simes", alpha = alpha)
  # First vector: H4's p-value (0.5) is too large to reject.
  checkEquals(unname(res1@rejected), c(TRUE, TRUE, TRUE, FALSE))
  # Second vector: all four hypotheses fall.
  checkEquals(unname(res2@rejected), c(TRUE, TRUE, TRUE, TRUE))
}
# Cross-checks that the different gMCP implementations agree for one graph
# and one p-value vector: R code vs C code (rejections and weights), and
# gMCP() vs graphTest() from the power code (rejections; the weight
# comparison against last.alphas is disabled below).
checkWeights <- function(graph, pvalues) {
  # Compares the weights of the gMCP-R-code, gMCP-C-code, power-C-code and parametric-R-code
  result <- gMCP(graph, pvalues, keepWeights=FALSE)
  rejected <- getRejected(result)
  weights <- getWeights(result)
  # Same procedure through the C implementation must match exactly.
  result2 <- gMCP(graph, pvalues, useC=TRUE, keepWeights=FALSE)
  rejected2 <- getRejected(result2)
  weights2 <- getWeights(result2)
  checkEquals(rejected, rejected2)
  checkEquals(weights, weights2)
  result <- gMCP(graph, pvalues, keepWeights=TRUE)
  rejected <- getRejected(result)
  weights <- getWeights(result)
  # graphTest() needs epsilon edges substituted by concrete values.
  result3 <- graphTest(pvalues=pvalues, alpha=0.05, graph=substituteEps(graph))
  m3 <- attr(result, "last.G")
  weights3 <- attr(result3, "last.alphas") / 0.05
  rejected3 <- result3!=0
  checkEquals(unname(rejected), unname(rejected3)) # TODO fix naming
  #checkEquals(unname(weights), weights3) TODO check why NaNs occur
}
# RUnit test: runs checkWeights() for every example graph shipped with gMCP,
# over ALL permutations of hypothesis "p-values" (0s replaced by a tiny
# positive value), so the R/C implementations are compared exhaustively.
# Several graphs are commented out (epsilon edges / entangled graphs that
# the comparison does not yet handle).
test.checkWeights <- function() {
  graphs <- list(BonferroniHolm(5),
                 parallelGatekeeping(),
                 improvedParallelGatekeeping(),
                 BretzEtAl2011(),
                 #HungEtWang2010(),
                 #HuqueAloshEtBhore2011(),
                 HommelEtAl2007(),
                 HommelEtAl2007Simple(),
                 MaurerEtAl1995(),
                 improvedFallbackI(weights=rep(1/3, 3)),
                 improvedFallbackII(weights=rep(1/3, 3)),
                 cycleGraph(nodes=paste("H",1:4,sep=""), weights=rep(1/4, 4)),
                 fixedSequence(5),
                 fallback(weights=rep(1/4, 4)),
                 #generalSuccessive(weights = c(1/2, 1/2)),
                 simpleSuccessiveI(),
                 simpleSuccessiveII(),
                 #truncatedHolm(),
                 BauerEtAl2001(),
                 BretzEtAl2009a(),
                 BretzEtAl2009b(),
                 BretzEtAl2009c()#,
                 #FerberTimeDose2011(times=5, doses=3, w=1/2),
                 #Ferber2011(),
                 #Entangled1Maurer2012(),
                 #Entangled2Maurer2012(),
  )
  for (graph in graphs) {
    # Each row of p is one permutation of 1..n used as a p-value vector.
    p <- gMCP:::permutations(length(getNodes(graph)))
    for (i in 1:(dim(p)[1])) {
      pvalues <- p[i,]
      # Zero p-values would be degenerate; nudge them to a tiny positive value.
      pvalues[pvalues==0] <- 0.00001
      checkWeights(graph, pvalues)
    }
  }
}
# RUnit test for the `upscale` option: an empty graph is plain Bonferroni,
# but with upscale=TRUE it must reproduce Bonferroni-Holm rejections.
test.upscale <- function() {
  pvals <- c(0.01, 0.02, 0.04, 0.04, 0.7)
  holm <- gMCP(BonferroniHolm(5), pvalues = pvals)
  # Simple Bonferroni with empty graph:
  empty_graph <- matrix2graph(matrix(0, nrow = 5, ncol = 5))
  bonferroni <- gMCP(empty_graph, pvalues = pvals)
  # With 'upscale=TRUE' equal to BonferroniHolm:
  upscaled <- gMCP(empty_graph, pvalues = pvals, upscale = TRUE)
  checkEquals(holm@rejected, upscaled@rejected)
  # Holm rejects at least everything Bonferroni rejects (FALSE < TRUE).
  checkTrue(all(holm@rejected >= bonferroni@rejected))
}
|
ff7ca7f1fe53f4788359d396da47c0b0a77e6c40
|
84a81beb43008d608479b4e5c993ca86cfe86873
|
/man/fullResults.Rd
|
94e8df75d414061d580c6eb01fd7abceb9b58d21
|
[] |
no_license
|
andrew-edwards/sizeSpectra
|
bb3204c5190ec6ccf09ef3252da30f0c2b4ac428
|
517c18d84f4326b59807de5235ab4cddac74876b
|
refs/heads/master
| 2023-06-22T17:57:26.718351
| 2023-06-12T16:51:23
| 2023-06-12T16:51:23
| 212,250,882
| 7
| 8
| null | null | null | null |
UTF-8
|
R
| false
| true
| 737
|
rd
|
fullResults.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{fullResults}
\alias{fullResults}
\title{Full results from using each fitting method on each year of IBTS data set.}
\format{
Data frame with 240 rows (one row for each of 30 years for each of
the 8 methods) and corresponding columns for each year-method combination:
\itemize{
\item Year: Year of data
\item Method: Method used
\item b: Estimated size-spectrum exponent $b$
\item confMin, confMax: Minimum and maximum of 95\\% confidence interval of
$b$
\item stdErr: Standard error of estimate of $b$
}
}
\source{
Vignette \code{MEPS_IBTS_2}.
}
\usage{
fullResults
}
\description{
Used to make MEPS Figure 1.
}
\keyword{datasets}
|
56d972674b92aaef409244c7ca74aab721e99385
|
b416c4fb16eea0a456e2d442193e62761380ae27
|
/R/selectGenes.R
|
d47e7011cad63fada68ea2331827f97f42da7db7
|
[] |
no_license
|
CenterForStatistics-UGent/SPsimSeq
|
8bfbfa1bc37cb75d1727eb0e64380ced90f4d89e
|
8ef681373e1e0330b3077173c71a859c9e9a464f
|
refs/heads/master
| 2022-02-11T05:43:24.961643
| 2022-01-25T19:24:45
| 2022-01-25T19:27:09
| 169,562,897
| 12
| 5
| null | 2020-04-02T14:23:53
| 2019-02-07T11:42:51
|
R
|
UTF-8
|
R
| false
| false
| 966
|
r
|
selectGenes.R
|
#' Sample genes from candidate genes
#'
#' Picks \code{n.genes} genes for simulation. When a positive DE fraction is
#' requested and the design has more than one sub-group, a \code{pDE}
#' fraction is drawn from the non-null candidates and the rest from the null
#' candidates; otherwise all genes are drawn from the null candidates.
#' Sampling is with replacement whenever a candidate pool is too small.
#'
#' @param pDE fraction of genes to be made DE
#' @param exprmt.design the experiment design
#' @param n.genes the total number of genes required
#' @param null.genes0,nonnull.genes0 Candidate null and non-null genes
#'
#' @return a vector of selected genes, named by themselves
selectGenes <- function(pDE, exprmt.design, n.genes, null.genes0, nonnull.genes0){
  has.groups <- length(unique(exprmt.design$sub.groups)) > 1
  if (pDE > 0 && has.groups) {
    n.null <- (1 - pDE) * n.genes
    n.de   <- pDE * n.genes
    picked.null <- sample(null.genes0, n.null,
                          replace = n.null > length(null.genes0))
    picked.de <- if (length(nonnull.genes0) > 0) {
      sample(nonnull.genes0, n.de,
             replace = n.de > length(nonnull.genes0))
    } else {
      NULL
    }
    sel.genes <- c(picked.de, picked.null)
  } else {
    sel.genes <- sample(null.genes0, n.genes,
                        replace = n.genes > length(null.genes0))
  }
  names(sel.genes) <- sel.genes
  sel.genes
}
|
841610d62e4b5b1ee288765e6de37e5e17daa7dc
|
48dc3bb4b86faaeb0d0380e5a7bb7c17b0ad3ab4
|
/man/SymbiotaR2.Rd
|
3348fdad6e1ec827b310b9ac42c4249fe30112ce
|
[
"MIT"
] |
permissive
|
ropensci/SymbiotaR2
|
3e80e24cec07c8cbdc86e2d235fe362d924e818b
|
f0231b51066d15e7d6a64baf5c63eb37236a23bd
|
refs/heads/master
| 2023-04-14T10:19:27.583114
| 2022-01-26T22:26:15
| 2022-01-26T22:26:15
| 190,439,935
| 0
| 0
|
MIT
| 2022-01-26T22:26:16
| 2019-06-05T17:34:58
|
R
|
UTF-8
|
R
| false
| true
| 2,385
|
rd
|
SymbiotaR2.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SymbiotaR2-package.R
\docType{package}
\name{SymbiotaR2}
\alias{SymbiotaR2}
\alias{package-SymbiotaR2}
\alias{SymbiotaR2-package}
\title{Downloading data from Symbiota2 portals into R}
\description{
This package allows users to access and
download from Symbiota2, a content management system
for biodiveristy data.
}
\section{About}{
Symbiota2 is the improved, refactored version of Symbiota,
an open source content management system for biological
specimen data. SymbiotaR2 allows users to access the data
available at Symbiota2 portals. By specifying the URL
of the relevant portal, and the resource to be downloaded,
users can use SymbiotaR2 to deliver biological specimen-data
in an R format.
}
\section{Code Structure}{
Package functions are organized by API family, which generally
group the functions by the type of resource they pull from the portal.
Each function can either return an individual resources (through
specifying the `id` argument) or a collection of resources (through
specifying the `page` argument). After providing either the `id`
or the `page` of resources, and the URL of the relevant portal,
SymbiotaR2 will return an R object (for `id`, usually a list; for
`page`, usually a data.frame).
}
\section{Portal Specification}{
All SymbiotaR2 commands require a URL that directs to the Symiobta2
portal to download data from. Users need to make sure they are granted
access to a Symbiota2 portal before trying to download data from it.
The address of a Symbiota2 portal is provided as the `url` string
argument to each function. To specify a default URL, use the
`SymbiotaR2_setup` function, which will the default url to your
.Rprofile.
This package only allows users to access data from existing Symbiota2
portals; to create a new Symbiota2 portal, see the documentation at
https://symbiota2.github.io/Symbiota2/setup/installation.html
}
\examples{
\dontrun{
myURL <- "http://ImaginarySymbiota2Portal.com/api"
myTaxa <- Taxa(id = 12, url = myURL)
str(myTaxa)
myOccurrences <- Occurrence(page = 2, url = myURL)
length(myOccurrences)
}
}
\references{
https://symbiota.org/docs/
Gries, C., Gilbert, E. E., & Franz, N. M. (2014). Symbiota - A virtual platform for creating voucher-based biodiversity information communities. Biodiversity Data Journal, 2, e1114.
}
|
752e5d183a1b35a4aa1d9cdc3248d958ebce140f
|
53dc2ae7dddb95bac3dbcd885237cadc554e97c4
|
/man/moviemeter.Rd
|
40e92dd06089b5a071b1b90d82109d9042d45d1d
|
[] |
no_license
|
hrbrmstr/moviemeter
|
273deabfc6b4aff341ef3659e5331c4654404f28
|
4e117616c6a9b799df02e13b41a2c0a2789e1952
|
refs/heads/master
| 2021-01-10T22:53:34.591103
| 2016-10-08T15:11:15
| 2016-10-08T15:11:15
| 70,337,498
| 8
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 518
|
rd
|
moviemeter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/moviemeter-package.R
\docType{package}
\name{moviemeter}
\alias{moviemeter}
\alias{moviemeter-package}
\title{Tools to work with the MovieMeter API}
\description{
Get an API key: \url{https://www.moviemeter.nl/site/registerclient/}
}
\details{
Read about their API \url{http://wiki.moviemeter.nl/index.php/API}
}
\note{
MovieMeter requests citations when publishing any derivative work from their data.
}
\author{
Bob Rudis (bob@rud.is)
}
|
e78c4f08dc265b372ed4222db33a00a09f79c836
|
982273312b912edf4ce88ef85b94d387928c6f37
|
/src_presentation.R
|
26c93b21ad59a1165987294aba339f5df3d827c8
|
[] |
no_license
|
smckechnie/examples
|
e64d451c9b1dca85b921f7a9250472b0e7700baa
|
b1bb0378def28198c23c1bedf55e9974d154e18a
|
refs/heads/master
| 2020-04-16T08:47:53.883104
| 2019-01-13T20:54:22
| 2019-01-13T20:54:22
| 165,438,246
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,946
|
r
|
src_presentation.R
|
# Benchmark several classifiers (decision tree, random forest, kNN, SVM,
# neural network) on the smartphone human-activity-recognition data.
# NOTE(review): the setwd() calls are machine-specific toggles; whichever
# succeeds last takes effect. Prefer project-relative paths.
setwd("C:/Users/steph/OneDrive/Documents/Stat 281/Project")
setwd("C:/Users/mckec/OneDrive/Documents/Stat 281/Project")
# Import data
library(readr)
library(stargazer)  # FIX: stargazer() is called below but was never attached
train_main <- read_csv('smartphone_train.csv')
test <- read_csv('smartphone_test.csv')
train_labels <- train_main$Activity
train_levels <- as.numeric(as.factor(train_labels))  # numeric activity codes (for the neural net)
test_labels <- test$Activity
# Remove labels from training and test sets
train_main <- subset(train_main,select=-c(Activity,subject))
test <- subset(test,select=-c(Activity,subject))
var.names <- make.names(names(train_main), unique = TRUE)  # syntactically valid feature names
names(train_main) <- 1:dim(train_main)[2]
names(test) <- 1:dim(test)[2]
# --- Decision tree (rpart) ---
library(rpart)
library(rpart.plot)
fit <- rpart(train_labels~., data=train_main,method="class")
fit
rpart.plot(fit)
rpart.plot(fit,under=TRUE)
rpart.plot(fit,fallen.leaves=FALSE)
rpart.plot(fit,varlen=10)
rpart.plot(fit,tweak=1.5)
pdf("tree.pdf",width=10.5, height=10)
rpart.plot(fit,varlen=10,tweak=1.2)
dev.off()
pred <- predict(fit,test,type="class")
accuracy <- table(pred,test_labels)   # confusion matrix
accuracy
sum(diag(accuracy))/sum(accuracy)     # overall accuracy
temp <- as.data.frame.matrix(accuracy)
stargazer(as.matrix(temp))
# --- Random forest ---
library(randomForest)  # FIX: was require(), which fails silently when missing
library(caret)
names(train_main) <- var.names
names(test) <- var.names
fit.class <- randomForest(as.factor(train_labels)~., data=train_main, importance=T)
fit.class
pred2 <- predict(fit.class, newdata = test)
table(pred2, test_labels)
accur <- table(pred2, test_labels)
sum(diag(accur))/sum(accur)
temp <- as.data.frame.matrix(accur)
stargazer(as.matrix(temp))
# --- k-nearest neighbors ---
library(class)
pred.knn <- knn(train_main, test, cl = factor(train_labels), k = 6)
table(pred.knn, test_labels)
accur <- table(pred.knn, test_labels)
sum(diag(accur))/sum(accur)
# Per-class precision/recall/F1 derived from the confusion matrix
rowsums <- apply(accur, 1, sum)
colsums <- apply(accur, 2, sum)
precision <- diag(accur) / colsums
recall <- diag(accur) / rowsums
f1 <- 2 * precision * recall / (precision + recall)
data.frame(precision, recall, f1)
#### additional methods
# Support vector machine
library(e1071)
model <- svm(as.factor(train_labels)~., data=train_main,type="C-classification")
# perform a grid search
tuneResult <- tune.svm(as.factor(train_labels)~., data=train_main, gamma = 10^(-6:-1), cost = c(1,10))
summary(tuneResult)
# plot(tuneResult)
pred3 <- predict(model, newdata = test)
table(pred3, test_labels)
accur <- table(pred3, test_labels)
sum(diag(accur))/sum(accur)
# --- Neural network ---
library(neuralnet)
f <- as.formula(paste("activity ~", paste(var.names[!var.names %in% "activity"], collapse = " + ")))
train_main$activity <- as.factor(train_labels)
train_main$activity <- train_levels  # NOTE(review): immediately overwrites the factor above with numeric codes; the factor assignment is dead code
nn <- neuralnet(f,data=train_main,hidden=6, lifesign = "minimal", linear.output=FALSE)
plot(nn, rep = "best")
pred4 <- predict(nn, newdata = test)
# FIX: the original tabulated pred3 (the SVM predictions) here, so the SVM
# accuracy was reported a second time under the neural-net heading.
# NOTE(review): predict() on a neuralnet returns continuous outputs; these
# likely need rounding or argmax before the confusion matrix is meaningful
# -- TODO confirm.
table(pred4, test_labels)
accur <- table(pred4, test_labels)
sum(diag(accur))/sum(accur)
|
9cd810d323f02ca6cda5560e2e2040f73b557194
|
4253ddfc4c9b7b3e98349693c6288553d3919756
|
/factor-character.R
|
4a06558289225f1e00f26c33ea4f47b7c828c5bc
|
[] |
no_license
|
esoterikosQ/MLB
|
b6980135bdb431e111ce697c8adbb7cb554c2e76
|
85497a58ae00bb8260a1d5f588307248ccdf1ec9
|
refs/heads/master
| 2020-03-26T13:11:22.038442
| 2018-08-19T13:25:13
| 2018-08-19T13:25:13
| 144,927,714
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 787
|
r
|
factor-character.R
|
# Demonstrate why a categorical predictor should be a factor rather than a
# character vector when fitting a linear model with lm().
# NOTE(review): no set.seed() call, so every run produces different draws.
# A random independent variable
continuous_x <- rnorm(10,10,3)
# A random categorical variable as a character vector:
character_x <- (rep(c("dog","cat"),5))
# Convert the character vector to a factor variable.
factor_x <- as.factor(character_x)
# Give the two categories random values:
# (rnorm(1, ...) is a single draw recycled across the vector, so every "dog"
# shares one value and every "cat" shares another -- one draw per category.)
character_x_value <- ifelse(character_x == "dog", 5*rnorm(1,0,1), rnorm(1,0,2))
# Create a random relationship between the independent variables and a dependent variable
continuous_y <- continuous_x*10*rnorm(1,0) + character_x_value
# Compare the output of a linear model with the factor variable and the character vector. Note the warning that is given with the character vector.
summary(lm(continuous_y ~ continuous_x + factor_x))
summary(lm(continuous_y ~ continuous_x + character_x))
|
77dcdd9813ce76687ecc3d797849f8bfa3fdc064
|
c68aea1de91b46ae684792123c61e84c44ea0266
|
/books/cs/data-mining/learning-data-mining-with-r/1rst-edition/chapter2/R/ch_02_eclat.R
|
9edad3907cfe5cd1677c95623bbe7c4336003b1a
|
[
"Apache-2.0"
] |
permissive
|
Winfredemalx54/algorithm-challenger-1
|
12e23bed89ca889701db1b17ac540ce62ce86d8e
|
761c2c39e041fb155f853385998d5c6318a39913
|
refs/heads/master
| 2022-11-22T15:03:01.548605
| 2020-07-11T12:26:31
| 2020-07-11T12:26:31
| 297,955,141
| 3
| 0
|
Apache-2.0
| 2020-09-23T11:58:19
| 2020-09-23T11:58:18
| null |
UTF-8
|
R
| false
| false
| 1,702
|
r
|
ch_02_eclat.R
|
#clean the workspace and memory
# NOTE(review): rm(list = ls()) erases the caller's entire workspace -- a
# known anti-pattern for shared scripts; kept as written by the original author.
rm( list=ls() )
gc()
# Load the raw transaction table and transpose it into the item-by-column
# layout the miner expects.
tbl <- read.csv("../data/itemsets002.csv", header=FALSE)
tbl <- as.matrix(tbl)
colnames(tbl) <- NULL
itemsets <- t(tbl)
print(itemsets)
# Base item identifiers and the absolute minimum-support threshold
# (22% of nrow(itemsets)).
items <- c(1,2,3,4,5)
min_sup <- 0.22*nrow(itemsets)
# f: nominal accumulator threaded through the calls; ff: the global result
# matrix that AddFrequentItemset() actually appends to (via `<<-`).
f <- NULL
ff <- NULL
# Driver: derive the frequent single-item tid-sets from the transaction
# matrix, then run the recursive Eclat search over them.
# NOTE(review): the accumulator `f` is passed along but never updated;
# results are collected in the global `ff` inside AddFrequentItemset, so the
# value returned here is always the initial NULL -- callers should read `ff`.
testEclat <- function(data,base_items,MIN_SUP){
print(data)
p <- GetFrequentTidSets(data,base_items,MIN_SUP)
print(p)
Eclat(p,f,MIN_SUP,length(base_items))
return(f)
}
# Build the initial frequent tid-set rows from the raw 0/1 indicator matrix.
#
# data        0/1 matrix; one row per base item, one column per transaction
# base_items  vector of base item identifiers (one per row of `data`)
# MIN_SUP     absolute minimum-support threshold
#
# Each returned row is a one-hot item prefix followed by the row's original
# transaction indicators; rows whose support (row sum) does not exceed
# MIN_SUP are dropped.
GetFrequentTidSets <- function(data, base_items, MIN_SUP) {
  with_support <- cbind(data, apply(data, 1, sum))  # append per-row support
  one_hot <- diag(length(base_items))
  rows <- NULL
  for (item_idx in seq_len(nrow(with_support))) {
    rows <- rbind(rows, c(one_hot[item_idx, ], with_support[item_idx, ]))
  }
  support_col <- ncol(rows)
  rows[rows[, support_col] > MIN_SUP, -support_col]
}
# Recursive Eclat search: depth-first extension of itemset prefixes by
# tid-set intersection (vertical frequent-itemset mining).
# p         matrix of candidate rows: `parameter` one-hot item flags followed
#           by transaction-membership indicators
# f         nominal accumulator -- NOTE(review): never used; frequent rows are
#           appended to the global `ff` inside AddFrequentItemset
# MIN_SUP   absolute minimum-support threshold
# parameter number of base items (width of the one-hot prefix)
# NOTE(review): if `p` ever collapses to a plain vector (single frequent
# row), nrow(p) is NULL and the loop misbehaves -- verify upstream always
# supplies a matrix.
Eclat <- function(p,f,MIN_SUP,parameter=NULL){
len <- nrow(p)
for(idx in seq(len)){
a <- p[idx,]
AddFrequentItemset(f,a)
pa <- NULL
jdx <- idx + 1
# pair `a` with every later row; keep merged candidates that stay frequent
while(idx<jdx && jdx<=len){
b <- p[jdx,]
ab <- MergeTidSets(a,b,parameter)
if(GetSupport(ab,parameter)>=MIN_SUP){
pa <- rbind(pa,ab)
}
jdx <- jdx + 1
}
rownames(pa) <- NULL
if(!IsEmptyTidSets(pa)){
#print(pa)
# recurse on the surviving extensions of this prefix
Eclat(pa,f,MIN_SUP,parameter)
}
}
}
# TRUE when no candidate extensions survived (NULL or zero-length container).
IsEmptyTidSets <- function(pa) {
  length(pa) == 0
}
# Combine two candidate rows: OR the first `parameter` one-hot item flags
# (union of the two itemsets) and multiply the remaining transaction
# indicators (intersection of their tid-sets).
MergeTidSets <- function(a, b, parameter = NULL) {
  n_items <- parameter
  total <- length(a)
  merged_items <- ifelse(a[1:n_items] + b[1:n_items], 1, 0)
  merged_tids <- a[(n_items + 1):total] * b[(n_items + 1):total]
  c(merged_items, merged_tids)
}
# Append the frequent row `p` to the result set.
# NOTE(review): the `f` argument is ignored; the row is appended to the
# script-level global `ff` via `<<-`, which couples this helper to the
# surrounding script's state.
AddFrequentItemset <- function(f,p){
ff <<- rbind(ff,p)
}
# Absolute support of a candidate row: the sum of the transaction indicators
# that follow the first `parameter` one-hot item flags.
GetSupport <- function(ab, parameter = NULL) {
  n_items <- parameter
  total <- length(ab)
  sum(ab[(n_items + 1):total])
}
# Run the miner; results accumulate in the global `ff` (testEclat's return
# value is always NULL because `f` is never reassigned -- see notes above).
testEclat(itemsets,items,min_sup)
rownames(ff) <- NULL
print(ff)
|
c77fe11e8d71eb4be14a3c5edd171ec687bad707
|
ade67a67fbbf9f8f132096193758e7e58a95c307
|
/man/NA_hms_.Rd
|
728a32cf9627088dfd5bfa17f8723fd688d593d3
|
[
"MIT"
] |
permissive
|
poissonconsulting/dttr2
|
f4ea3e045b46e2ee23445cd81445b512e04f9cea
|
be0eb04a2b353cd791989c89eef41a818c50972c
|
refs/heads/main
| 2023-07-07T07:55:28.489864
| 2023-07-04T16:16:49
| 2023-07-04T16:16:49
| 186,494,511
| 10
| 2
|
NOASSERTION
| 2023-06-27T00:25:55
| 2019-05-13T20:56:44
|
R
|
UTF-8
|
R
| false
| true
| 309
|
rd
|
NA_hms_.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/na.R
\docType{data}
\name{NA_hms_}
\alias{NA_hms_}
\title{Missing hms}
\format{
An object of class \code{hms} (inherits from \code{difftime}) of length 1.
}
\usage{
NA_hms_
}
\description{
A missing hms object
}
\keyword{datasets}
|
74f8eb96abde02927406fab294058ae1d3674736
|
42b3ddece3c7e9b56d513823c5dc2f03d0b17979
|
/man/graph_environment.Rd
|
5c25b929dab5e1d398fc31dc3c8877bbb8363158
|
[] |
no_license
|
digideskio/admixturegraph
|
1a52155638b76d089f1ad9945c70a6a814eeedd1
|
5be6fbde1688cb96debabe74c25e4834063aa308
|
refs/heads/master
| 2021-01-17T05:29:54.186661
| 2016-06-10T13:48:40
| 2016-06-10T13:48:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,035
|
rd
|
graph_environment.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/evaluate-f-statistics.R
\name{graph_environment}
\alias{graph_environment}
\title{Build an environment in which f statistics can be evaluated.}
\usage{
graph_environment(parameters, edge_lengths = NULL, admix_prop = NULL)
}
\arguments{
\item{parameters}{The parameters of a graph as returned by
\code{\link{extract_graph_parameters}}.}
\item{edge_lengths}{If specified, a vector of edge lengths. Otherwise
defaults are used.}
\item{admix_prop}{If specified, a vector of admixture proportions.
Otherwise defaults are used.}
}
\value{
A list containing two values: \code{edges}, a vector of edges and
\code{admix_prop}, a vector containing admixture proportions.
}
\description{
Constructs an environment in which the \eqn{f} statistics for a graph can be
evaluated, based on the parameters in a graph and values for edge lengths and
admixture proportions (with defaults if not specified).
}
|
b13eb8a2714789c2b9a8d8152a7a7db5b935d43a
|
8bbc42c11e4f98b2fa4cb8d8895436408c00cfa5
|
/proj20-BrainAgeEval/Analyze_Results.R
|
ea77a44ee1fedc806b272eaa3bc7b7aac43d9bc5
|
[] |
no_license
|
ccplabwustl/RobertJirsaraie
|
f2007b48bf9a9dd32e73d8998c176eea8082554e
|
b8032078400278f46a252379bf487c23d89435be
|
refs/heads/master
| 2023-04-06T23:25:53.870308
| 2022-10-14T13:00:55
| 2022-10-14T13:00:55
| 296,682,721
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,965
|
r
|
Analyze_Results.R
|
#!/usr/bin/env Rscript
######################
library(gamm4) ; library(lme4) ; library(nlme) ; library(parameters) ; library(performance)
library(ggplot2) ; library(ggforce) ; library(corrplot) ; library(ggeffects)
library(tidyr) ; library(plyr) ; library(dplyr) ; library(purrr)
library(knitr) ; library(psych) ; library(broom.mixed)
library(Rmisc) ; library(merTools) ; library(lm.beta)
library(lmerTest) ; library(gamm4) ; library(mgcv)
# Pairwise correlation-test p-values for the columns of a matrix.
#
# mat  numeric matrix (or coercible via as.matrix); columns are variables
# ...  forwarded to stats::cor.test() (e.g. method, alternative)
#
# Returns a symmetric p-value matrix with zeros on the diagonal and the
# column names of `mat` as dimnames (commonly fed to corrplot's p.mat arg).
cor.mtest <- function(mat, ...) {
  mat <- as.matrix(mat)
  n <- ncol(mat)
  p.mat <- matrix(NA, n, n)
  diag(p.mat) <- 0
  # FIX: seq_len(n - 1) instead of 1:(n - 1), which evaluates to c(1, 0) and
  # indexes out of bounds when `mat` has a single column.
  for (i in seq_len(n - 1)) {
    for (j in seq(i + 1, n)) {
      tmp <- cor.test(mat[, i], mat[, j], ...)
      p.mat[i, j] <- p.mat[j, i] <- tmp$p.value
    }
  }
  colnames(p.mat) <- rownames(p.mat) <- colnames(mat)
  p.mat
}
# Min-max rescale a numeric vector to [0, 1]. NAs are ignored when finding
# the range but propagate through to the output.
# NOTE: like the original, returns NaN for constant input (zero range).
normalize <- function(x) {
  rng <- range(x, na.rm = TRUE)  # TRUE, not the reassignable shorthand T; single pass for min and max
  (x - rng[1]) / (rng[2] - rng[1])
}
# Standardized (beta) coefficients and standard errors for the fixed effects
# of a fitted lme4 model: each raw estimate/SE is scaled by predictor SD
# over response SD.
# object  a fitted merMod object (from lmer)
# Returns a data.frame with columns stdcoef and stdse.
LmerBetas <- function(object) {
  response_sd <- sd(getME(object, "y"))
  predictor_sd <- apply(getME(object, "X"), 2, sd)
  scaling <- predictor_sd / response_sd
  std_coef <- fixef(object) * scaling
  raw_se <- coef(summary(object))[, "Std. Error"]
  data.frame(stdcoef = std_coef, stdse = raw_se * scaling)
}
######
### Read the Datasets
######
# NOTE(review): absolute, machine-specific path; consider a project-relative
# path or a command-line argument so the script runs elsewhere.
NEURO_PATH<-"/Users/Jirsaraie/Box/Research/proj20-BrainAgeEval/audits"
COMBINE<-read.csv(paste0(NEURO_PATH,"/COMBINE.csv"))
PRISMA<-read.csv(paste0(NEURO_PATH,"/PRISMA.csv"))
TRIO<-read.csv(paste0(NEURO_PATH,"/TRIO.csv"))
HCP<-read.csv(paste0(NEURO_PATH,"/HCP.csv"))
######
### Model Accuracy
######
###Scatterplots of Brain Age Models Fit with Age By Dataset
# Reshape the six brain-age prediction columns (4:9) to long format so each
# model gets its own facet; the dashed identity line marks perfect prediction.
TEMP <-pivot_longer(COMBINE,cols=4:9,names_to="MODELS",values_to="BRAINAGE")
TEMP$MODELS<-factor(TEMP$MODELS,levels=c("DBN","rDBN","tDBN","GTB","rGTB","tGTB"))
ggplot(TEMP, aes(x=age,y=BRAINAGE,group=DATA,color=DATA,shape=DATA)) +
geom_abline(intercept=0,slope=1,size=2,alpha=1,linetype="dashed") +
geom_point(size=1.45,alpha=0.575) +
stat_smooth(method = "lm",se=F,size=2.5,alpha=0.5,fullrange=T) +
scale_shape_manual(values=c(16,17,15))+
scale_color_manual(values=c("#007A39","#0080FE","#A0522D")) +
scale_fill_manual(values=c("#007A39","#0080FE","#A0522D")) +
theme_classic() + facet_wrap(~MODELS,scales = "free")
###Benchmark Developmental Brain Age Metrics
# Per model column: MAE, slope/intercept of brain age regressed on age, and
# the brain-age/age correlation.
# NOTE(review): DATA and TYPE are manual toggles re-assigned on every
# iteration; edit them to benchmark a different dataset ("LONG" fits a
# random intercept per subject, "CROSS" a plain lm).
for (COL in 4:11){
DATA<-PRISMA ; TYPE<-"LONG" ; print(paste0(names(DATA)[COL]))
if (TYPE == "CROSS"){
COEF<-round(lm(DATA[,COL]~DATA[,"age"])$coefficients,digits=2)
} else if (TYPE == "LONG"){
COEF<-round(fixef(lmer(DATA[,COL] ~ DATA[,"age"] + (1 | DATA[,"sub"]))),digits=2)
}
MAE<-round(mean(abs((DATA[,COL]-DATA[,"age"])),na.rm=T),digits=2)
CORR<-round(cor(DATA[,COL],DATA[,"age"],use = "complete.obs"),digits=2)
print(paste0("MAEx",MAE,"_SLOPEx",COEF[2],"_Interceptx",COEF[1],"_CORRx",CORR))
}
######
### Model Sensitivity
######
###Individual Differences in Cognition #3:8 / 23:28 / 29:34
# For each *_RAW outcome column, regress it on every model's brain-age
# estimate (plus age, sex, Euler number) and store entry [2,6] of the
# model_parameters table in OUT (presumably the brain-age statistic --
# confirm against the parameters package output format).
OUT<-as.data.frame(names(HCP)[3:8]) ; names(OUT)[1]<-"models"
TEMP <-pivot_longer(HCP,cols=3:8,names_to="models",values_to="brainages")
TEMP_NEST <- TEMP %>% group_by(models) %>% nest
for (COL in grep("_RAW",names(HCP))[1:5]){
MODELS <- TEMP_NEST %>% mutate(fit1 = map(data, ~ lm(HCP[,COL] ~ brainages + age + sex + EulerNumber, data = .)))
MODELS <- MODELS %>% mutate(tidy = map(fit1, broom::tidy))
for (MOD in 1:dim(MODELS)[1]){
print(paste0(MODELS$models[MOD],"x",names(HCP)[COL]))
print(model_parameters(MODELS$fit1[[MOD]])) #print(lm.beta(MODELS$fit1[[MOD]]))
OUT[which(OUT$models==MODELS$models[MOD]),names(HCP)[COL]]<-round(model_parameters(MODELS$fit1[[MOD]])[2,6],digits=3)
}
}
# Same analysis for the longitudinal PRISMA sample, adding a random
# intercept per subject (lmer).
# NOTE(review): OUT is overwritten here, discarding the HCP results
# collected above -- save or rename it first if both tables are needed.
OUT<-as.data.frame(names(PRISMA)[4:9]) ; names(OUT)[1]<-"models" #4:9 / 32:37 / 38:43
TEMP <-pivot_longer(PRISMA,cols=4:9,names_to="models",values_to="brainages")
TEMP_NEST <- TEMP %>% group_by(models) %>% nest
for (COL in grep("_RAW",names(PRISMA))[1:5]){
MODELS <- TEMP_NEST %>% mutate(fit1 = map(data, ~ lmer(PRISMA[,COL] ~ brainages + age + sex + EulerNumber + (1 | sub), data = .)))
MODELS <- MODELS %>% mutate(tidy = map(fit1, broom::tidy))
for (MOD in 1:dim(MODELS)[1]){
print(paste0(MODELS$models[MOD],"x",names(PRISMA)[COL]))
print(model_parameters(MODELS$fit1[[MOD]])) ; #print(lm.beta.lmer(MODELS$fit1[[MOD]]))
OUT[which(OUT$models==MODELS$models[MOD]),names(PRISMA)[COL]]<-round(model_parameters(MODELS$fit1[[MOD]])[2,6],digits=3)
}
}
######
### Create Scatterplots to Show the Reliability of Brain Age Predictions at a Subject Level
######
# Re-order COMBINE by age within each test sample so that ORDER provides an
# age-sorted x axis per dataset.
ROWS1 <- as.numeric(row.names(COMBINE[order(COMBINE[which(COMBINE$DATA=="PDS_TRIO"),"age"]),]))
ROWS2 <- as.numeric(row.names(COMBINE[order(COMBINE[which(COMBINE$DATA=="PDS_PRISMA"),"age"]),]))+dim(TRIO)[1]
ROWS3 <- as.numeric(row.names(COMBINE[order(COMBINE[which(COMBINE$DATA=="HCP_TEST"),"age"]),]))+(dim(TRIO)[1]+dim(PRISMA)[1])
COMBINE<-COMBINE[c(ROWS1,ROWS2,ROWS3),] ; COMBINE[,"ORDER"]<-1:nrow(COMBINE)
#Scatterplots of MAEs
# NOTE(review): the rectangle boundaries (432/713/1106) are hard-coded
# sample sizes; they must match the row counts of TRIO, PRISMA, and HCP.
TEMP <-pivot_longer(COMBINE,cols=38:43,names_to="MODELS",values_to="BRAINAGE") #4:9 - 32:37 - 38:43
TEMP$MODELS<-factor(gsub("_NORM","",gsub("_ERROR","", TEMP$MODELS)),levels=c("DBN","rDBN","tDBN","GTB","rGTB","tGTB"))
ggplot(TEMP, aes(x=ORDER, y=BRAINAGE, color=MODELS)) +
geom_rect(aes(xmin=0,xmax=432,ymin=-Inf,ymax=Inf),alpha=1,fill="#70a078") +
geom_rect(aes(xmin=433,xmax=713,ymin=-Inf,ymax=Inf),alpha=1,fill="#70a6f8") +
geom_rect(aes(xmin=713,xmax=1106,ymin=-Inf,ymax=Inf),alpha=1,fill="#9d6953") +
geom_hline(yintercept=0, color="#000000", size=1.5) + geom_point(size=1.75) + theme_classic() +
scale_color_manual(values=c("#0022ff","#00d9ff","#040080","#7c3f00","#db9e65","#472309"))
#Scatterplots of MAEs REDUCED
# NOTE(review): INDEX is computed here but never used below.
INDEX<-c((28*1:10),(28*1:10)+432,(35*1:10)+713)
TEMP <-pivot_longer(COMBINE,cols=38:43,names_to="MODELS",values_to="BRAINAGE") #4:9 - 32:37 - 38:43
TEMP$MODELS<-factor(gsub("_NORM","",gsub("_ERROR","",TEMP$MODELS)),levels=c("DBN","rDBN","tDBN","GTB","rGTB","tGTB"))
ggplot(TEMP, aes(x=ORDER, y=BRAINAGE, color=MODELS)) +
geom_rect(aes(xmin=0,xmax=432,ymin=-Inf,ymax=Inf),alpha=1,fill="#70a078") +
geom_rect(aes(xmin=433,xmax=713,ymin=-Inf,ymax=Inf),alpha=1,fill="#70a6f8") +
geom_rect(aes(xmin=713,xmax=1106,ymin=-Inf,ymax=Inf),alpha=1,fill="#9d6953") +
geom_hline(yintercept=0, color="#000000", size=2.5) + geom_point(size=1.75) + theme_classic() +
scale_color_manual(values=c("#0022ff","#00d9ff","#040080","#7c3f00","#db9e65","#472309"))
#Histogram of Reliability by Models
# Distributions of the raw per-subject SD columns (59, 61), with dashed
# vertical lines at each dataset's mean for the GTB and DBN models.
HIST_RAW <-pivot_longer(COMBINE,cols=c(59,61),names_to="MODELS",values_to="DEVS")
MEANS1<-ddply(COMBINE,"DATA", summarize, mean=mean(GTB_SD_RAW), model="GTB")
MEANS2<-ddply(COMBINE,"DATA", summarize, mean=mean(DBN_SD_RAW), model="DBN")
MEAN<-rbind(MEANS1,MEANS2)
ggplot(HIST_RAW, aes(x=DEVS, color=MODELS, fill=MODELS)) +
geom_histogram(aes(y=..count..), position="identity", alpha=0.70) +
geom_vline(data=MEAN, aes(xintercept=mean, color=model),linetype="longdash",size=1.5) +
scale_color_manual(values=c("#0022ff","#0022ff","#7c3f00","#7c3f00")) +
scale_fill_manual(values=c("#0022ff","#7c3f00")) +
theme_classic() + facet_wrap(~DATA,scale="free")
# Same histogram for the normalized SD columns (60, 62).
HIST_NORM <-pivot_longer(COMBINE,cols=c(60,62),names_to="MODELS",values_to="DEVS")
MEANS1<-ddply(COMBINE,"DATA", summarize, mean=mean(GTB_SD_NORM), model="GTB")
MEANS2<-ddply(COMBINE,"DATA", summarize, mean=mean(DBN_SD_NORM), model="DBN")
MEAN<-rbind(MEANS1,MEANS2)
ggplot(HIST_NORM, aes(x=DEVS, color=MODELS, fill=MODELS)) +
geom_histogram(aes(y=..count..), position="identity", alpha=0.70) +
geom_vline(data=MEAN, aes(xintercept=mean, color=model),linetype="longdash",size=1.5) +
scale_color_manual(values=c("#0022ff","#0022ff","#7c3f00","#7c3f00")) +
scale_fill_manual(values=c("#0022ff","#7c3f00")) +
theme_classic() + facet_wrap(~DATA,scale="free")
#Histogram of Reliability by Test Sample
DATAxERRORxNORM<-ddply(COMBINE,"DATA", summarize, error=mean(GTB_aSD_NORM))
ggplot(COMBINE, aes(x=GTB_aSD_NORM, color=DATA, fill=DATA)) +
geom_histogram(aes(y=..count..), position="identity", alpha=0.70) +
geom_vline(data=DATAxERRORxNORM, aes(xintercept=error, color=DATA),linetype="longdash",size=3.5) +
scale_color_manual(values=c("#007A39","#0080FE","#A0522D")) +
scale_fill_manual(values=c("#007A39","#0080FE","#A0522D")) +
theme_classic()
###Get Model Parameters Regarding Individual Differences in Reliability
# Mixed models for the longitudinal samples (random intercept per subject);
# plain lm for the cross-sectional HCP sample.
model_parameters(lmer(BRAINAGE_SD_NORM ~ age + sex + EulerNumber + (1 | sub),data=PRISMA))
model_parameters(lmer(BRAINAGE_SD_NORM ~ age + sex + EulerNumber + (1 | sub),data=TRIO))
model_parameters(lm(BRAINAGE_SD_NORM ~ age + sex + EulerNumber + numNavs_T1w, data=HCP))
LONG <-pivot_longer(COMBINE,cols=grep("_SD_NORM",names(COMBINE))[2:3],names_to="MODELS",values_to="DEVS")
ggplot(LONG, aes(x=age,y=DEVS,group=DATA,color=DATA,shape=DATA)) +
geom_point(size=1.5,alpha=0.7) + scale_shape_manual(values=c(16,17,15)) +
stat_smooth(method = "loess",se=F,size=3,alpha=1,fullrange=T) +
scale_color_manual(values=c("#007A39","#0080FE","#A0522D")) +
scale_fill_manual(values=c("#007A39","#0080FE","#A0522D")) +
theme_classic() + facet_wrap(~MODELS)
###Create Visuals of Reliability Differences in Age
# NOTE(review): LONG and NEURO_PATH are re-assigned here, replacing the
# objects created earlier in the script.
NEURO_PATH<-"/Users/Jirsaraie/Box/Research/proj20-BrainAgeEval/audits"
LONG<-read.csv(paste0(NEURO_PATH,"/NEURO_Traj.csv"))
FINAL<-read.csv(paste0(NEURO_PATH,"/NEURO_Grp.csv"))
ggplot(LONG, aes(x=age,y=Values, group=ROIs,color=FEATURE)) +
geom_smooth(method = "loess",se=F,size=0.2,alpha=1,fullrange=F) +
scale_color_manual(values=c("#0062ff","#f70505","#28b03f","#ffa600")) +
theme_classic() + facet_wrap(~DATA, scale="free")
#Variability Between Age Groups By Feature
ggplot(FINAL, aes(x=age_bin,y=DEV_MIN_COV,group=feature,color=FEATURE)) +
geom_line(size=0.6,alpha=0.5) + geom_point(size=0.7,alpha=0.6) +
geom_point(FINAL,mapping=aes(x=age_bin,y=MEAN_MIN_COV,color="#000000"),size=3,shape=19) +
scale_color_manual(values=c("#000000","#0062ff","#f70505","#28b03f","#ffa600")) +
theme_classic() + facet_wrap(~data,scale="free")
#Variability Between Age Groups By Feature Type
FINAL$age_bin<-factor(FINAL$age_bin,levels=c("YOUNG","MID","OLD"))
ggplot(FINAL, aes(age_bin, DEV_RAW_COV)) +
geom_jitter(width=0.4, alpha=1,size=0.5,aes(colour = FEATURE)) +
geom_point(FINAL,mapping=aes(x=age_bin, y=MEAN_RAW_COV),size=3,shape=19) +
scale_color_manual(values=c("#0062ff","#f70505","#28b03f","#ffa600")) +
facet_wrap(~data,scale="free") + theme_classic() + ylim(0.14,0.20)
#Variability Between Age Groups Stats
# NOTE(review): PDSTrio, PDSPrisma, and HCPTest are not defined anywhere in
# this script -- these calls assume objects created in an earlier session.
anova(lmer(DEV_MIN_COV~age_bin + (1|feature),PDSTrio))
anova(lmer(DEV_MIN_COV~age_bin + (1|feature),PDSPrisma))
anova(lmer(DEV_MIN_COV~age_bin + (1|feature),HCPTest))
########⚡⚡⚡⚡⚡⚡#################################⚡⚡⚡⚡⚡⚡################################⚡⚡⚡⚡⚡⚡#######
####     ⚡   ⚡      ⚡     ⚡    ⚡     ⚡     ⚡      ⚡     ⚡      ⚡      ⚡     ⚡    ⚡      ⚡     ####
########⚡⚡⚡⚡⚡⚡#################################⚡⚡⚡⚡⚡⚡################################⚡⚡⚡⚡⚡⚡#######
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.