blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
78413bdd19962652ac9ada5185c395cf035ef268
|
7afbb148ec11b3105aaead6bdd900f847e49eb18
|
/tests/testthat/test-harmonic.R
|
9fc96c7b9549505a62129a7c55484cf71367f94e
|
[
"MIT"
] |
permissive
|
tidymodels/recipes
|
88135cc131b4ff538a670d956cf6622fa8440639
|
eb12d1818397ad8780fdfd13ea14d0839fbb44bd
|
refs/heads/main
| 2023-08-15T18:12:46.038289
| 2023-08-11T12:32:05
| 2023-08-11T12:32:05
| 76,614,863
| 383
| 123
|
NOASSERTION
| 2023-08-26T13:43:51
| 2016-12-16T02:40:24
|
R
|
UTF-8
|
R
| false
| false
| 13,426
|
r
|
test-harmonic.R
|
library(testthat)
library(recipes)
# Shared fixtures: sequences of seconds stepped by (approximate) natural
# cycle lengths, all in seconds:
#   31556926 ~ 1 tropical year,   2360592 ~ 1 sidereal month,
#   2551443  ~ 1 synodic month,   2629744 ~ 1 average calendar month.
# NOTE(review): only x_second is referenced by the tests below; the other
# vectors appear unused — confirm before removing.
x_year <- seq(0, 86400 * 365 * 4, by = 31556926)
x_month_sidereal <- seq(0, 86400 * 7, by = 2360592)
x_month_synodic <- seq(0, 86400 * 7, by = 2551443)
x_month_average <- seq(0, 86400 * 365, by = 2629744)
x_week <- seq(0, 86400 * 365, by = 86400 * 7)
x_day <- seq(0, 86400 * 365, by = 86400)
x_hour <- seq(0, 86400 * 7, by = 3600)
x_minute <- seq(0, 86400, by = 60)
# every second of one day
x_second <- 0:86400
# step_harmonic() must reject invalid `cycle_size` / `starting_val` inputs.
# Every failure message is pinned with expect_snapshot(), so the argument
# expressions below must not be reformatted.
test_that("harmonic error", {
harmonic_dat <- tibble(
osc = sin(2 * pi * x_second / (3600 * 6)),
time_var = x_second
)
# missing input
expect_snapshot(error = TRUE,
recipe(osc ~ time_var, data = harmonic_dat) %>%
step_harmonic(time_var,
frequency = 1,
cycle_size = NA
)
)
# cycle_size must not be NA even when starting_val is supplied
expect_snapshot(error = TRUE,
recipe(osc ~ time_var, data = harmonic_dat) %>%
step_harmonic(time_var,
frequency = 1,
starting_val = 0,
cycle_size = NA
)
)
# cycle_size must be numeric
expect_snapshot(error = TRUE,
recipe(osc ~ time_var, data = harmonic_dat) %>%
step_harmonic(time_var,
frequency = 1,
starting_val = 0,
cycle_size = "a"
)
)
# starting_val is numeric, Date or POSIXt
expect_snapshot(error = TRUE,
recipe(osc ~ time_var, data = harmonic_dat) %>%
step_harmonic(time_var,
frequency = 1,
starting_val = "a",
cycle_size = 86400
)
)
# factors are not an accepted starting_val either
expect_snapshot(error = TRUE,
recipe(osc ~ time_var, data = harmonic_dat) %>%
step_harmonic(
time_var,
frequency = 1,
starting_val = factor("a"),
cycle_size = 86400
)
)
})
test_that("harmonic multiple variables", {
  dat <- tibble(
    osc = sin(2 * pi * x_second / (3600 * 6)),
    time_var_1 = x_second,
    time_var_2 = x_second * 2
  )

  baked <- recipe(osc ~ time_var_1 + time_var_2, data = dat) %>%
    step_harmonic(time_var_1, time_var_2,
      frequency = c(5, 10),
      cycle_size = 1
    ) %>%
    prep() %>%
    bake(new_data = NULL)

  # With this configuration the expanded features of the two inputs must
  # coincide column-for-column across the two frequencies.
  expect_equal(baked$time_var_1_sin_1, baked$time_var_2_sin_2)
  expect_equal(baked$time_var_1_cos_1, baked$time_var_2_cos_2)
})

test_that("harmonic frequencies", {
  dat <- tibble(
    osc = sin(2 * pi * x_second / (3600 * 6)),
    time_var = x_second
  )

  baked <- recipe(osc ~ time_var, data = dat) %>%
    step_harmonic(time_var,
      frequency = c(1, 1.93, 2),
      cycle_size = 86400
    ) %>%
    prep() %>%
    bake(new_data = NULL)

  # three frequencies -> 3 sin + 3 cos features, plus the outcome column
  expect_equal(ncol(baked), 7)
})
# The phase of each signal is recovered as atan2(sin coef, cos coef) from a
# no-intercept linear fit on the harmonic features; a half-cycle shift in
# the data (or in starting_val) must show up as a phase change of pi.
test_that("harmonic phase", {
harmonic_dat_1 <- tibble(
osc = sin(2 * pi * x_second / 86400),
time_var = x_second
)
rec_1 <- recipe(osc ~ time_var, data = harmonic_dat_1) %>%
step_harmonic(time_var,
frequency = 1,
cycle_size = 86400
) %>%
prep() %>%
bake(new_data = NULL)
# different starting point
harmonic_dat_2 <- tibble(
osc = sin(2 * pi * (x_second + 43200) / 86400),
time_var = x_second
)
rec_2 <- recipe(osc ~ time_var, data = harmonic_dat_2) %>%
step_harmonic(time_var,
frequency = 1,
cycle_size = 86400
) %>%
prep() %>%
bake(new_data = NULL)
# no-intercept fits: coefficients carry the amplitude/phase information
fit_1 <- lm(osc ~ time_var_sin_1 + time_var_cos_1 - 1, rec_1)
fit_2 <- lm(osc ~ time_var_sin_1 + time_var_cos_1 - 1, rec_2)
co_1 <- coefficients(fit_1)
co_2 <- coefficients(fit_2)
# shifting the signal by half a cycle (43200 s of an 86400 s cycle)
# changes the recovered phase by exactly pi
expect_equal(as.numeric(atan2(co_1[1], co_1[2]) - atan2(co_2[1], co_2[2])), pi,
ignore_attr = TRUE
)
# set reference (starting_val) at half period
rec_3 <- recipe(osc ~ time_var, data = harmonic_dat_2) %>%
step_harmonic(time_var,
frequency = 1,
starting_val = 43200,
cycle_size = 86400
) %>%
prep() %>%
bake(new_data = NULL)
fit_3 <- lm(osc ~ time_var_sin_1 + time_var_cos_1 - 1, rec_3)
co_3 <- coefficients(fit_3)
# moving the reference by the same half period cancels the phase shift
expect_equal(atan2(co_1[1], co_1[2]), atan2(co_3[1], co_3[2]))
})
test_that("harmonic model recovers amplitude", {
  set.seed(123)
  true_amp <- abs(rnorm(1))

  dat <- tibble(
    osc = true_amp * sin(2 * pi * x_second / (3600 * 6)),
    time_var = x_second
  )

  baked <- recipe(osc ~ time_var, data = dat) %>%
    step_harmonic(time_var,
      frequency = 4,
      cycle_size = 86400
    ) %>%
    prep() %>%
    bake(new_data = NULL)

  # In a no-intercept fit, the L2 norm of the sin/cos coefficients equals
  # the amplitude of the underlying oscillation.
  model <- lm(osc ~ time_var_sin_1 + time_var_cos_1 - 1, baked)
  expect_equal(sqrt(sum(coefficients(model)^2)), true_amp)
})
# POSIXct, Date and plain numeric inputs must yield identical harmonic
# features when cycle_size is expressed in the matching units.
test_that("harmonic datetime, numeric and date columns", {
x_datetime <- as.POSIXct(x_second,
origin = "1970-01-01",
tz = "UTC"
)
harmonic_dat <- tibble(
osc = sin(2 * pi * as.numeric(x_second) / (3600 * 6)),
time_var_posixt = x_datetime,
time_var_int = x_second
)
rec_datetime <- recipe(osc ~ time_var_posixt, data = harmonic_dat) %>%
step_harmonic(time_var_posixt,
frequency = 4,
cycle_size = 86400
) %>%
prep() %>%
bake(new_data = NULL)
rec_numeric <- recipe(osc ~ time_var_int, data = harmonic_dat) %>%
step_harmonic(time_var_int,
frequency = 4,
cycle_size = 86400
) %>%
prep() %>%
bake(new_data = NULL)
# column 3 is the first generated harmonic feature in both recipes
expect_equal(rec_datetime[[3]],
rec_numeric[[3]],
ignore_attr = TRUE
)
x_date <- as.Date(x_second[1:366], origin = "1970-01-01") # one year
harmonic_dat <- tibble(
osc = sin(2 * pi * as.numeric(x_date)),
time_var_date = x_date
)
# Date input: cycle_size counted in days
rec_date <- recipe(osc ~ time_var_date, data = harmonic_dat) %>%
step_harmonic(time_var_date,
frequency = 12,
cycle_size = 366
) %>%
prep() %>%
bake(new_data = NULL)
x_date_sec <- seq(0, 365, 1) * 86400 # one year
harmonic_dat <- tibble(
osc = sin(2 * pi * x_date_sec),
time_var_date_s = x_date_sec
)
# equivalent numeric input: same cycle, but expressed in seconds
rec_date_s <- recipe(osc ~ time_var_date_s, data = harmonic_dat) %>%
step_harmonic(time_var_date_s,
frequency = 12,
cycle_size = 86400 * 366
) %>%
prep() %>%
bake(new_data = NULL)
expect_equal(rec_date[[3]],
rec_date_s[[3]],
ignore_attr = TRUE
)
})
# NA values in the input propagate to the generated features; all-NA and
# non-numeric inputs, and mismatched argument lengths, are hard errors
# pinned with snapshots (do not reformat the snapshotted expressions).
test_that("harmonic NA in term", {
harmonic_dat <- tibble(
osc = sin(2 * pi * as.numeric(x_second) / (3600 * 6)),
time_var = x_second
)
harmonic_dat[20, "time_var"] <- NA
harmonic_dat[3, "time_var"] <- NA
rec_na <- recipe(osc ~ time_var, data = harmonic_dat) %>%
step_harmonic(time_var,
frequency = 4,
cycle_size = 86400,
keep_original_cols = TRUE
) %>%
prep() %>%
bake(new_data = NULL)
# 2 NA rows x 3 affected columns (original + sin + cos) = 6 missing cells
expect_equal(sum(is.na(rec_na)), 2 * 3)
harmonic_dat <- tibble(
osc = sin(2 * pi * as.numeric(x_second) / (3600 * 6)),
time_var = NA_real_
)
# an all-NA predictor cannot be prepped/baked
expect_snapshot(error = TRUE,
recipe(osc ~ time_var, data = harmonic_dat) %>%
step_harmonic(time_var,
frequency = 4,
cycle_size = 86400
) %>%
prep() %>%
bake(new_data = NULL)
)
})
# character predictors are rejected
test_that("harmonic character in term", {
harmonic_dat <- tibble(
osc = sin(2 * pi * as.numeric(x_second) / (3600 * 6)),
time_var = "x_second"
)
expect_snapshot(error = TRUE,
recipe(osc ~ time_var, data = harmonic_dat) %>%
step_harmonic(time_var,
frequency = 4,
cycle_size = 86400
) %>%
prep() %>%
bake(new_data = NULL)
)
})
# cycle_size must be length 1 or match the number of selected columns
test_that("harmonic cycle_size length", {
harmonic_dat <- tibble(
osc = sin(2 * pi * as.numeric(x_second) / (3600 * 6)),
time_var_1 = x_second,
time_var_2 = x_second,
time_var_3 = x_second
)
expect_snapshot(error = TRUE,
recipe(osc ~ time_var_1 + time_var_2 + time_var_3, data = harmonic_dat) %>%
step_harmonic(time_var_1, time_var_2, time_var_3,
frequency = 4,
cycle_size = c(86400, 86400)
) %>%
prep()
)
})
# starting_val must be length 1 or match the number of selected columns
test_that("harmonic starting_val length", {
harmonic_dat <- tibble(
osc = sin(2 * pi * as.numeric(x_second) / (3600 * 6)),
time_var_1 = x_second,
time_var_2 = x_second,
time_var_3 = x_second
)
expect_snapshot(error = TRUE,
recipe(osc ~ time_var_1 + time_var_2 + time_var_3, data = harmonic_dat) %>%
step_harmonic(time_var_1, time_var_2, time_var_3,
frequency = 4,
starting_val = c(86400, 86400),
cycle_size = 86400
) %>%
prep()
)
})
# tidy() must surface the starting_val actually used: the default (0), a
# user-supplied numeric, or a POSIXct converted to epoch seconds.
test_that("harmonic check tidy starting value", {
harmonic_dat <- tibble(
osc = sin(2 * pi * as.numeric(x_second) / (3600 * 6)),
time_var = x_second
)
tidy_starting <- recipe(osc ~ time_var, data = harmonic_dat) %>%
step_harmonic(time_var,
frequency = 4,
cycle_size = 86400
) %>%
prep() %>%
tidy(number = 1)
# default starting_val is 0
expect_equal(tidy_starting$starting_val[[1]], 0,
ignore_attr = TRUE
)
tidy_starting <- recipe(osc ~ time_var, data = harmonic_dat) %>%
step_harmonic(time_var,
frequency = 4,
starting_val = 10,
cycle_size = 86400
) %>%
prep() %>%
tidy(number = 1)
# user-supplied numeric value is reported as-is
expect_equal(tidy_starting$starting_val[[1]], 10,
ignore_attr = TRUE
)
x_datetime <- as.POSIXct(x_second,
origin = "1990-01-01 01:02:23",
tz = "UTC"
)
harmonic_dat <- tibble(
osc = sin(2 * pi * as.numeric(x_datetime) / (3600 * 6)),
time_var_posixt = x_datetime
)
tidy_starting <- recipe(osc ~ time_var_posixt, data = harmonic_dat) %>%
step_harmonic(time_var_posixt,
frequency = 4,
starting_val = as.POSIXct(100, origin = "1970-01-01"),
cycle_size = 86400
) %>%
prep() %>%
tidy(number = 1)
# POSIXct starting_val is reported as seconds since the epoch (100)
expect_equal(tidy_starting$starting_val[[1]],
100,
ignore_attr = TRUE
)
})
# The step must refuse to create an output column whose name already
# exists in the data (here `mpg_sin_1`); the error is snapshotted.
test_that("check_name() is used", {
dat <- mtcars
dat$mpg_sin_1 <- dat$mpg
rec <- recipe(~., data = dat) |>
step_harmonic(mpg, frequency = 3, cycle_size = 2.5)
expect_snapshot(
error = TRUE,
prep(rec, training = dat)
)
})
test_that("tunable", {
  harmonic_rec <- recipe(~., data = iris) %>%
    step_harmonic(all_predictors(), cycle_size = 1)

  # Only `frequency` is declared tunable, and the metadata tibble must have
  # the standard tunable() column layout.
  tunable_info <- tunable.step_harmonic(harmonic_rec$steps[[1]])
  expect_equal(tunable_info$name, c("frequency"))
  expect_true(all(tunable_info$source == "recipe"))
  expect_true(is.list(tunable_info$call_info))
  expect_equal(nrow(tunable_info), 1)
  expect_equal(
    names(tunable_info),
    c("name", "call_info", "source", "component", "component_id")
  )
})
# Infrastructure ---------------------------------------------------------------
# Even when a role is declared not required at bake time, the step still
# needs its input columns; baking without them must raise the classed
# `new_data_missing_column` error.
test_that("bake method errors when needed non-standard role columns are missing", {
harmonic_dat_mult <- tibble(
osc = sin(2 * pi * x_second / (3600 * 6)),
time_var_1 = x_second,
time_var_2 = x_second * 2
)
rec <- recipe(osc ~ time_var_1 + time_var_2, data = harmonic_dat_mult) %>%
step_harmonic(time_var_1, time_var_2,
frequency = c(5, 10),
cycle_size = 1
) %>%
update_role(time_var_1, time_var_2, new_role = "potato") %>%
update_role_requirements(role = "potato", bake = FALSE) %>%
prep()
# new_data deliberately omits the time_var_* columns
expect_error(bake(rec, new_data = harmonic_dat_mult[, 1:2]),
class = "new_data_missing_column")
})
# Printing a step with an empty selection is snapshotted both before and
# after prep(); do not reformat the snapshotted objects.
test_that("empty printing", {
rec <- recipe(mpg ~ ., mtcars)
rec <- step_harmonic(rec, frequency = 1 / 11, cycle_size = 1)
expect_snapshot(rec)
rec <- prep(rec, mtcars)
expect_snapshot(rec)
})
test_that("empty selection prep/bake is a no-op", {
  plain_rec <- recipe(mpg ~ ., mtcars)
  harmonic_rec <- step_harmonic(plain_rec, frequency = 1 / 11, cycle_size = 1)

  prepped_plain <- prep(plain_rec, mtcars)
  prepped_harmonic <- prep(harmonic_rec, mtcars)

  # With nothing selected, the step must leave the baked data untouched.
  expect_identical(bake(prepped_plain, mtcars), bake(prepped_harmonic, mtcars))
})

test_that("empty selection tidy method works", {
  rec <- recipe(mpg ~ ., mtcars)
  rec <- step_harmonic(rec, frequency = 1 / 11, cycle_size = 1)

  # Expected tidy() shape for a step with an empty selection.
  expected <- tibble(
    terms = character(),
    starting_val = double(),
    cycle_size = double(),
    frequency = double(),
    key = character(),
    id = character()
  )

  # Identical structure before and after prep().
  expect_identical(tidy(rec, number = 1), expected)
  rec <- prep(rec, mtcars)
  expect_identical(tidy(rec, number = 1), expected)
})
test_that("keep_original_cols works", {
  harmonic_cols <- c("mpg_sin_1", "mpg_cos_1")

  dropped <- recipe(~ mpg, mtcars) %>%
    step_harmonic(all_predictors(), frequency = 3, cycle_size = 2.5,
      keep_original_cols = FALSE) %>%
    prep() %>%
    bake(new_data = NULL)
  # With keep_original_cols = FALSE only the harmonic features remain.
  expect_equal(colnames(dropped), harmonic_cols)

  kept <- recipe(~ mpg, mtcars) %>%
    step_harmonic(all_predictors(), frequency = 3, cycle_size = 2.5,
      keep_original_cols = TRUE) %>%
    prep() %>%
    bake(new_data = NULL)
  # With keep_original_cols = TRUE the source column precedes the features.
  expect_equal(colnames(kept), c("mpg", harmonic_cols))
})
# Recipes created before keep_original_cols existed lack the field; prep()
# must still work (its message is snapshotted) and baking must succeed.
test_that("keep_original_cols - can prep recipes with it missing", {
rec <- recipe(~ mpg, mtcars) %>%
step_harmonic(all_predictors(), frequency = 3, cycle_size = 2.5)
# simulate a step object saved by an older recipes version
rec$steps[[1]]$keep_original_cols <- NULL
expect_snapshot(
rec <- prep(rec)
)
# expect_error(..., NA) asserts that baking does NOT error
expect_error(
bake(rec, new_data = mtcars),
NA
)
})
# print output before and after prep() is snapshotted
test_that("printing", {
rec <- recipe(mpg ~ ., mtcars) %>%
step_harmonic(hp, frequency = 1 / 11, cycle_size = 1)
expect_snapshot(print(rec))
expect_snapshot(prep(rec))
})
test_that("tunable is setup to work with extract_parameter_set_dials", {
  skip_if_not_installed("dials")

  rec <- recipe(~., data = mtcars) %>%
    step_harmonic(
      all_predictors(),
      cycle_size = 1,
      frequency = hardhat::tune()
    )

  # The tune()-marked `frequency` must surface as exactly one dials parameter.
  param_set <- extract_parameter_set_dials(rec)
  expect_s3_class(param_set, "parameters")
  expect_identical(nrow(param_set), 1L)
})
|
0732964d1008030187881544874e9fc30846041c
|
c46697e3649cee2777f699294c7f3559e058e6b0
|
/R/UnblockWorker.R
|
46b2528c04d687a4954449046a2f4dd349b01e02
|
[] |
no_license
|
cran/MTurkR
|
22fe2aca00d64288a99687fc099dfefd10b3f30b
|
29e750e47b24068120faae531053b18467d50c4c
|
refs/heads/master
| 2021-01-13T17:25:27.346534
| 2017-01-23T05:35:43
| 2017-01-23T05:35:43
| 17,680,794
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,761
|
r
|
UnblockWorker.R
|
UnblockWorker <-
UnblockWorkers <-
unblock <-
function (workers, reasons = NULL, verbose = getOption('MTurkR.verbose', TRUE), ...){
    # Unblock one or more previously blocked MTurk workers.
    #
    # workers: character vector (or factor) of WorkerIds.
    # reasons: optional reason(s); either length 1 (recycled across all
    #          workers) or the same length as `workers`.
    # verbose: emit a message per successfully unblocked worker.
    # ...    : passed through to request().
    #
    # Returns a data frame with columns WorkerId, Reason, Valid; if the API
    # response lacks a validity flag, the raw response is returned instead.
    operation <- "UnblockWorker"
    if (is.factor(workers)) {
        workers <- as.character(workers)
    }
    if (length(workers) > 1) {
        if (!is.null(reasons)) {
            if (is.factor(reasons)) {
                reasons <- as.character(reasons)
            }
            if (length(reasons) == 1) {
                # one reason, recycled for every worker
                reasons <- rep(reasons, length(workers))
            } else if (length(workers) != length(reasons)) {
                stop("length(reason) must equal length(workers) or 1")
            }
        }
    }
    Workers <- emptydf(length(workers), 3, c("WorkerId", "Reason", "Valid"))
    # seq_along() is safe for zero-length `workers` (1:length() is not)
    for (i in seq_along(workers)) {
        GETparameters <- paste0("&WorkerId=", workers[i])
        # NULL[i] is NULL, so this is skipped when no reasons were given
        if (!is.null(reasons[i])) {
            GETparameters <- paste0(GETparameters, "&Reason=", curl_escape(reasons[i]))
        }
        # named `response` so the request() helper is not shadowed
        response <- request(operation, GETparameters = GETparameters, ...)
        if (is.null(response$valid)) {
            # transport-level failure: hand the raw response back to the caller
            return(response)
        }
        if (response$valid) {
            if (verbose) {
                message(i, ": Worker ", workers[i], " Unblocked")
            }
            if (is.null(reasons)) {
                Workers[i, ] <- c(workers[i], NA_character_, response$valid)
            } else {
                Workers[i, ] <- c(workers[i], reasons[i], response$valid)
            }
        } else if (!response$valid && verbose) {
            warning(i, ": Invalid Request for worker ", workers[i])
        }
    }
    Workers$Valid <- factor(Workers$Valid, levels = c('TRUE', 'FALSE'))
    return(Workers)
}
|
3350ddb4f23e23da0959444e87c0d6270c03ad3f
|
41bbdd673bef8e9f1fdc724556591f4607b220e6
|
/pascal/man/killfactors.Rd
|
9d6e0ee6a96c0048821012dcc32f48690b9b39ae
|
[] |
no_license
|
pascal-niklaus/pascal
|
bd42a5f00fd06d9b9f8b4e6227419943a817b409
|
88723458c71609a8971925d363671910a4fa421c
|
refs/heads/master
| 2023-06-27T03:37:37.156306
| 2023-06-14T13:35:16
| 2023-06-14T13:35:16
| 27,164,003
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,182
|
rd
|
killfactors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/killfactors.R
\name{killfactors}
\alias{killfactors}
\alias{restorefactors}
\title{Remove or add factor property of data.frame columns}
\usage{
killfactors(d, restore.numerics = FALSE, cols = seq_along(names(d)))
restorefactors(d, cols = seq_along(names(d)))
}
\arguments{
\item{d}{Source data frame containing the data set to process}
\item{restore.numerics}{Logical determining whether numeric columns
are converted to numeric instead of the default character type
(default FALSE). A column is considered numeric when all values
except NA can be converted to valid numeric data including NaN
and Inf.}
\item{cols}{Vector determining which columns should be inspected
(default: all). Columns can be specified by index or name.}
}
\description{
\code{killfactors} converts factors in data frames to regular
numeric or character vectors. R automatically converts strings to
factors when data frames are created. This is not always desired,
and \code{killfactors} will ``undo'' this behaviour.
}
\details{
\code{restorefactors} does the opposite: it converts character
columns into factors. Alternatively, columns may be specified by
name or index, and these are turned into factors regardless of
their type.
}
\examples{
d <- data.frame(c1=LETTERS[1:5],c2=factor(1:5),c3=1:5)
str(d)
## 'data.frame': 5 obs. of 3 variables:
## $ c1: Factor w/ 5 levels "A","B","C","D",..: 1 2 3 4 5
## $ c2: Factor w/ 5 levels "1","2","3","4",..: 1 2 3 4 5
## $ c3: int 1 2 3 4 5
d2 <- killfactors(d)
str(d2)
## 'data.frame': 5 obs. of 3 variables:
## $ c1: chr "A" "B" "C" "D" ...
## $ c2: chr "1" "2" "3" "4" ...
## $ c3: int 1 2 3 4 5
str(killfactors(d, restore.numerics = TRUE))
## 'data.frame': 5 obs. of 3 variables:
## $ c1: chr "A" "B" "C" "D" ...
## $ c2: num 1 2 3 4 5
## $ c3: int 1 2 3 4 5
str(restorefactors(d2, cols=c("c1","c3")))
## 'data.frame': 5 obs. of 3 variables:
## $ c1: Factor w/ 5 levels "A","B","C","D",..: 1 2 3 4 5
## $ c2: chr "1" "2" "3" "4" ...
## $ c3: Factor w/ 5 levels "1","2","3","4",..: 1 2 3 4 5
}
\author{
Pascal Niklaus \email{pascal.niklaus@ieu.uzh.ch}
}
|
9f742d5980e38ac2ade6cb79a37aec89e28ef0e9
|
9aafde089eb3d8bba05aec912e61fbd9fb84bd49
|
/codeml_files/newick_trees_processed/4763_0/rinput.R
|
c5a8fbf88cb2afa8192c616f11af56447412e3c2
|
[] |
no_license
|
DaniBoo/cyanobacteria_project
|
6a816bb0ccf285842b61bfd3612c176f5877a1fb
|
be08ff723284b0c38f9c758d3e250c664bbfbf3b
|
refs/heads/master
| 2021-01-25T05:28:00.686474
| 2013-03-23T15:09:39
| 2013-03-23T15:09:39
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 135
|
r
|
rinput.R
|
# Read a Newick tree, strip its root, and write the unrooted tree back out.
library(ape)
tree <- read.tree("4763_0.txt")
tree_unrooted <- unroot(tree)
write.tree(tree_unrooted, file = "4763_0_unrooted.txt")
|
d7b0149bb7b61b0795e820fd3678ba862a97a4c1
|
8d76d039cba8c977c81b3495fb3934d3287ae175
|
/CP03_02_markdown_small.R
|
2efa11b5599e14e735aee6b071bafd32583f8247
|
[] |
no_license
|
rubiera/RCapstone_Week2Writeup
|
ff45b0c20ceb025f7e4a62eb668c30eb5115ff67
|
9ac3470b9fb729717bd02438cdcd81d526526fe0
|
refs/heads/master
| 2020-12-02T18:40:21.296539
| 2020-01-06T18:50:58
| 2020-01-06T18:50:58
| 231,083,542
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,289
|
r
|
CP03_02_markdown_small.R
|
# Capstone week-2 EDA (small sample): clean a 100-document blog sample,
# build term/document matrices, inspect 2-/3-gram frequencies, and plot the
# most frequent single terms.
# Fix applied: `=` assignment replaced with `<-` throughout.
library(textreadr)
library(tm)
library(caret)
library(tidyverse)
library(RWeka)
library(knitr)
library(quanteda)
# Raw corpora; skipNul avoids embedded-NUL read errors, warn=FALSE silences
# incomplete-final-line warnings.
en_blogs <-
  readLines("./../course-data/en_US/en_US.blogs.txt",skipNul = TRUE,warn=FALSE)
en_news <-
  readLines("./../course-data/en_US/en_US.news.txt",skipNul = TRUE,warn=FALSE)
en_twitter <-
  readLines("./../course-data/en_US/en_US.twitter.txt",skipNul = TRUE,warn=FALSE)
str(en_blogs)
#chr [1:899288]
str(en_news)
#chr [1:77259]
str(en_twitter)
#chr [1:2360148]
# Drop non-ASCII characters (emoji, curly quotes) before tokenizing.
en_blogs <- iconv(en_blogs, "latin1", "ASCII", sub="")
en_news <- iconv(en_news, "latin1", "ASCII", sub="")
en_twitter <- iconv(en_twitter, "latin1", "ASCII", sub="")
# Small working samples: first 100 lines of each source.
skim_blogs_small <- en_blogs[1:100]
skim_news_small <- en_news[1:100]
skim_twitter_small <- en_twitter[1:100]
### corpora 1000
corpus_blogs_01 <- VCorpus(VectorSource(skim_blogs_small))
###### blogs large (don't do any other large)
# Standard tm cleaning pipeline: lowercase, strip punctuation and numbers,
# stem, then remove SMART stopwords plus the literal token "english".
myCorpus_blogs_01 <- tm_map(corpus_blogs_01, content_transformer(tolower))
myCorpus_blogs_01 <- tm_map(myCorpus_blogs_01, removePunctuation)
myCorpus_blogs_01 <- tm_map(myCorpus_blogs_01, removeNumbers)
myCorpus_blogs_01 <- tm_map(myCorpus_blogs_01, stemDocument)
myCorpus_blogs_01 <- tm_map(myCorpus_blogs_01,
removeWords,c(stopwords(source = "smart"),"english"))
myDTM_blogs_01 <- DocumentTermMatrix(myCorpus_blogs_01,
control = list(minWordLength = 1))
inspect(myDTM_blogs_01)
#plotting
myTDM_blogs_01 <- TermDocumentMatrix(myCorpus_blogs_01,
control = list(minWordLength = 1))
inspect(myTDM_blogs_01)
findFreqTerms(myTDM_blogs_01,5)
# blogs 2 grams
BigramTokenizer_2gram <- function(x) NGramTokenizer(x, Weka_control(min = 2, max = 2))
txtTdmBi_blogs_2gram <- TermDocumentMatrix(myCorpus_blogs_01,
control = list(tokenize = BigramTokenizer_2gram))
inspect(txtTdmBi_blogs_2gram)
findFreqTerms(txtTdmBi_blogs_2gram,2)
#findAssocs(txtTdmBi_blogs_2gram, "high school", 0.5)
# blogs 3 grams
BigramTokenizer_3gram<- function(x) NGramTokenizer(x, Weka_control(min = 3, max = 3))
txtTdmBi_blogs_3gram<- TermDocumentMatrix(myCorpus_blogs_01,
control = list(tokenize = BigramTokenizer_3gram))
inspect(txtTdmBi_blogs_3gram)
findFreqTerms(txtTdmBi_blogs_3gram,1)[1:20]
#findAssocs(txtTdmBi_blogs_3gram, "cricket world cup", 0.5)
#I need a much smaller corpus to generate any plots....
# Term frequencies: row sums of the TDM, sorted descending, filtered to
# freq >= 5 for a horizontal bar chart and a kable table.
matrix_myTDM_blogs_01 <- as.matrix(myTDM_blogs_01)
str(matrix_myTDM_blogs_01)
dimnames(matrix_myTDM_blogs_01)$Terms[1:20]
matrixsums_myTDM_blogs_01 <- sort(rowSums(matrix_myTDM_blogs_01),decreasing=TRUE)
matrixDF_myTDM_blogs_01 <- data.frame(word = names(matrixsums_myTDM_blogs_01),
freq=matrixsums_myTDM_blogs_01)
filtermatrixDF_myTDM_blogs_01 <- filter(matrixDF_myTDM_blogs_01, freq >= 5)
filtermatrixDF_myTDM_blogs_01 %>%
mutate(word = reorder(word, freq)) %>%
ggplot(aes(word, freq)) +
geom_col() +
xlab(NULL) +
coord_flip()
kable(filtermatrixDF_myTDM_blogs_01)
matrixDF_myTDM_blogs_01$word[1:40]
##########################
#end of medium EDA
##########################
|
68ab71b5000c1d0c6541f2fd999732d553220840
|
654b3ecea617c0aa0c9cc7188e56ecee9afdbc12
|
/R/get_weather.R
|
b9ce6c119ebd64a8c139e46c5129a7273575ac98
|
[
"MIT"
] |
permissive
|
UBC-MDS/weatheR
|
2685a3fea9ecb1b6bc86ea2340512c60755f046e
|
f81bd0fa6398b51093952ef6ee5e9c61e5769f00
|
refs/heads/master
| 2020-03-08T06:32:29.809433
| 2018-04-13T00:10:16
| 2018-04-13T00:10:16
| 127,974,282
| 0
| 5
| null | 2018-04-13T00:10:17
| 2018-04-03T22:09:48
|
R
|
UTF-8
|
R
| false
| false
| 1,545
|
r
|
get_weather.R
|
# get_weather(city, api)
#
# Return a data frame of forecast time, temperature (Celsius) and scaled
# humidity for a city, via the OpenWeatherMap 5-day/3-hour forecast API.
#
# For example: get_weather("London,UK", api)
#
# city: "City,CountryCode" string understood by OpenWeatherMap.
# api:  OpenWeatherMap API key.
# Errors on 404 (unknown city) and 401 (bad API key).
get_weather <- function(city,api){
  require(httr)
  #require(ggplot2)
  require(dplyr)
  require(tidyr)
  #use public api to grab data
  url <- GET(paste0("api.openweathermap.org/data/2.5/forecast?q=",city,"&appid=",api))
  if (status_code(url) == 404){
    stop("wrong city name")
  } else if (status_code(url) == 401){
    stop("wrong API")
  }
  data <- content(url)
  # number of 3-hourly forecast slots to keep (8 slots = 24 hours)
  n_slots <- 8
  # load time: "HH:MM" extracted from each forecast timestamp
  time <- rep(0, n_slots)
  for (i in seq_len(n_slots)){
    time[i] <- substr(strsplit(data$list[[i]]$dt_txt," ")[[1]][2],1,5)
  }
  # load temperature (Kelvin -> Celsius, rounded to one decimal)
  temp <- rep(0, n_slots)
  for (i in seq_len(n_slots)){
    temp[i] <- data$list[[i]]$main$temp-273.15
  }
  temp <- round(temp,digits = 1)
  # load humidity, scaled by 0.1 (for the secondary plot axis below)
  humidity <- rep(0, n_slots)
  for (i in seq_len(n_slots)){
    humidity[i] <- data$list[[i]]$main$humidity*0.1
  }
  #create a data frame of every variable
  dataframe <- data.frame(time,temp,humidity)
  # create pictures
  #p <- ggplot(dataframe,aes(x = time))+
  #  geom_point(aes(y = temp),color = "blue",size =2)+
  #  geom_text(aes(y=temp, label = paste0(temp,"C")), vjust=2)+
  #  geom_line(aes(y = temp, group = 1), color = "blue")+
  #  geom_bar(aes(y=humidity),stat = "identity",alpha = 0.5)+
  #  geom_text(aes(y=humidity, label = humidity*10), vjust=2)+
  #  scale_y_continuous(sec.axis = sec_axis(~.*5, name = "Humidity"))+
  #  labs(x = "Time", y = "Temperature")+
  #  ggtitle(paste0("Weather of ",city))
  return(dataframe)
}
|
499d5d792ef3ce389b5f3d9aa20c3c57618fb9ab
|
5f7be819ef6bb5c31311b7bc63b1120f0bf1ce81
|
/load-data.R
|
58ecc007ca5345e177617c0b94931510b88b9528
|
[] |
no_license
|
satahippy/coursera-exdata-project2
|
e443ff5e4eb043a1f9d962dd71bf586a3ee493d8
|
10c879b8dffd8d680551a1b44cdcfa5f7ce4f79c
|
refs/heads/master
| 2021-01-10T04:03:49.282184
| 2015-11-22T16:28:43
| 2015-11-22T16:28:43
| 46,672,033
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 321
|
r
|
load-data.R
|
# Download (once) and load the NEI PM2.5 emissions data sets.
if (!dir.exists('data')) {
  dir.create('data')
}
# Fetch and unpack the source archive only when it is not already cached.
src <- "data/data.zip"
if (!file.exists(src)) {
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip", src)
  unzip(src, exdir = 'data')
}
# NEI: PM2.5 emissions records; SCC: source classification code table.
NEI <- readRDS("data/summarySCC_PM25.rds")
SCC <- readRDS("data/Source_Classification_Code.rds")
|
8c100ab055f5777de3cac826d4d711be4bcae0b7
|
81be3ddb554835d0b1c57361ad1a953d8b704adf
|
/surya_etal_front_virol_data_s2_sarbecovirus/sarbecovirus_code_r_detect_outliers.r
|
663a4e371a3d8deebf6ee03419288fb109d1e0e3
|
[] |
no_license
|
ChrisOrgan/Covid_Analysis
|
046dc8b92401e46d9c9a3877ec912f7a0fffbcf8
|
f3f50591ca414931b65dcf98188c8213f63c79ea
|
refs/heads/master
| 2023-04-18T13:20:27.733339
| 2023-03-06T20:30:09
| 2023-03-06T20:30:09
| 256,229,889
| 0
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 650
|
r
|
sarbecovirus_code_r_detect_outliers.r
|
# Written by Kevin Surya.
# This code is part of the coronavirus-evolution project.
# Flags multivariate outliers using robust (MCD) Mahalanobis distances and
# writes the outlier row names to a text file, one per line.
library(MASS)
# Read data ----
dat <- read.table(
"sarbecovirus_data.txt",
sep = "\t",
header = TRUE,
row.names = 1
)
# drop the fourth column; it is excluded from the outlier screen
dat <- dat[, -4]
# Detect outliers ----
# Robust center/covariance via MCD on 75% of the rows; squared Mahalanobis
# distances are compared against a chi-square cutoff at alpha = 0.01.
output75 <- cov.mcd(dat, quantile.used = nrow(dat) * 0.75)
mhmcd75 <- mahalanobis(dat, output75$center, output75$cov)
alpha <- 0.01
# df = 3 assumes three columns remain after the drop above — consider
# df = ncol(dat); TODO confirm against the data file
cutoff <- qchisq(p = 1 - alpha, df = 3)
outliers <- names(which(mhmcd75 > cutoff))
# Write outliers to a text file ----
write.table(
outliers,
file = "sarbecovirus_data_outliers.txt",
quote = FALSE,
sep = "\n",
row.names = FALSE,
col.names = FALSE
)
|
7a8873d3d39613bc359c1269e1ea100212255def
|
2daf15129d00829346a1ceb12e3fa28d555aeb33
|
/taxi_questions.R
|
5dbb0fddb5d632ae9e6d9492e4c746bb0ae5994f
|
[] |
no_license
|
mavaladezt/NYC_Taxi_Profit_Estimator
|
185b5c0a64a99a63ea7a9604eaab89eaf9ff85a3
|
1b0b522cf093e1ede728e3a0c293ad8b606fd6e7
|
refs/heads/master
| 2020-08-09T09:42:21.316451
| 2019-11-02T16:44:57
| 2019-11-02T16:44:57
| 214,060,629
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,617
|
r
|
taxi_questions.R
|
library(readr)
library(data.table)
library(pryr)
library(dplyr)
library(ggplot2)
library(ggthemes)
#==============================================================================
#DF_OVERVIEW ####
# Build a whole-year overview of NYC yellow-taxi trips from six months of
# data. NOTE(review): setwd() with an absolute personal path makes this
# script non-portable — prefer relative paths or here::here().
setwd("/Users/mavt/Dropbox/School/NYCDataScience/Projects/Shiny/")
getwd()
#df <- fread("trips_2019_01_to_06.csv")
# scale 181 observed days up to a 365-day year
multiplier=365/181
df <- read_csv("trips_2019_01_to_06.csv",
col_types = cols(year = col_skip(),
month = col_skip(),
payment_type= col_skip()))
# totals per origin/destination borough and zone
df %>%
group_by(O_borough,O_Zone,D_borough,D_Zone) %>%
summarize(passengers=sum(passengers),distance=sum(distance),amount_fare=sum(amount_fare),amount_extra=sum(amount_extra),amount_mta=sum(amount_mta),amount_tip=sum(amount_tip),amount_tolls=sum(amount_tolls),amount_improvement=sum(amount_improvement),amount_total=sum(amount_total),duration=sum(duration),trips=sum(trips)) ->
df_total
# annualize every measure
df_total %>% mutate(passengers=(passengers*multiplier),distance=(distance*multiplier),amount_fare=(amount_fare*multiplier),amount_extra=(amount_extra*multiplier),amount_mta=(amount_mta*multiplier),amount_tip=(amount_tip*multiplier),amount_tolls=(amount_tolls*multiplier),amount_improvement=(amount_improvement*multiplier),amount_total=(amount_total*multiplier),duration=(duration*multiplier),trips=(trips*multiplier)) ->
df_total
object_size(df_total)
# weekday counts in the Jan-Jun window (and in a full year)
#Mon 25 51
#Tue 26 52
#Wed 26 52
#Thu 26 52
#Fri 26 52
#Sat 26 52
#Sun 26 51
#==============================================================================
#FILE PROCESSING ####
# 10k-row random sample (appears unused downstream; kept for ad-hoc checks)
df_small = df[sample(1:nrow(df),size = 10000,replace=F),]
#General Dataframe
# aggregate by O/D plus weekday and hour range
df %>%
group_by(O_borough,O_Zone,D_borough,D_Zone,wday,range_hrs) %>%
summarize(passengers=sum(passengers),distance=sum(distance),amount_fare=sum(amount_fare),amount_extra=sum(amount_extra),amount_mta=sum(amount_mta),amount_tip=sum(amount_tip),amount_tolls=sum(amount_tolls),amount_improvement=sum(amount_improvement),amount_total=sum(amount_total),duration=sum(duration),trips=sum(trips)) ->
df
#%>% mutate(elapsed_days=ifelse(wday==1,25,26)) -> df
#Daily Dataframe
# annualized copy of the aggregated data
df %>% mutate(passengers=(passengers*multiplier),distance=(distance*multiplier),amount_fare=(amount_fare*multiplier),amount_extra=(amount_extra*multiplier),amount_mta=(amount_mta*multiplier),amount_tip=(amount_tip*multiplier),amount_tolls=(amount_tolls*multiplier),amount_improvement=(amount_improvement*multiplier),amount_total=(amount_total*multiplier),duration=(duration*multiplier),trips=(trips*multiplier)) ->
df_summary
#nrow(df_daily)
#object_size(df_daily)
# roll up to weekday x hour-range for the overview export
df_summary %>%
# filter(O_Zone=='Upper West Side South',D_Zone=='JFK Airport') %>%
group_by(wday,range_hrs) %>%
summarize(passengers=sum(passengers),distance=sum(distance),amount_fare=sum(amount_fare),amount_extra=sum(amount_extra),amount_mta=sum(amount_mta),amount_tip=sum(amount_tip),amount_tolls=sum(amount_tolls),amount_improvement=sum(amount_improvement),amount_total=sum(amount_total),duration=sum(duration),trips=sum(trips)) ->
df_overview
#object_size(df_daily_overview)
#nrow(df_daily_overview)
fwrite(df_overview,"df_overview.csv")
#==============================================================================
# Waterfall-style chart of the fare components of an average trip:
# per-trip averages of each amount, stacked via cumulative start/end bounds.
tot_trips=sum(df_overview$trips)
df_overview %>%
select(amount_fare,amount_extra,amount_mta,amount_tolls,amount_improvement,amount_tip,amount_total) %>%
rename(Fare=amount_fare,Extra=amount_extra,MTA=amount_mta,Tolls=amount_tolls,Improv=amount_improvement,Tip=amount_tip,Total=amount_total) %>%
summarize_all(sum) -> x
columns=colnames(x)
x %>% transpose() %>% cbind(columns) -> wat
# convert totals to per-trip averages
wat$V1=wat$V1/tot_trips
colnames(wat)=c('Amount','desc')
wat$desc <- as.character(wat$desc)
wat$id <- seq_along(wat$Amount)
wat$type <- ifelse(wat$Amount>0,"in","out")
wat$type[wat$desc=='Total']='net'
# running cumulative bounds; the final "Total" bar starts from zero
wat$end <- cumsum(wat$Amount)
wat$end <- c(head(wat$end,-1),0)
wat$start <- c(0,head(wat$end,-1))
wat <- wat[,c(4,3,6,1,5,2)]
wat$type <- as.factor(wat$type)
xlabels=as.character(wat$desc)
voladora <- ggplot(wat,aes(x=desc,fill=type)) +
geom_rect(aes(xmin=id-.45,xmax=id+.45,ymin=end,ymax=start)) +
scale_x_discrete(labels=xlabels) +
ggtitle("Average NYC Yellow Taxi Trip") +
theme(plot.title = element_text(hjust=0.5))
voladora
str(voladora)
#ggplot(data = wat,aes(x =wat$Desc)) + geom_bar(aes(fill =type))
#==============================================================================
#==============================================================================
#DF_DATA ####
# Build the per-O/D-pair per-trip averages table exported for the Shiny app.
setwd("/Users/mavt/Dropbox/School/NYCDataScience/Projects/Shiny/taxis/")
getwd()
#df <- fread("trips_2019_01_to_06.csv")
# NOTE(review): this first multiplier is immediately overwritten on the next
# line — the annual scaling is never applied in this section; confirm intent.
multiplier=365/181
multiplier=1/181
df <- read_csv("trips_2019_01_to_06.csv",
col_types = cols(year = col_skip(),
month = col_skip(),
payment_type= col_skip()))
# sum by O/D pair, scale to per-day, then divide by trips for per-trip
# averages; drop unknown zones and sort by trip volume
df %>%
group_by(O_borough,O_Zone,D_borough,D_Zone) %>%
summarize(passengers=sum(passengers),distance=sum(distance),amount_fare=sum(amount_fare),amount_extra=sum(amount_extra),amount_mta=sum(amount_mta),amount_tip=sum(amount_tip),amount_tolls=sum(amount_tolls),amount_improvement=sum(amount_improvement),amount_total=sum(amount_total),duration=sum(duration),trips=sum(trips)) %>%
mutate(passengers=(passengers*multiplier),distance=(distance*multiplier),amount_fare=(amount_fare*multiplier),amount_extra=(amount_extra*multiplier),amount_mta=(amount_mta*multiplier),amount_tip=(amount_tip*multiplier),amount_tolls=(amount_tolls*multiplier),amount_improvement=(amount_improvement*multiplier),amount_total=(amount_total*multiplier),duration=(duration*multiplier),trips=(trips*multiplier)) %>%
mutate(passengers=(passengers/trips),distance=(distance/trips),amount_fare=(amount_fare/trips),amount_extra=(amount_extra/trips),amount_mta=(amount_mta/trips),amount_tip=(amount_tip/trips),amount_tolls=(amount_tolls/trips),amount_improvement=(amount_improvement/trips),amount_total=(amount_total/trips),duration=(duration/trips)) %>%
filter(O_borough!="Unknown" & D_borough!="Unknown" & O_Zone!="NV" & D_Zone!="NV") %>%
rename('From Borough'=O_borough,Zone=O_Zone,'To Borough'=D_borough,"To Zone"=D_Zone,"Daily Passengers"=passengers,Distance=distance,Fare=amount_fare,Extra=amount_extra,MTA=amount_mta,Tip=amount_tip,Tolls=amount_tolls,Improv.=amount_improvement,Total=amount_total,"Avg.Mins"=duration,Trips=trips) %>%
select(-"Daily Passengers") %>%
arrange(-Trips) ->
df_data
object_size(df_data)
fwrite(df_data,"df_data.csv")
#==============================================================================
# HEATMAPS ####
# Heatmap 1: average daily trip volume by weekday x hour-range
# (trips/365 converts yearly totals to a daily average).
df_overview[complete.cases(df_overview),] %>%
select(wday,range_hrs,trips) %>%
mutate(trips=(trips/365)) %>%
ggplot(aes(x = wday, y = range_hrs)) +
geom_tile(aes(fill = trips)) + scale_fill_gradient(low = "white", high = "black")
# Heatmap 2: average speed (miles per hour; duration is in minutes)
# by weekday x hour-range.
df_overview[complete.cases(df_overview),] %>%
select(wday,range_hrs,distance,duration) %>%
mutate(speed=(distance/(duration/60))) %>%
select(-distance,-duration) %>%
ggplot(aes(x = wday, y = range_hrs)) +
geom_tile(aes(fill = speed)) + scale_fill_gradient(low = "darkred", high = "white")
#==============================================================================
#FILE PROCESSING ####
#General Dataframe
# Aggregate trips by origin/destination label (borough + zone) and
# weekday x hour-range — this feeds the app's heatmap tab.
# Column indices c(1:6,8,16,17) select the O/D, time and metric columns;
# verify against the trip file's layout before changing.
df[complete.cases(df),c(1:6,8,16,17)] %>%
filter(O_borough!="Unknown" & D_borough!="Unknown" & O_Zone!="NV" & D_Zone!="NV") %>%
mutate(Origen=paste0(O_borough," - ",O_Zone),Destino=paste0(D_borough," - ",D_Zone)) %>%
select(-O_borough,-O_Zone,-D_borough,-D_Zone) %>%
group_by(Origen,Destino,wday,range_hrs) %>%
summarize(distance=sum(distance),duration=sum(duration),trips=sum(trips)) ->
df_heatmaps
object_size(df_heatmaps)
nrow(df_heatmaps)
fwrite(df_heatmaps,"df_heatmaps.csv")
#==============================================================================
# One-off loader: copy df_heatmaps.csv into a SQLite database so the
# Shiny app can query it instead of holding the CSV in memory.
library(RSQLite)
library(data.table)
setwd("/Users/mavt/Dropbox/School/NYCDataScience/Projects/Shiny/taxis/")
getwd()
csvpath = "./df_heatmaps.csv"
dbname = "./taxis.sqlite"
tblname = "df_heatmaps"
## read csv
data <- fread(input = csvpath,
sep = ",",
header = TRUE)
## connect to database
conn <- dbConnect(drv = SQLite(),
dbname = dbname)
## write table (fails if the table already exists; drop it first to reload)
dbWriteTable(conn = conn,
name = tblname,
value = data)
## list tables — quick sanity check that the write succeeded
dbListTables(conn)
## disconnect
dbDisconnect(conn)
#==============================================================================
|
2d373a345cff838ddbe746989e0d6234001b9e53
|
9d515dfeb17d7ff8a3f00d960c2c799dfccc12b3
|
/Getting and Cleaning Data/Assighnment W3/Merging data.R
|
8740017d40f4425a0de53cff2a137a6cba791823
|
[] |
no_license
|
rokbohinc86/2019.3.28.Data_Science-Specialization
|
af8e1c106f4ad6b6cb38a9c6aec2c1f09f0bd45c
|
066d2a4ec5b61edbf38ae643eac3c286ca9e0298
|
refs/heads/master
| 2020-08-05T14:17:39.837996
| 2019-10-03T12:34:17
| 2019-10-03T12:34:17
| 212,575,890
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,166
|
r
|
Merging data.R
|
## Demonstrations of merging data frames (Coursera "Getting and Cleaning
## Data", week 3): base merge() vs plyr's join()/join_all().
## FIX: join(), arrange() and join_all() below come from plyr, which this
## script never attached — load it so the examples actually run.
library(plyr)
if(!file.exists("./data")){dir.create("./data")}
# NOTE(review): these legacy Dropbox URLs may no longer resolve, and
# method="curl" requires curl on the system PATH — confirm before running.
fileUrl1 = "https://dl.dropboxusercontent.com/u/7710864/data/reviews-apr29.csv"
fileUrl2 = "https://dl.dropboxusercontent.com/u/7710864/data/solutions-apr29.csv"
download.file(fileUrl1,destfile="./data/reviews.csv",method="curl")
download.file(fileUrl2,destfile="./data/solutions.csv",method="curl")
reviews = read.csv("./data/reviews.csv"); solutions <- read.csv("./data/solutions.csv")
head(reviews,2)
# Merge on differently named key columns; all=TRUE keeps unmatched rows
# from both sides (a full outer join).
mergedData = merge(reviews,solutions,by.x="solution_id",by.y="id",all=TRUE)
head(mergedData)
# By default merge() joins on every column name the two frames share.
intersect(names(solutions),names(reviews))
mergedData2 = merge(reviews,solutions,all=TRUE)
head(mergedData2)
# plyr::join is faster but only joins on shared column names.
df1 = data.frame(id=sample(1:10),x=rnorm(10))
df2 = data.frame(id=sample(1:10),y=rnorm(10))
arrange(join(df1,df2),id)
# join_all merges a whole list of data frames on their common key.
df1 = data.frame(id=sample(1:10),x=rnorm(10))
df2 = data.frame(id=sample(1:10),y=rnorm(10))
df3 = data.frame(id=sample(1:10),z=rnorm(10))
dfList = list(df1,df2,df3)
join_all(dfList)
|
182601d07f25009d5cb29568f512b8eb334b27f4
|
a3a90785fcf311da21ec1fb8a7b06e352909165d
|
/data-raw/Great_Merge_example_data.R
|
43635d7362acb98f74bf6128b42f0402c4f42166
|
[] |
no_license
|
pmartR/pmartRdata
|
e77223c1a26128c0f4516c2e54f6e8dae1c63aac
|
bb4729346c264b45deb310f2d99199cd09776051
|
refs/heads/master
| 2023-01-09T18:03:49.387617
| 2023-01-03T20:08:24
| 2023-01-03T20:08:24
| 69,269,999
| 5
| 8
| null | 2023-01-03T20:08:26
| 2016-09-26T16:32:28
|
R
|
UTF-8
|
R
| false
| false
| 3,388
|
r
|
Great_Merge_example_data.R
|
# Build script for the pmartRdata example objects: assemble each omics
# dataset (e_data = expression, f_data = sample metadata, e_meta =
# biomolecule metadata) into its pmartR S4 container and save it into the
# package's data/ directory with usethis::use_data().
library (pmartR)
library (pmartRdata)
# Peptides (pepes) -------------------------------------------------------------
data(pep_edata)
data(pep_fdata)
data(pep_emeta)
pep_object <- as.pepData(
e_data = pep_edata,
f_data = pep_fdata,
e_meta = pep_emeta,
edata_cname = "Mass_Tag_ID",
fdata_cname = "SampleID",
emeta_cname = "Protein",
check.names = FALSE
)
usethis::use_data(pep_object, overwrite = TRUE)
# Techrep pepes ----------------------------------------------------------------
data(techrep_edata)
data(techrep_fdata)
# NOTE(review): emeta_cname is given but no e_meta is supplied here —
# confirm whether an e_meta table was intended for the techrep object.
techrep_pep_object = as.pepData(
e_data = techrep_edata,
f_data = techrep_fdata,
edata_cname = "Mass_Tag_ID",
fdata_cname = "RunID",
emeta_cname = "Protein",
techrep_cname = 'TECH_REP',
check.names = FALSE
)
usethis::use_data(techrep_pep_object, overwrite = TRUE)
# Proteins ---------------------------------------------------------------------
data(pro_edata)
data(pro_fdata)
pro_object <- as.proData(
e_data = pro_edata,
f_data = pro_fdata,
edata_cname = "Reference",
fdata_cname = "SampleID",
data_scale = "log2",
check.names = FALSE
)
# Data was normalized previously. Update the attributes to reflect this.
attr(pro_object, "data_info")$norm_info$is_normalized <- TRUE
attr(pro_object, "data_info")$norm_info$norm_fn <- "median"
usethis::use_data(pro_object, overwrite = TRUE)
# Metabolites ------------------------------------------------------------------
data("metab_edata")
data("metab_fdata")
metab_object <- as.metabData(
e_data = metab_edata,
f_data = metab_fdata,
edata_cname = "Metabolite",
fdata_cname = "SampleID",
check.names = FALSE
)
usethis::use_data(metab_object, overwrite = TRUE)
# Lipids -----------------------------------------------------------------------
data("lipid_edata")
data("lipid_fdata")
lipid_object <- as.lipidData(
e_data = lipid_edata,
f_data = lipid_fdata,
edata_cname = "LipidCommonName",
fdata_cname = "Sample_Name",
check.names = FALSE
)
usethis::use_data(lipid_object, overwrite = TRUE)
# Smoking mice -----------------------------------------------------------------
data("smoke_edata")
data("smoke_fdata")
data("smoke_emeta")
smoke_pep_object <- as.pepData(
e_data = smoke_edata,
f_data = smoke_fdata,
e_meta = smoke_emeta,
edata_cname = "Mass_Tag_ID",
fdata_cname = "SampleID",
emeta_cname = "Protein",
data_scale = "abundance",
check.names = FALSE
)
usethis::use_data(smoke_pep_object, overwrite = TRUE)
# Isobaric pepes ---------------------------------------------------------------
data("isobaric_edata")
data("isobaric_emeta")
data("isobaric_fdata")
isobaric_object <- as.isobaricpepData(
e_data = isobaric_edata,
f_data = isobaric_fdata,
e_meta = isobaric_emeta,
edata_cname = "Peptide",
fdata_cname = "Sample",
emeta_cname = "Protein",
check.names = FALSE
)
usethis::use_data(isobaric_object, overwrite = TRUE)
# NMR stuffs -------------------------------------------------------------------
data("nmr_edata_identified")
data("nmr_fdata_identified")
data("nmr_emeta_identified")
nmr_object_identified <- as.nmrData(
e_data = nmr_edata_identified,
f_data = nmr_fdata_identified,
e_meta = nmr_emeta_identified,
edata_cname = "Metabolite",
fdata_cname = "SampleID",
emeta_cname = "Metabolite",
check.names = FALSE
)
usethis::use_data(nmr_object_identified, overwrite = TRUE)
|
454569d6a6ed31d99daa74bbca2b3a17a1910cfb
|
f7f63838b028cc57a9824e575124b6f4ac0241a6
|
/app/assignments/residuals/assignment.R
|
42d2f1352fc431875b154175c976010438e3b631
|
[
"MIT"
] |
permissive
|
RobinEchoAlex/flexTeaching
|
3cfb01fb88b2c9dfdabdcfba4fd27e3f028ff542
|
033b8fb9959d05014d963429f1607a1147e810ca
|
refs/heads/master
| 2023-08-16T13:27:17.208387
| 2021-07-19T13:18:58
| 2021-07-19T13:18:58
| 378,942,363
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,516
|
r
|
assignment.R
|
assignment = read.csv('meta_info.csv', stringsAsFactors = FALSE)[1,"title"]
getData <- function(seed, secret, extra = ""){
  # Seed the RNG from the combined seed/secret/extra string so each
  # student (seed) + assignment (secret) pair gets a reproducible dataset.
  set.seed.alpha(paste0(seed, secret, extra))
  # Generate a random 150-point dataset plus its descriptive text.
  generated <- random.set(150)
  list(
    data = generated$data,
    info = list(text = generated$text)
  )
}
getAssignment <- function(seed, secret, assignmentDir = NULL, solutions = FALSE){
  # Render the assignment's Rmd to an HTML fragment in a temp file and
  # return its path. `seed`, `secret` and `solutions` are unused here but
  # kept so the API matches the other flexTeaching entry points.
  out_file <- tempfile(fileext = ".html")
  rmarkdown::render(
    input = paste0(assignmentDir, "/assignment.Rmd"),
    output_format = "html_fragment",
    output_file = out_file,
    envir = new.env()
  )
  out_file
}
############ Helper functions
# Generate one random x/y dataset of a randomly chosen relationship type
# (linear, none, nonmonotone, nonlinear) together with the explanatory
# text a student's answer will be graded against.
# Returns list(data = data.frame(x, y), type = chr, text = chr).
random.set = function(N = 150){
# Pick the relationship type uniformly at random.
type = sample(c("linear","none","nonmonotone","nonlinear"), 1)
df = make.set(type = type, N = N)
if( type == "nonmonotone"){
# No single correlation coefficient is meaningful for a U-shape.
correl = NA
text = "The relationship strong but it is not monotone. A correlation is not an appropriate statistic for characterizing this relationship."
}else if( type == "nonlinear"){
# Monotone but curved: report a rank correlation (Kendall's tau).
correl = round(cor(df$x, df$y, method = "kendall"),3)
text = paste0("The relationship strong but it is not linear.",
" A nonparametric correlation coefficient such as Kendall's $\\tau$ is appropriate statistic for characterizing this relationship.",
" Kendall's $\\tau=",correl,"$.")
}else{
# Linear (or "none"): report both Pearson's r and Kendall's tau, and
# describe the strength/sign of the Pearson correlation.
correl = c(kendall=round(cor(df$x, df$y, method = "kendall"),3),
pearson=round(cor(df$x, df$y),3))
if(abs(correl[['pearson']])<.3){
text.str = "This linear correlation is weak."
}else if(abs(correl[['pearson']])<.5){
text.str = "This linear correlation is of medium strength."
}else if(abs(correl[['pearson']])<.8){
text.str = "This linear correlation is fairly strong."
}else{
text.str = "This linear correlation is very strong."
}
if(sign(correl[['pearson']])>0){
text.sign = "The relationship is positive."
}else{
text.sign = "The relationship is negative."
}
text = paste0(text.str," ", text.sign,
" Typically, a Pearson correlation would be used to characterize this relationship, but a nonparametric correlation might also be reported.",
" Pearson's $r=",correl[['pearson']],"$, and",
" Kendall's $\\tau=",correl[['kendall']],"$.")
}
list(data = df, type = type, text = text)
}
make.set = function(type="linear", N = 150){
  # Dispatch to the generator for the requested relationship type.
  # "none" reuses the linear generator with the slope zeroed out.
  # An unrecognized type yields invisible NULL, matching switch() semantics.
  if (type == "linear") {
    make.linear(N)
  } else if (type == "nonlinear") {
    make.nonlinear(N)
  } else if (type == "nonmonotone") {
    make.nonmonotone(N)
  } else if (type == "none") {
    make.linear(N, TRUE)
  } else {
    invisible(NULL)
  }
}
make.nonlinear <- function(N){
  # Monotone exponential-growth relationship with noise, randomly mirrored
  # so the curve can run in either direction.
  x <- sort(runif(N, 0, 100))
  x_std <- (x - min(x)) / sd(x)
  base_y <- exp(x_std^1.2)
  noise_sd <- diff(range(base_y)) / 10
  # RNG draw order is load-bearing for reproducibility: the first coin flip
  # decides x-reversal; only when it is 0 is a second flip drawn to decide
  # y-mirroring.
  if (rbinom(1, 1, .5)) {
    x <- rev(x)
  } else if (rbinom(1, 1, .5)) {
    span <- diff(range(base_y))
    base_y <- min(base_y) + span * (1 - (base_y - min(base_y)) / span)
  }
  data.frame(x = x, y = 50 + base_y + rnorm(base_y, 0, noise_sd))
}
make.nonmonotone <- function(N){
  # U-shaped (quadratic) relationship, randomly flipped to an inverted U.
  x <- runif(N, 0, 100)
  z <- (x - mean(x)) / sd(x)
  curve <- z^2
  if (rbinom(1, 1, .5)) {
    # Mirror the curve vertically within its own range.
    span <- diff(range(curve))
    curve <- min(curve) + span * (1 - (curve - min(curve)) / span)
  }
  # Noise SD scales with the curve's spread (unchanged by the mirror).
  noise_sd <- diff(range(curve)) / 10
  data.frame(x = x, y = 5 * (curve + rnorm(curve, 0, noise_sd)))
}
make.linear <- function(N, no.cor = FALSE){
  # Linear relationship with a random slope in (-1, 1); with no.cor = TRUE
  # the slope is zeroed and a fixed, large noise SD is used instead.
  x <- rnorm(N, 50, 15)
  x_std <- (x - mean(x)) / sd(x)
  slope <- (rbeta(1, 2, 2) * 2 - 1) * (1 - no.cor)
  trend <- slope * x_std
  # NOTE: ifelse() evaluates both branches, so the runif() draw is consumed
  # even when no.cor is TRUE — kept this way to preserve the RNG stream.
  noise_sd <- ifelse(no.cor, 10, diff(range(trend)) / runif(1, 0, 10))
  data.frame(x = x, y = 5 * (trend + rnorm(trend, 0, noise_sd)))
}
|
4e7bed2a82672e94c747c4b725df1d026494c368
|
35de14603463a45028bd2aca76fa336c41186577
|
/man/DEEPLEARNING.melt.Rd
|
e22f46aa59161eef43b654a752f1b6217005e739
|
[
"MIT"
] |
permissive
|
UKDRI/echolocatoR
|
e3cf1d65cc7113d02b2403960d6793b9249892de
|
0ccf40d2f126f755074e731f82386e4e01d6f6bb
|
refs/heads/master
| 2023-07-14T21:55:27.825635
| 2021-08-28T17:02:33
| 2021-08-28T17:02:33
| 416,442,683
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,306
|
rd
|
DEEPLEARNING.melt.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Dey_DeepLearning.R
\name{DEEPLEARNING.melt}
\alias{DEEPLEARNING.melt}
\title{Melt deep learning annotations into long-format}
\usage{
DEEPLEARNING.melt(
ANNOT,
model = c("Basenji", "BiClassCNN", "DeepSEA", "ChromHMM", "Roadmap", "Others"),
aggregate_func = "mean",
replace_NA = NA,
replace_negInf = NA,
save_path = F
)
}
\description{
Melt deep learning annotations into long-format
}
\examples{
\dontrun{
root <- "/sc/arion/projects/pd-omics/brian/Fine_Mapping/Data/GWAS/Nalls23andMe_2019/_genome_wide"
## merged_dat <- merge_finemapping_results(dataset = "Data/GWAS/Nalls23andMe_2019", minimum_support = 0, LD_reference = "UKB")
## ANNOT <- DEEPLEARNING.query(merged_dat=merged_dat, level="Allelic_Effect", type="annot")
#### Allelic_Effect ####
path <- file.path(root,"Dey_DeepLearning/Nalls23andMe_2019.Dey_DeepLearning.annot.Allelic_Effect.csv.gz")
#### Variant_Level ####
path <- file.path(root,"Dey_DeepLearning/Nalls23andMe_2019.Dey_DeepLearning.annot.Variant_Level.csv.gz")
ANNOT <- data.table::fread(path, nThread=8)
ANNOT <- find_consensus_SNPs_no_PolyFun(ANNOT)
annot.melt <- DEEPLEARNING.melt(ANNOT=ANNOT, aggregate_func="mean", save_path=gsub("\\\\.csv\\\\.gz",".snp_groups_mean.csv.gz",path))
}
}
|
016828d6f5962820aa3b34c552edeba183107939
|
a40df82edb3d1888b9c078926c92e7ffd34a9302
|
/plot_ly_ex.R
|
32f97f4aa1b9b5f1fd7839ac71a182280db258b8
|
[] |
no_license
|
UM-Biostatistics/teachShiny
|
a10f0ee280c3dab7735391bc4637eeef8853216c
|
b092d4ea1344f6e5373951c71d002973b14976a2
|
refs/heads/master
| 2022-11-08T17:13:43.671011
| 2020-06-24T19:18:04
| 2020-06-24T19:18:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 556
|
r
|
plot_ly_ex.R
|
# Minimal plotly example: mtcars horsepower vs fuel economy as an
# interactive scatter plot with fixed axis ranges.
library(tidyverse)
library(plotly)
data(mtcars)
p <- plot_ly(data = mtcars,
type = "scatter",
mode = "markers") %>%
add_trace(
x = ~hp,
y = ~mpg,
marker = list(
color = '#22F082',
size = 10),
showlegend = FALSE) %>%
layout(title = "1974 Motor Trend US - MPG vs HP",
xaxis = list(title = "Gross Horsepower",
zeroline = TRUE,
range = c(0, 300)),
yaxis = list(title = "Miles/(US) Gallon",
range = c(0, 35))
)
# Printing the plotly object renders it in the viewer.
p
|
1fa91e1f4d8264b4cdf93cc6d5bdbffb788f18fa
|
74bc48ba64859a63855d204f1efd31eca47a223f
|
/Avito/001.Prav_EDA.R
|
66977d9dbae6bdd32a292aab477f7575cfe71d54
|
[] |
no_license
|
PraveenAdepu/kaggle_competitions
|
4c53d71af12a615d5ee5f34e5857cbd0fac7bc3c
|
ed0111bcecbe5be4529a2a5be2ce4c6912729770
|
refs/heads/master
| 2020-09-02T15:29:51.885013
| 2020-04-09T01:50:55
| 2020-04-09T01:50:55
| 219,248,958
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,405
|
r
|
001.Prav_EDA.R
|
###############################################################################################################################
# 01. Read all source files
###############################################################################################################################
train <- read_csv("./input/train.csv")
test <- read_csv("./input/test.csv")
head(train)
head(test)
# Row counts per activation date/weekday — used to compare train vs test
# time coverage before choosing a CV scheme.
train_activationdate_count <- train %>% mutate(WeekDay = weekdays(activation_date)) %>% group_by(activation_date, WeekDay) %>% summarise(rowCount = n())
test_activationdate_count <- test %>% mutate(WeekDay = weekdays(activation_date)) %>% group_by(activation_date, WeekDay) %>% summarise(rowCount = n())
###############################################################################################################################
# 02. Prav - decision to make cv schema
# - trainingSet from 2017-03-15 to 2017-03-30 and exclude later dates
# two possible cv schema ideas
# - 5 folds by weekday stratified, time based training and to time based test
# - 5 folds by random sampling
# third possible cv schema - last one to test, follow forum for experiences
# - treat the problem as time series and use >= 2017-03-26 for validation
###############################################################################################################################
train_subset <- train %>% filter(activation_date <= "2017-03-30")
train_subset %>% mutate(WeekDay = weekdays(activation_date)) %>% group_by(activation_date, WeekDay) %>% summarise(rowCount = n())
train_subset <- train_subset %>% mutate(WeekDay = weekdays(activation_date))
# Sanity checks: item_id should be unique, so these counts should agree.
length(unique(train_subset$item_id))
dim(train_subset)
###############################################################################################################################
# CV 5 folds - Weekday stratified
# Use python generated cv schema file
Prav_5folds_CVindices <- read_csv("./input/Prav_5folds_CVindices_weekdayStratified.csv")
train_subset <- left_join(train_subset, Prav_5folds_CVindices, by="item_id")
# Fold composition by date/weekday, to verify the stratification.
CVIndices <- train_subset %>% group_by(activation_date, WeekDay, CVindices) %>% summarise(rowCount = n())
# folds 3,4,5 has more similar to test presentation on last day distribution
###############################################################################################################################
|
d4c812128fddf2d86a801eca40d4f0ec14a8dbdc
|
f30cc1c33978ca5a708a7e0a493403ea88550160
|
/man/write.neuronlistfh.Rd
|
70c8384009e4961e22b5793ba607dc4b3cf1e706
|
[] |
no_license
|
natverse/nat
|
044384a04a17fd0c9d895e14979ce43e43a283ba
|
1d161fa463086a2d03e7db3d2a55cf4d653dcc1b
|
refs/heads/master
| 2023-08-30T21:34:36.623787
| 2023-08-25T07:23:44
| 2023-08-26T19:02:50
| 15,578,625
| 35
| 10
| null | 2023-01-28T19:03:03
| 2014-01-02T07:54:01
|
R
|
UTF-8
|
R
| false
| true
| 1,755
|
rd
|
write.neuronlistfh.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/neuronlistfh.R
\name{write.neuronlistfh}
\alias{write.neuronlistfh}
\title{Write out a neuronlistfh object to an RDS file}
\usage{
write.neuronlistfh(x, file = attr(x, "file"), overwrite = FALSE, ...)
}
\arguments{
\item{x}{The neuronlistfh object to write out}
\item{file}{Path where the file will be written (see details)}
\item{overwrite}{Whether to overwrite an existing file}
\item{\dots}{Additional parameters passed to \code{saveRDS}}
}
\description{
Write out a neuronlistfh object to an RDS file
}
\details{
This function writes the main neuronlistfh object to disk, but makes
no attempt to touch/verify the associated object files.
if \code{file} is not specified, then the function will first check if
\code{x} has a \code{'file'} attribute. If that does not exist, then
\code{attr(x,'db')@dir}, the backing \code{filehash} database directory,
is inspected. The save path \code{file} will then be constructed by taking
the directory one up from the database directory and using the name of the
neuronlistfh object with the suffix '.rds'. e.g. write.neuronlistfh(kcs20)
with db directory '/my/path/dps/data' will be saved as
'/my/path/dps/kcs20.rds'
Note that if x has a \code{'file'} attribute (set by
\code{read.neuronlistfh}) then this will be removed before the file is
saved (since the file attribute must be set on read to ensure that we know
exactly which file on disk was the source of the object in memory).
}
\seealso{
\code{\link{saveRDS}}
Other neuronlistfh:
\code{\link{[.neuronlistfh}()},
\code{\link{neuronlistfh}()},
\code{\link{read.neuronlistfh}()},
\code{\link{remotesync}()}
}
\concept{neuronlistfh}
|
4c7824273cd254f6602a36576781c81169be4a84
|
3a70065126f01158b7132da8050adceeb1bc9302
|
/code/RandomForest.R
|
2520fcf2afa0f753ba2713c5549db35149236d54
|
[] |
no_license
|
jcauteru/rossman
|
9b5f2ad8373e9c075a015cfcb2cea96d166b5fd3
|
46c4359b960dbb5f66640aa443f6a2d75a73914b
|
refs/heads/master
| 2021-01-10T08:59:23.018379
| 2015-12-10T00:47:42
| 2015-12-10T00:47:42
| 47,728,324
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,112
|
r
|
RandomForest.R
|
library(data.table)
library(randomForest)
library(foreach)
source('/media/hdd/kaggle/rossman/code/make_data.R')
source('/media/hdd/kaggle/rossman/code/GXB_GRIDCV.R')
RMPSE2 <- function(preds, dtrain) {
  # Root Mean Percentage Squared Error, returned in the xgboost-style
  # custom-evaluation format: list(metric = <name>, value = <error>).
  actual <- as.numeric(dtrain)
  predicted <- as.numeric(preds)
  rmpse <- sqrt(mean((predicted / actual - 1)^2))
  list(metric = "RMPSE", value = rmpse)
}
PSE <- function(preds, dtrain) {
  # Element-wise percentage squared error (per-observation, no aggregation).
  actual <- as.numeric(dtrain)
  predicted <- as.numeric(preds)
  (predicted / actual - 1)^2
}
#################################################
# Rossmann random-forest pipeline: split train/holdout/time-based test,
# fit an H2O random forest on log(Sales), score, and write a submission.
#################################################
# 40k random rows held out; note the global assignment (<<-) — holdout is
# written to the global environment deliberately (used elsewhere).
glb_hld <- sample(1:nrow(training_enahnced), 40000)
holdout <<- training_enahnced[glb_hld, c('Sales', feature.names)]
holdout_log <- holdout
holdout_log$Sales <- log(holdout$Sales)
# NOTE(review): ordering by train_gbl$Date assumes training_enahnced and
# train_gbl are row-aligned — confirm upstream.
training_enahnced <- training_enahnced[order(train_gbl$Date),]
#h <- training_enahnced[train$Date >= as.Date('2015-07-31'), ]
# Time-based validation slice: everything after 2015-06-01.
h <- which(train_gbl$Date > as.Date('2015-06-01'))
training_data <- training_enahnced[-c(glb_hld, h), c('Sales', feature.names)]
training_data$Sales <- log(training_data$Sales)
testing_data <- training_enahnced[h, c('Sales', feature.names)]
testing_data$Sales <- log(testing_data$Sales)
H20 <- h2o.init(nthreads=-1,max_mem_size='20G', ice_root = '/media/hdd/kaggle/h2olock')
## Load data into cluster from R
# Store must be a factor so H2O treats it as categorical (1115 levels).
training_data$Store=as.factor(training_data$Store)
testing_data$Store=as.factor(testing_data$Store)
trainHex <- as.h2o(training_data)
testHex <- as.h2o(testing_data)
features<-feature.names
## Train a random forest using all default parameters
rfHex <- h2o.randomForest(x=features, y="Sales", training_frame=trainHex, ntrees=60, max_depth=30, nbins_cats = 1115)
#, max_depth = 30,nbins_cats = 1115
## Get predictions out; predicts in H2O, as.data.frame gets them into R
predictions<-as.data.frame(h2o.predict(rfHex,testHex))
## Return the predictions to the original scale of the Sales data
# NOTE(review): training used log(Sales), whose exact inverse is exp();
# expm1() is the inverse of log1p() and differs by 1 — confirm intended.
pred <- expm1(predictions[,1])
RMPSE2(pred, exp(testing_data$Sales))
summary(pred)
submission <- data.frame(Id=test$Id, Sales=pred)
cat("saving the submission file\n")
write.csv(submission, "h2o_rf.csv",row.names=F)
|
94d00e270865259f2d0a9aca55758a421da71787
|
dde6ab1f5a39b19450c320999051c5607f447b85
|
/man/factbook_tibble.Rd
|
e2ed42767cbb093f069a0f3f71b50f1a601c6c30
|
[
"MIT"
] |
permissive
|
mskyttner/factbook
|
2873b05945e15f5f951c45481521f0ae5fd71e88
|
3d07bf53e49913f4768ff01b22ddf2b770898685
|
refs/heads/master
| 2020-06-03T23:59:08.193651
| 2019-06-13T15:12:29
| 2019-06-13T15:12:29
| 191,785,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 902
|
rd
|
factbook_tibble.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/factbook.R
\name{factbook_tibble}
\alias{factbook_tibble}
\title{World Factbook data as a tibble}
\format{A tibble with 261 rows and 13 variables:
\describe{
\item{id}{row id}
\item{code}{country code}
\item{name}{country name}
\item{area}{total land area}
\item{area_land}{land area}
\item{area_water}{water area}
\item{population}{population}
\item{population_growth}{population growth}
\item{birth_rate}{birth rate}
\item{death_rate}{death rate}
\item{migration_rate}{migration rate}
\item{created_at}{created at timestamp}
\item{updated_at}{updated at timestamp}
}}
\source{
\url{https://github.com/factbook/factbook.sql/releases/download/v0.1/factbook.db}
}
\usage{
factbook_tibble()
}
\description{
A dataset with population, birth_rate and
other attributes for countries around the world.
}
|
dd4bd964284d93c4487326664205bed9c064bfe0
|
c51b7d3e6aea5df63cd795cdefa1454e2c7b6615
|
/man/width.Rd
|
4e587f3486fcc46dc64cc09e0ac1569535392f36
|
[] |
no_license
|
cran/rccmisc
|
11da5e0c2311ed4135ab0d0d7625ac5bb23a73cd
|
bf30d0b8be83a45fbf349e6ac0efc60159a7a204
|
refs/heads/master
| 2021-01-09T20:41:18.733037
| 2016-06-08T10:40:43
| 2016-06-08T10:40:43
| 60,682,631
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 420
|
rd
|
width.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/width.R
\name{width}
\alias{width}
\title{Calculate the width of the range of x}
\usage{
width(x)
}
\arguments{
\item{x}{object to calculate range for}
}
\value{
The width of the range of \code{x} as integer.
}
\description{
Calculate the width of the range of x
}
\examples{
width(1:10)
width(c(6748, 234, 2456, 5678))
width(sample(345))
}
|
eedb95a42113a919bf8e699d911e8d4fa4aed465
|
e41dc2086cc3a27ff9204ad0d3ab279504e08e48
|
/2 - R Programming/R Programming Week 4 - Final Project/rankhospital.R
|
3025ba133ef03407c9c9beff33accd3fccec225d
|
[] |
no_license
|
sandyjaugust/Data-Science-Specialization-Coursera
|
ff03ac66fc4c0302a74d25bf33f1e3dca08ba1b6
|
565d9876e1aaefff3f11366626a37e16f0dbf2c4
|
refs/heads/main
| 2023-01-05T11:10:43.664956
| 2020-11-02T00:52:23
| 2020-11-02T00:52:23
| 303,149,135
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,652
|
r
|
rankhospital.R
|
## rankhospital: read outcome-of-care-measures.csv and return the name of
## the hospital with the num'th lowest 30-day mortality rate for the given
## outcome in the given state. Ties on the mortality rate are broken
## alphabetically by hospital name.
##
## Args:
##   state:   state abbreviation; must appear in the data's STATE column
##   outcome: "heart attack", "heart failure" or "pneumonia"
##            (case-insensitive)
##   num:     integer rank, or "best" (rank 1) / "worst" (last rank)
##
## Returns the hospital name, or NA when num exceeds the number of
## hospitals with a reported rate. Stops with "Invalid State" /
## "Invalid Outcome" on bad input.
rankhospital <- function(state, outcome, num = "best") {
  valid.outcomes <- c("HEART ATTACK", "HEART FAILURE", "PNEUMONIA")
  ## Read outcome data; everything arrives as character.
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character")
  ## Normalize column names and the outcome argument to uppercase.
  names(data) <- toupper(names(data))
  outcome <- toupper(outcome)
  ## Coerce the three mortality-rate columns to numeric for ordering
  ## ("Not Available" becomes NA, with a warning, as before).
  data[, 11] <- as.numeric(data[, 11])
  data[, 17] <- as.numeric(data[, 17])
  data[, 23] <- as.numeric(data[, 23])
  ## Validate inputs (idiomatic %in% instead of is.na(match(...))).
  if (!(state %in% unique(data$STATE))) {
    stop("Invalid State")
  }
  if (!(outcome %in% valid.outcomes)) {
    stop("Invalid Outcome")
  }
  ## Build the full mortality-rate column name, e.g.
  ## HOSPITAL.30.DAY.DEATH..MORTALITY..RATES.FROM.HEART.ATTACK
  outcome.col <- paste0("HOSPITAL.30.DAY.DEATH..MORTALITY..RATES.FROM.",
                        gsub(" ", ".", outcome))
  ## Restrict to the requested state, then rank: lowest rate first,
  ## ties broken by hospital name; hospitals with NA rates are dropped.
  data <- data[data$STATE == state, ]
  data <- data[order(data[[outcome.col]], data$HOSPITAL.NAME, na.last = NA), ]
  ## Resolve the symbolic ranks.
  if (num == "best") {
    num <- 1
  } else if (num == "worst") {
    num <- nrow(data)
  }
  ## Rank beyond the ranked hospitals yields NA.
  if (num > nrow(data)) {
    return(NA)
  }
  data[num, "HOSPITAL.NAME"]
}
|
68d8ed629caacd7962c7aa604302a3ae15d23d89
|
b669732bbf19ea107b695fc8eeb36e0adaf28b06
|
/MRCAVectorDebugging.R
|
b3dce30def8008c8cd037a59315f6544091302ea
|
[] |
no_license
|
BenSinger01/I_MRCA
|
149253cc3100d26abd93183c9ed2df1b9e24fe95
|
ef85ad80c05e50ed9606af48cbf158067d13840e
|
refs/heads/master
| 2021-05-03T17:35:52.466086
| 2018-03-15T12:07:43
| 2018-03-15T12:07:43
| 120,450,229
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,658
|
r
|
MRCAVectorDebugging.R
|
# This code is to calculate the MRCA-matching distance between two phylogeographies.
# It follows the Google Style Guide for R.
# Classes
# class of vectors storing locations of MRCAs of pairs of leaves.
# MRCA.location has one entry per unordered leaf pair, i.e. choose(n, 2)
# entries; leaf.no recovers n from that length via n = (1+sqrt(1+8k))/2.
setClass("MRCA.vector", representation(leaf.no = "numeric", MRCA.location = "character"))
setMethod("initialize", "MRCA.vector", function(.Object, ..., MRCA.location)
{callNextMethod(.Object, ..., MRCA.location = MRCA.location,
leaf.no = ((1+sqrt(1+8*length(MRCA.location)))/2))})
# class of vectors storing nodes of MRCAs of pairs of leaves, and probabilities
# over possible locations of the nodes.
# leaf.no is derived from length(MRCA.node) the same way as above.
setClass("prob.MRCA.vector",
representation(leaf.no = "numeric", node.location.prob = "matrix",
MRCA.node = "character"))
setMethod("initialize", "prob.MRCA.vector",
function(.Object, ..., MRCA.node, node.location.prob)
{callNextMethod(.Object, ...,
node.location.prob = node.location.prob, MRCA.node = MRCA.node,
leaf.no = ((1+sqrt(1+8*length(MRCA.node)))/2))})
# class of geographic networks, storing vertices and distances
setClass("network", representation(locations = "character", distances = "matrix"))
# Functions
# Tips is here to make later functions more readable
Tips <- function(beast){
  # Convenience accessor: the tree's tip (leaf) labels from the embedded
  # phylo object. Single-bracket indexing keeps the "tip.label" component
  # name, so unlist() yields the same named vector the original returned.
  tip.component <- beast@phylo["tip.label"]
  unlist(tip.component)
}
# MRCAVector: function to extract a deterministic MRCA location vector from
# a data frame generated using the read.beast function. The comparison argument
# can be assigned to another treedata object in order to make sure that only
# shared leaves are used in creating the vector
GetMRCAVector <- function(beast, comparison = 0){
  # Debugging variant of GetMRCAVector: the tip set is hard-coded rather
  # than taken from the tree, and `comparison` is accepted but ignored.
  leaves <- c("O/ALG/1/2014","O/BAN/1/2009","O/BAN/GO/Ka-236(Pig)/2015",
              "O/BAN/NA/Ha/156/2013","O/BAR/15/2015", "O/BAR/2/2015",
              "O/BHU/1/2013","O/BHU/12/2012","O/BHU/2/2009","O/BHU/3/2016")
  # Every unordered pair of leaves, one column per pair.
  leaf.pairs <- combn(leaves, 2)
  # For each pair, look up the Location annotation of the pair's MRCA node
  # in the BEAST tree data.
  locations <- vapply(
    seq_len(ncol(leaf.pairs)),
    function(k) toString(beast@data[MRCA(beast, leaf.pairs[, k]), "Location"]),
    character(1)
  )
  new("MRCA.vector", MRCA.location = locations)
}
# GetProbMRCAVector: extract a probabilistic MRCA location vector from a
# treedata object generated with read.beast.
#
# @param beast      a treedata object with "node", "Location.set" and
#                   "Location.set.prob" columns in @data
# @param comparison optional second treedata object; when supplied, only tips
#                   and locations shared by both trees are used
# @return a prob.MRCA.vector: per leaf pair the MRCA node id, plus a
#         node-by-location probability matrix
GetProbMRCAVector <- function(beast, comparison = 0){
  # if there's a comparison, restrict to the tips shared between the trees
  if (is(comparison, "treedata")){
    leaves <- Tips(beast)[Tips(beast) %in% Tips(comparison)]
    locs <- unique(c(unlist(beast@data["Location.set"]),
                     unlist(comparison@data["Location.set"])))
  } else {
    leaves <- Tips(beast)
    locs <- unique(unlist(beast@data["Location.set"]))
  }
  nLeaves <- length(leaves)
  # a rooted binary tree with n leaves has n - 1 internal nodes
  nNodes <- nLeaves - 1
  # all unordered pairs of leaf names, one pair per column
  pairs <- combn(leaves, 2)
  nChoose <- ncol(pairs)
  # MRCA node id for each leaf pair
  V <- character(nChoose)
  for (i in seq_len(nChoose)){
    V[i] <- MRCA(beast, pairs[, i])
  }
  # internal nodes that actually appear as an MRCA of some pair
  nodes <- unlist(beast@data["node"])[unlist(beast@data["node"]) %in% V]
  # node-by-location probability matrix, zero wherever a location is absent
  # from a node's Location.set
  X <- array(0, dim = c(nNodes, length(locs)),
             dimnames = list(nodes, locs))
  for (n in nodes){
    # NOTE(review): assumes @data rows can be indexed by node id — confirm
    # this holds for the treedata objects used here
    l <- unlist(beast@data[n, "Location.set"])
    for (j in seq_along(l)){
      X[n, l[j]] <- unlist(beast@data[n, "Location.set.prob"])[j]
    }
  }
  return(new("prob.MRCA.vector", node.location.prob = X,
             MRCA.node = V))
}
# GetNetwork: build a network object from a treedata object, with every pair
# of distinct locations at distance 1 from each other.
#
# @param beast      a treedata object with a "Location.set" column in @data
# @param comparison optional second treedata object whose locations are also
#                   included in the network
# @return a network object whose distance matrix is named by location
GetNetwork <- function(beast, comparison = 0){
  if (is(comparison, "treedata")){
    locs <- unique(c(unlist(beast@data["Location.set"]),
                     unlist(comparison@data["Location.set"])))
  } else {
    locs <- unique(unlist(beast@data["Location.set"]))
  }
  # 1 everywhere off the diagonal: unit distance between distinct locations
  dists <- 1 - diag(length(locs))
  rownames(dists) <- locs
  colnames(dists) <- locs
  return(new("network", locations = locs, distances = dists))
}
# I_MRCA: incompatibility between two MRCA vectors defined on network N.
#
# For deterministic MRCA.vector inputs this is the sum, over leaf pairs, of
# the network distance between the two vectors' MRCA locations. For
# prob.MRCA.vector inputs, distances are weighted by the location
# probabilities of the corresponding MRCA nodes, summing both orderings of
# each location pair (identical locations contribute zero distance and are
# skipped).
#
# @param v1,v2         two MRCA.vector or two prob.MRCA.vector objects
# @param N             a network object with a named distance matrix
# @param normalisation probabilistic case only: if TRUE, weight each pair by
#                      the mean inverse multiplicity of its MRCA node in each
#                      vector, then divide by (leaf.no - 1)
# @return a single numeric incompatibility score
I_MRCA <- function(v1, v2, N, normalisation = FALSE) {
  # both vectors must be the same flavour (deterministic or probabilistic)
  if (!identical(class(v1), class(v2))) {
    stop("Vectors are of different classes - check if one is probabilistic and the other not")
  } else if (is(v1, "MRCA.vector")) {
    # deterministic case: plain sum of pairwise network distances
    I <- 0
    for (i in seq_along(v1@MRCA.location)) {
      I <- I + N@distances[v1@MRCA.location[i], v2@MRCA.location[i]]
    }
    return(I)
  } else if (is(v1, "prob.MRCA.vector")) {
    # probabilistic case: probability-weighted sum of distances, as outlined
    # in the notes/paper accompanying this code
    I <- 0
    locPairs <- combn(N@locations, 2)
    # choose the per-pair weighting function; both take the pair index i
    # (the original declared unused node1/node2 arguments in the constant
    # case but was still called with a single index)
    if (normalisation) {
      w <- function(i) {
        return(0.5 * (1 / (sum(v1@MRCA.node == v1@MRCA.node[i])) +
                        1 / (sum(v2@MRCA.node == v2@MRCA.node[i]))))
      }
    } else {
      w <- function(i) {
        return(1)
      }
    }
    for (i in seq_along(v1@MRCA.node)) {
      # sum over pairs of different vertices (identical vertices have zero
      # distance), taking both orderings of each pair
      for (j in seq_len(ncol(locPairs))) {
        I <- I + w(i) * v1@node.location.prob[v1@MRCA.node[i], locPairs[1, j]] *
          v2@node.location.prob[v2@MRCA.node[i], locPairs[2, j]] *
          N@distances[locPairs[1, j], locPairs[2, j]]
        I <- I + w(i) * v1@node.location.prob[v1@MRCA.node[i], locPairs[2, j]] *
          v2@node.location.prob[v2@MRCA.node[i], locPairs[1, j]] *
          N@distances[locPairs[1, j], locPairs[2, j]]
      }
    }
    if (normalisation) {
      I <- I / (v1@leaf.no - 1)
    }
    return(I)
  } else {
    # reject inputs of any other type
    stop("Vectors are not of the class MRCA.vector or ProbabilisticMRCA.vector")
  }
}
|
9e04229c24e289321739907f413c61a2a602c026
|
7ac133f9871f201f7a956f3b239d8f0030907c06
|
/tests/testthat/test-wrappers.R
|
560dcdddf9b9700b80306cb86e7490078690cb8f
|
[
"MIT"
] |
permissive
|
gopalpenny/anem
|
1029318ca01a6172b365ddb7d2181135d909d92c
|
f2ba63622e0e67a08423b20c5f09a34b6433f6d0
|
refs/heads/master
| 2021-07-09T21:43:07.502292
| 2020-12-16T15:25:36
| 2020-12-16T15:25:36
| 219,404,991
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,606
|
r
|
test-wrappers.R
|
# test_wrappers.R
# Setup: define a rectangular unconfined aquifer with one constant-head ("CH")
# and three no-flow ("NF") boundaries; each bound is a line y = m*x + b, with
# m = Inf apparently encoding vertical lines (x = b) — anem-package convention.
bounds_df <- data.frame(bound_type=c("CH","NF","NF","NF"),m=c(Inf,0,Inf,0),b=c(0,1000,1000,0))
aquifer_unconfined <- define_aquifer("unconfined",1e-3,bounds=bounds_df,h0=100)
# Setup: 8 wells at reproducible random positions (the seed is fixed so the
# hard-coded expected potentials below remain valid), each assigned a radius
# of influence, a country by y-position, and unit weight; image wells are then
# generated across the aquifer boundaries.
set.seed(30)
wells_df <- data.frame(x=runif(8,0,1000),y=runif(8,0,1000),diam=1) %>%
dplyr::mutate(R=get_ROI(Ksat=aquifer_unconfined$Ksat,h=aquifer_unconfined$h0,t=3600*24*365,n=0.4,method="aravin-numerov"), # t = 1 year
country=factor(y>500,levels=c(F,T),labels=c("A","B")),
weights=1)
wells <- define_wells(wells_df) %>% generate_image_wells(aquifer_unconfined)
# Expected drawdown potentials (rounded) for each country pair A/B.
df <- tibble::tibble(var=c("PHI_A_A", "PHI_A_B", "PHI_B_A", "PHI_B_B"),
pot=round(c(1317.4124,242.8351,242.8351,875.3999),5))
test_that("get_drawdown_relationships returns correct result for simple aquifer and two countries",{
expect_equal(get_drawdown_relationships(wells,aquifer_unconfined,country,weights) %>% dplyr::mutate(pot=round(pot,4)) %>% dplyr::select(var,pot),df)
})
# Setup: rebuild the same aquifer, then 8 wells with a fixed radius of
# influence and the total pumping rate of -1 split evenly across wells.
bounds_df <- data.frame(bound_type=c("CH","NF","NF","NF"),m=c(Inf,0,Inf,0),b=c(0,1000,1000,0))
aquifer_unconfined <- define_aquifer("unconfined",1e-3,bounds=bounds_df,h0=100)
# define wells and well images (seed fixed so well positions are reproducible)
set.seed(30)
wells_df <- data.frame(x=runif(8,0,1000),y=runif(8,0,1000),diam=1) %>% dplyr::mutate(R=1000,Q=-1/dplyr::n())
wells <- define_wells(wells_df) %>% generate_image_wells(aquifer_unconfined)
gridded <- get_gridded_hydrodynamics(wells,aquifer_unconfined,c(15,15),c(8,8))
# A 15x15 head grid should yield 225 rows; an 8x8 flow grid, 64 rows.
test_that("get_gridded_hydrodynamics returns accurate dimensions for head",{
expect_equal(dim(gridded$head),c(225,3))
})
test_that("get_gridded_hydrodynamics returns accurate dimensions for flow",{
expect_equal(dim(gridded$flow),c(64,6))
})
# Setup: four user-drawn edges (lon/lat endpoint pairs with boundary ids).
edges_user <- data.frame(x1=c(-87.38,-86.22,-85.85,-87.18),
y1=c(41.44,41.83,41.15,40.85),
bID=c(5,6,7,8),
x2=c(-86.22,-85.85,-87.18,-87.38),
y2=c(41.83,41.15,40.85,41.44))
# Expected rectangularized edges from get_utm_rectangle (compared below with
# a coarse tolerance, since the snapped coordinates are only approximate).
edges_rect <- tibble::tibble(bID=c(5, 6, 7, 8),
x1=c(-87.44, -86.19, -85.88, -87.12),
y1=c(41.46, 41.81, 41.17, 40.83),
x2=c(-86.19, -85.88, -87.12, -87.44),
y2=c(41.81, 41.17, 40.83, 41.46))
test_that("use_anem_function works for get_utm_rectangle",{
expect_equal(TRUE,
all.equal(use_anem_function("get_utm_rectangle",edges_user=edges_user),
edges_rect,tolerance=1e-1))
})
|
b8cfb3beefff61614a010864ea60250272b5c0f0
|
b3355f7deb1c43c6508cc09b6137e3a7af0adbc1
|
/R/Main.R
|
b38f3657683443c214abbdb3de1f7632c7ccd150
|
[] |
no_license
|
dustincys/RSVSim
|
bd40720256c9a130333c23616fee4e3f008da340
|
46bed7dee8ab40396e7dc90d49ede3520aa772b9
|
refs/heads/master
| 2020-05-29T12:31:24.313374
| 2015-10-13T19:59:53
| 2015-10-13T19:59:53
| 47,256,174
| 1
| 0
| null | 2015-12-02T11:16:20
| 2015-12-02T11:16:20
| null |
UTF-8
|
R
| false
| false
| 32,103
|
r
|
Main.R
|
setMethod("simulateSV",
signature(),
function(output, genome, chrs, dels, ins, invs, dups, trans, size, sizeDels, sizeIns, sizeInvs, sizeDups, regionsDels, regionsIns, regionsInvs, regionsDups, regionsTrans, maxDups, percCopiedIns, percBalancedTrans, bpFlankSize, percSNPs, indelProb, maxIndelSize, repeatBias, weightsMechanisms, weightsRepeats, repeatMaskerFile, bpSeqSize, random, seed, verbose){
options(stringsAsFactors=FALSE, scipen=10)
if(!missing(seed)){
set.seed(seed)
}
## default for random is TRUE
## is only one value given, set it for all SVs where regions were specified (only place, where a FALSE makes sense)
if(length(random) == 1){
r = rep(TRUE, 5)
r[!c(missing(regionsDels), missing(regionsIns), missing(regionsInvs), missing(regionsDups), missing(regionsTrans))] = random
random = r
}
.validateInput(output, genome, chrs, dels, ins, invs, dups, trans, sizeDels, sizeIns, sizeInvs, sizeDups, regionsDels, regionsIns, regionsInvs, regionsDups, regionsTrans, maxDups, percCopiedIns, percBalancedTrans, percSNPs, indelProb, weightsMechanisms, weightsRepeats, bpSeqSize, random, seed)
## see if genome is given
## if not, read the hg19 by default
if(missing(genome)){
if(verbose==TRUE) message("Loading human genome (hg19)")
if(missing(chrs)){
genome = .getHG19(NA)
}else{
genome = .getHG19(chrs)
}
gaps = genome[[2]]
genome = genome[[1]]
chrs = names(genome)
genomeOrganism = "hg19"
## if genome is given, there are two possibilities:
## 1. genome is a filename pointing to a FASTA-file
## 2. genome is a named DNAStringSet
}else{
if(class(genome) == "character"){
if(verbose==TRUE) message("Loading genome from FASTA file: ", genome)
genome = readDNAStringSet(filepath=genome) ## read genome from FASTA-file
}
if(missing(chrs)){
chrs = names(genome)
}else{
genome = genome[match(chrs, names(genome))]
}
## compute gaps within the given genome (N-sequences)
gaps = GRanges()
for(chr in chrs){
Ns = whichAsIRanges(as.integer(genome[[chr]]) == as.integer(DNAString("N")))
if(length(Ns) > 0){
gaps = c(gaps, GRanges(Ns, seqnames=chr))
}
}
genomeOrganism = "unknown"
}
gaps = as.data.frame(gaps)[, 1:3]
genomeCoords = GRanges(IRanges(start=1, end=width(genome)), seqnames=names(genome))
## for the default case, i.e. simulation on the hg19:
## set the weights for the sv formation mechanisms and their occurence within certain repeat regions
## default values for SV formation mechanisms were derived from:
## deletions, insertions/duplications: Mills et al., Mapping copy number variation by population-scale genome sequencing
## inversions: Pang et al., Mechanisms of Formation of Structural Variation in a Fully Sequenced Human Genome
## translocations: Ou et al., Observation and prediction of recurrent human translocations mediated by NAHR between nonhomologous chromosomes & Chen et al., Mapping translocation breakpoints by next-generation sequencing
## default values for repeat regions enriched for SV formation mechanisms were derived from:
## Lam et al., Nucleotide-resolution analysis of structural variants using BreakSeq and a breakpoint library (Supplemental Table 5)
## in this order: NAHR, NHR, TEI, VNTR, other
mechanisms = c("NAHR","NHR","TEI","VNTR","Other")
bpTypes = c("L1","L2","Alu","MIR","SD","TR","Random")
## for simulation on hg19 set the weights for the repeat elements (if not given by the user)
## for simulation on any other organism (user specified genome), set the weights to zero except for random simulation (i.e. turn this feature off)
if(missing(weightsMechanisms)){
if(genomeOrganism == "hg19" & repeatBias){
data("weightsMechanisms", package="RSVSim", envir=environment())
}else{
weightsMechanisms = data.frame(
dels = c(0,0,0,0,1),
ins = c(0,0,0,0,1),
invs = c(0,0,0,0,1),
dups = c(0,0,0,0,1),
trans = c(0,0,0,0,1)
)
rownames(weightsMechanisms) = mechanisms
}
}
if(missing(weightsRepeats)){
if(genomeOrganism == "hg19" & repeatBias){
data("weightsRepeats", package="RSVSim", envir=environment())
}else{
weightsRepeats = data.frame(
NAHR = c(0,0,0,0,0,0,0),
NHR = c(0,0,0,0,0,0,0),
TEI = c(0,0,0,0,0,0,0),
VNTR = c(0,0,0,0,0,0,0),
Other = c(0,0,0,0,0,0,1)
)
rownames(weightsRepeats) = bpTypes
}
}
## several options to get the repeatmasker regions:
## 1. bp shall be placed randomly (no bpWeights)
## then, only use all the genomic coordinates without special weighting for repeats
if(genomeOrganism != "hg19" | !repeatBias | all(random) == FALSE){
if(genomeOrganism == "hg19"){
if(verbose==TRUE) message("Bias for hg19 repeat regions is turned OFF")
}else{
if(verbose==TRUE) message("Breakpoints will be distributed uniformly across the genome")
}
bpRegions = vector(mode="list", length=1)
names(bpRegions) = "Random"
}else{
if(verbose==TRUE) message("Bias for hg19 repeat regions is turned ON")
## 2. repeatMasker regions were loaded and saved to disk once befoe in the data directory of the package
## then just load the data
if(file.exists(file.path(path.package("RSVSim"), "data", "repeats_hg19.RData"))){
if(verbose==TRUE) message("Loading hg19 repeat regions")
data("repeats_hg19", package="RSVSim", envir=environment()) ## loads object named "repeats"
}else{
## 3. filename of repeatmasker file is given (file downloaded from http://www.repeatmasker.org/genomes/hg19/RepeatMasker-rm330-db20120124/hg19.fa.out.gz)
## then, read the file, extract LINES,SINES and read segmental duplications from UCSC
if(!missing(repeatMaskerFile)){
if(verbose==TRUE) message("Loading hg19 repeat regions for the first time from given RepeatMasker output file")
repeats = .readRepeatMaskerOutput(repeatMaskerFile)
if(file.exists(file.path(path.package("RSVSim"), "data", "repeats_hg19.RData"))){
if(verbose==TRUE) message("Repeat regions were saved to ", file.path(path.package("RSVSim"), "data", "repeats_hg19.RData"), " for faster access next time")
}else{
warning("Saving of repeat regions to ", file.path(path.package("RSVSim"), "data", "repeats_hg19.RData"), " failed")
}
}
## 4. no repeatMasker file is given
## then, read the repeatMasker track and the segmental duplications from UCSC via rTracklayer package
else{
if(verbose==TRUE) message("Loading hg19 repeat regions for the first time from the UCSC Browser's RepeatMasker track (this may take up to 45 minutes)")
repeats = .loadFromUCSC_RepeatMasks(save=TRUE, verbose=verbose)
if(file.exists(file.path(path.package("RSVSim"), "data", "repeats_hg19.RData"))){
if(verbose==TRUE) message("Repeat regions were saved to ", file.path(path.package("RSVSim"), "data", "repeats_hg19.RData"), " for faster access next time")
}else{
warning("Saving of repeat regions to ", file.path(path.package("RSVSim"), "data", "repeats_hg19.RData"), " failed")
}
}
}
repeats = lapply(repeats, function(r) return(r[seqnames(r) %in% chrs]))
repeats = lapply(repeats, function(r) return(reduce(r, min.gapwidth=50))) # merge repeats which are only 100bp away from each other
bpRegions = vector(mode="list", length=length(repeats)+1)
names(bpRegions) = bpTypes
bpRegions[1:length(repeats)] = repeats
rm(repeats)
}
## put SVs anywhere in the genome if no regions were specified
## if regions were given, take care, that the regions lie within the available chromosomes
if(missing(regionsDels)){
regionsDels = genomeCoords
}else{
regionsDels = regionsDels[seqnames(regionsDels) %in% intersect(chrs, as.character(seqnames((regionsDels))))]
if(length(regionsDels) == 0){
stop("No regions on given chromosomes")
}
## for non-random distribution, set number of deletions to number of given regions
if(random[1] == FALSE){
dels = length(regionsDels)
}
}
if(missing(regionsIns)){
regionsIns = genomeCoords
}else{
if(random[2] == TRUE){
regionsIns = regionsIns[seqnames(regionsIns) %in% intersect(chrs, as.character(seqnames((regionsIns))))]
}else{
regionsIns = regionsIns[seqnames(regionsIns) %in% intersect(chrs, as.character(seqnames((regionsIns)))) & regionsIns$chrB %in% chrs]
ins = length(regionsIns)
}
if(length(regionsIns) == 0){
stop("No regions on given chromosomes")
}
}
if(missing(regionsInvs)){
regionsInvs = genomeCoords
}else{
regionsInvs = regionsInvs[seqnames(regionsInvs) %in% intersect(chrs, as.character(seqnames((regionsInvs))))]
if(length(regionsInvs) == 0){
stop("No regions on given chromosomes")
}
## for non-random distribution, set number of deletions to number of given regions
if(random[3] == FALSE){
invs = length(regionsInvs)
}
}
if(missing(regionsDups)){
regionsDups = genomeCoords
dupTimes = sample(1:(maxDups-1), dups) + 1
}else{
regionsDups = regionsDups[seqnames(regionsDups) %in% intersect(chrs, as.character(seqnames((regionsDups))))]
if(length(regionsDups) == 0){
stop("No regions on given chromosomes")
}
## for non-random distribution, set number of deletions to number of given regions
dupTimes = sample(1:(maxDups-1), length(regionsDups)) + 1
if(random[4] == FALSE){
dups = length(regionsDups)
if(ncol(mcols(regionsDups)) > 0){
if("times" %in% colnames(mcols(regionsDups))){
dupTimes = mcols(regionsDups)$times
}
}
}
}
if(missing(regionsTrans)){
regionsTrans = genomeCoords
}else{
if(random[5] == TRUE){
regionsTrans = regionsTrans[seqnames(regionsTrans) %in% intersect(chrs, as.character(seqnames((regionsTrans))))]
}else{
regionsTrans = regionsTrans[seqnames(regionsTrans) %in% intersect(chrs, as.character(seqnames((regionsTrans)))) & regionsTrans$chrB %in% chrs]
trans = length(regionsTrans)
}
if(length(regionsTrans) == 0){
stop("No regions on given chromosomes")
}
}
## restrict SV regions to given chromosomes
## inversion and deletion sizes
if((length(sizeDels) > 1 & length(sizeDels) != dels) | (length(sizeIns) > 1 & length(sizeIns) != ins) | (length(sizeInvs) > 1 & length(sizeInvs) != invs) | (length(sizeDups) > 1 & length(sizeDups) != dups)){
warning("Length of vectors with SV sizes vary from the number of SVs")
}
if(!missing(size)){
sizeDels = sizeIns = sizeInvs = sizeDups = size
}
sizeDels = rep(sizeDels, dels)[1:dels]
sizeIns = rep(sizeIns, ins)[1:ins]
sizeInvs = rep(sizeInvs, invs)[1:invs]
sizeDups = rep(sizeDups, dups)[1:dups]
## bpSeqSize will be used two times, in upstream and downstream direction; so divide it hear by two
bpSeqSize = round(bpSeqSize / 2)
## Simulation ############################################################
## 1. simulate regions for translocations:
type = "translocation"
translocations = posTrans_1 = posTrans_2 = NULL
if(random[5] == TRUE){
## 1.1 for chromosome terminals
if(trans > 0){
if(verbose==TRUE) message("Calculating coordinates: ", trans, " translocations")
subtrahend = gaps[, c("seqnames","start","end")]
subtrahend = GRanges(IRanges(subtrahend$start, subtrahend$end), seqnames=subtrahend$seqnames)
regionsTrans = .subtractIntervals(regionsTrans, subtrahend)
bpRegions[["Random"]] = regionsTrans
t = .simTranslocationPositions(trans, bpRegions, weightsMechanisms[, "trans", drop=FALSE], weightsRepeats, genome, percBalancedTrans, c(), verbose)
translocations = t[[1]]
posTrans_1 = t[[2]]
posTrans_2 = t[[3]]
}
}else{
if(verbose==TRUE) message("Using given coordinates: Translocations")
trans = length(regionsTrans)
posTrans_1 = as.data.frame(regionsTrans)
posTrans_1$names = rownames(posTrans_1)
posTrans_2 = as.data.frame(regionsTrans)[, c("chrB","startB","endB")]
colnames(posTrans_2) = c("seqnames","start","end")
## Add balanced information (default:TRUE) if not given
if(!("balanced" %in% colnames(posTrans_1)) | !("balanced" %in% colnames(posTrans_2))){
posTrans_1$balanced = posTrans_2$balanced = TRUE
}
## Determine wich translocations need to be inverted (segments on different ends of chromosomes)
idx = which((posTrans_1$start == 1 & posTrans_2$start != 1) | (posTrans_1$start != 1 & posTrans_2$start == 1))
posTrans_1$inverted = posTrans_2$inverted = FALSE
posTrans_1$inverted[idx] = posTrans_2$inverted[idx] = TRUE
size1 = posTrans_1$end-posTrans_1$start+1
size2 = posTrans_2$end-posTrans_2$start+1
translocations = cbind(posTrans_1$names, posTrans_1[, c("seqnames","start","end")], size1, posTrans_2[, c("seqnames","start","end")], size2, posTrans_2$balanced)
colnames(translocations) = c("Name", "ChrA", "StartA", "EndA", "SizeA", "ChrB", "StartB", "EndB", "SizeB","Balanced")
}
## 2. simulate insertions
type = "insertion"
insertions = posIns_1 = posIns_2 = NULL
if(random[2] == TRUE){
if(ins > 0){
if(verbose==TRUE) message("Calculating coordinates: ", ins, " insertions")
subtrahend = rbind(gaps[, c("seqnames","start","end")], posTrans_1[, c("seqnames","start","end")], posTrans_2[, c("seqnames","start","end")])
subtrahend = GRanges(IRanges(subtrahend$start, subtrahend$end), seqnames=subtrahend$seqnames)
regionsIns = .subtractIntervals(regionsIns, subtrahend)
bpRegions[["Random"]] = regionsIns
i = .simInsertionPositions(ins, bpRegions, weightsMechanisms[, "ins", drop=FALSE], weightsRepeats, genome, sizeIns, percCopiedIns, verbose)
insertions = i[[1]]
posIns_1 = i[[2]]
posIns_2 = i[[3]]
}
}else{
if(verbose==TRUE) message("Using given coordinates: Insertions")
ins = length(regionsIns)
posIns_1 = as.data.frame(regionsIns)
size = posIns_1$end-posIns_1$start+1
posIns_1$names = rownames(posIns_1)
posIns_2 = as.data.frame(regionsIns)[, c("chrB","startB")]
posIns_2$endB = posIns_2$startB + size - 1
colnames(posIns_2) = c("seqnames","start","end")
## Add duplicate information (default:FALSE) if not given
if(!("copied" %in% colnames(posIns_1))){
posIns_1$copied = posIns_2$copied = FALSE
}
insertions = cbind(posIns_1$names, posIns_1[rownames(posIns_1), c("seqnames","start","end")], posIns_2[rownames(posIns_1), c("seqnames","start","end")], size, posIns_1$copied)
colnames(insertions) = c("Name", "ChrA", "StartA", "EndA", "ChrB", "StartB", "EndB", "Size", "Copied")
}
## 3. simulate deletions
type = "deletion"
deletions = posDel = NULL
if(random[1] == TRUE){
if(dels > 0){
if(verbose==TRUE) message("Calculating coordinates: ", dels, " deletions")
subtrahend = rbind(gaps[, c("seqnames","start","end")], posIns_1[, c("seqnames","start","end")], posIns_2[, c("seqnames","start","end")], posTrans_1[, c("seqnames","start","end")], posTrans_2[, c("seqnames","start","end")])
subtrahend = GRanges(IRanges(subtrahend$start, subtrahend$end), seqnames=subtrahend$seqnames)
regionsDels = .subtractIntervals(regionsDels, subtrahend)
bpRegions[["Random"]] = regionsDels
d = .simPositions(dels, bpRegions, weightsMechanisms[, "dels", drop=FALSE], weightsRepeats, sizeDels, "deletion", verbose)
deletions = d[[1]]
posDel = d[[2]]
}
}else{
if(verbose==TRUE) message("Using given coordinates: Deletions")
dels = length(regionsDels)
posDel = as.data.frame(regionsDels)
posDel$names = rownames(posDel)
size=posDel$end-posDel$start+1
deletions = cbind(posDel$names, posDel[, c("seqnames","start","end")], size)
colnames(deletions) = c("Name", "Chr", "Start","End","Size")
}
## 4. simulate inversions
type = "inversion"
inversions = posInv = NULL
if(random[3] == TRUE){
if(invs > 0){
if(verbose==TRUE) message("Calculating coordinates: ", invs, " inversions")
subtrahend = rbind(gaps[, c("seqnames","start","end")], posDel[, c("seqnames","start","end")], posIns_1[, c("seqnames","start","end")], posIns_2[, c("seqnames","start","end")], posTrans_1[, c("seqnames","start","end")], posTrans_2[, c("seqnames","start","end")])
subtrahend = GRanges(IRanges(subtrahend$start, subtrahend$end), seqnames=subtrahend$seqnames)
regionsInvs = .subtractIntervals(regionsInvs, subtrahend)
bpRegions[["Random"]] = regionsInvs
i = .simPositions(invs, bpRegions, weightsMechanisms[, "invs", drop=FALSE], weightsRepeats, sizeInvs, type, verbose)
inversions = i[[1]]
posInv = i[[2]]
}
}else{
if(verbose==TRUE) message("Using given coordinates: Inversions")
invs = length(regionsInvs)
posInv = as.data.frame(regionsInvs)
posInv$names = rownames(posInv)
size=posInv$end-posInv$start+1
inversions = cbind(posInv$names, posInv[, c("seqnames","start","end")], size)
colnames(inversions) = c("Name","Chr", "Start","End", "Size")
}
## 5. simulate tandem duplications
type = "tandemDuplication"
tandemDups = posDup = NULL
if(random[4] == TRUE){
if(dups > 0){
if(verbose==TRUE) message("Calculating coordinates: ", dups, " tandem duplications")
subtrahend = rbind(gaps[, c("seqnames","start","end")], posDel[, c("seqnames","start","end")], posIns_1[, c("seqnames","start","end")], posIns_2[, c("seqnames","start","end")], posInv[, c("seqnames","start","end")], posTrans_1[, c("seqnames","start","end")], posTrans_2[, c("seqnames","start","end")])
subtrahend = GRanges(IRanges(subtrahend$start, subtrahend$end), seqnames=subtrahend$seqnames)
regionsDups = .subtractIntervals(regionsDups, subtrahend)
bpRegions[["Random"]] = regionsDups
td = .simPositions(dups, bpRegions, weightsMechanisms[, "dups", drop=FALSE], weightsRepeats, sizeDups, type, verbose)
tandemDups = td[[1]]
posDup = td[[2]]
}
}else{
if(verbose==TRUE) message("Using given coordinates: Tandem Duplications")
dups = length(regionsDups)
posDup = as.data.frame(regionsDups)
posDup$names = rownames(posDup)
size=posDup$end-posDup$start+1
tandemDups = cbind(posDup$names, posDup[, c("seqnames","start","end")], size)
colnames(tandemDups) = c("Name","Chr", "Start","End", "Size")
}
## Execution: implement regions into genome sequence ############################################################
## 1. inversions
if(invs > 0){
if(verbose==TRUE) message("Rearranging genome: Inversions")
if(verbose==TRUE) pb = txtProgressBar(min = 0, max = invs, style = 3)
for(i in 1:invs){
chr = as.character(posInv$seqnames[i])
rearrangement = .execInversion(genome[[chr]], chr, posDel, posIns_1, posIns_2, posInv, posDup, posTrans_1, posTrans_2, bpSeqSize, i, bpFlankSize, percSNPs, indelProb, maxIndelSize)
genome[[chr]] = rearrangement[[1]]
posDel = rearrangement[[2]]
posIns_1 = rearrangement[[3]]
posIns_2 = rearrangement[[4]]
posInv = rearrangement[[5]]
posDup = rearrangement[[6]]
posTrans_1 = rearrangement[[7]]
posTrans_2 = rearrangement[[8]]
if(verbose==TRUE) setTxtProgressBar(pb, i)
}
if(verbose==TRUE) close(pb)
}
## 2. translocations
if(trans > 0){
if(verbose==TRUE) message("Rearranging genome: Translocations")
if(verbose==TRUE) pb = txtProgressBar(min = 0, max = trans, style = 3)
for(i in 1:trans){
chr1 = as.character(posTrans_1$seqnames[i])
chr2 = as.character(posTrans_2$seqnames[i])
rearrangement = .execTranslocation(genome[[chr1]], genome[[chr2]], chr1, chr2, posDel, posIns_1, posIns_2, posInv, posDup, posTrans_1, posTrans_2, i, bpSeqSize, bpFlankSize, percSNPs, indelProb, maxIndelSize)
genome[[chr1]] = rearrangement[[1]]
genome[[chr2]] = rearrangement[[2]]
posDel = rearrangement[[3]]
posIns_1 = rearrangement[[4]]
posIns_2 = rearrangement[[5]]
posInv = rearrangement[[6]]
posDup = rearrangement[[7]]
posTrans_1 = rearrangement[[8]]
posTrans_2 = rearrangement[[9]]
if(verbose==TRUE) setTxtProgressBar(pb, i)
}
if(verbose==TRUE) close(pb)
}
## 3. insertions
if(ins > 0){
if(verbose==TRUE) message("Rearranging genome: Insertions")
if(verbose==TRUE) pb = txtProgressBar(min = 0, max = ins, style = 3)
for(i in 1:ins){
chr1 = as.character(posIns_1$seqnames[i])
chr2 = as.character(posIns_2$seqnames[i])
rearrangement = .execInsertion(genome[[chr1]], genome[[chr2]], chr1, chr2, posDel, posIns_1, posIns_2, posInv, posDup, posTrans_1, posTrans_2, i, bpSeqSize, bpFlankSize, percSNPs, indelProb, maxIndelSize)
genome[[chr1]] = rearrangement[[1]]
genome[[chr2]] = rearrangement[[2]]
posDel = rearrangement[[3]]
posIns_1 = rearrangement[[4]]
posIns_2 = rearrangement[[5]]
posInv = rearrangement[[6]]
posDup = rearrangement[[7]]
posTrans_1 = rearrangement[[8]]
posTrans_2 = rearrangement[[9]]
if(verbose==TRUE) setTxtProgressBar(pb, i)
}
if(verbose==TRUE) close(pb)
}
## 4. tandem duplications
if(dups > 0){
if(verbose==TRUE) message("Rearranging genome: Tandem duplications")
## add a column for the number of duplications
tandemDups = cbind(tandemDups[, c("Name", "Chr", "Start","End", "Size")], 0, "")
names(tandemDups) = c("Name", "Chr", "Start","End", "Size", "Duplications", "BpSeq")
if(verbose==TRUE) pb = txtProgressBar(min = 0, max = dups, style = 3)
for(i in 1:dups){
# times = sample(2:maxDups,1) ## how many times the sequence is duplicated
times = dupTimes[i]
chr = as.character(posDup$seqnames[i])
rearrangement = .execTandemDuplication(genome[[chr]], chr, posDel, posIns_1, posIns_2, posInv, posDup, posTrans_1, posTrans_2, bpSeqSize, times, i, bpFlankSize, percSNPs, indelProb, maxIndelSize)
genome[[chr]] = rearrangement[[1]]
posDel = rearrangement[[2]]
posIns_1 = rearrangement[[3]]
posIns_2 = rearrangement[[4]]
posInv = rearrangement[[5]]
posDup = rearrangement[[6]]
posTrans_1 = rearrangement[[7]]
posTrans_2 = rearrangement[[8]]
tandemDups$BpSeq[i] = rearrangement[[9]]
tandemDups$Duplications[i] = times
if(verbose==TRUE) setTxtProgressBar(pb, i)
}
if(verbose==TRUE) close(pb)
}
## 5. deletions
if(dels > 0){
if(verbose==TRUE) message("Rearranging genome: Deletions")
if(verbose==TRUE) pb = txtProgressBar(min = 0, max = dels, style = 3)
for(i in 1:dels){
chr = as.character(posDel$seqnames[i])
rearrangement = .execDeletion(genome[[chr]], chr, posDel, posIns_1, posIns_2, posInv, posDup, posTrans_1, posTrans_2, bpSeqSize, i, bpFlankSize, percSNPs, indelProb, maxIndelSize)
genome[[chr]] = rearrangement[[1]]
posDel = rearrangement[[2]]
posIns_1 = rearrangement[[3]]
posIns_2 = rearrangement[[4]]
posInv = rearrangement[[5]]
posDup = rearrangement[[6]]
posTrans_1 = rearrangement[[7]]
posTrans_2 = rearrangement[[8]]
if(verbose==TRUE) setTxtProgressBar(pb, i)
}
if(verbose==TRUE) close(pb)
}
## Retrieve breakpoint sequences for translocations, deletions and inversions (for duplications, it works during execution)
if(invs > 0){
inversions$BpSeq_5prime = inversions$BpSeq_3prime = ""
for(i in 1:invs){
bpSeq = .getBpSeq(genome, posInv, bpSeqSize, i)
inversions$BpSeq_5prime[i] = bpSeq[1]
inversions$BpSeq_3prime[i] = bpSeq[2]
}
}
if(dels > 0){
deletions$BpSeq = ""
for(i in 1:dels){
pos = posDel
pos$end = pos$start
bpSeq = .getBpSeq(genome, pos, bpSeqSize, i)
deletions$BpSeq[i] = bpSeq[1]
}
}
if(trans > 0){
translocations$BpSeqB = translocations$BpSeqA = ""
for(i in 1:trans){
if(posTrans_1$balanced[i] == TRUE){
bpSeq = .getBpSeq(genome, posTrans_1, bpSeqSize, i)
translocations$BpSeqA[i] = bpSeq[which(bpSeq != "")][1]
}
bpSeq = .getBpSeq(genome, posTrans_2, bpSeqSize, i)
translocations$BpSeqB[i] = bpSeq[which(bpSeq != "")][1]
}
}
if(ins > 0){
insertions$BpSeqB_3prime = insertions$BpSeqB_5prime = insertions$BpSeqA = ""
for(i in 1:ins){
if(posIns_1$copied[i] == FALSE){
bpSeq = .getBpSeq(genome, posIns_1, bpSeqSize, i)
insertions$BpSeqA[i] = bpSeq[1]
}
bpSeq = .getBpSeq(genome, posIns_2, bpSeqSize, i)
insertions$BpSeqB_5prime[i] = bpSeq[1]
insertions$BpSeqB_3prime[i] = bpSeq[2]
}
}
## Output ############################################################
if(!is.na(output)){
if(output == "."){
if(verbose==TRUE) message("Writing output to current directory")
}else{
if(verbose==TRUE) message("Writing output to ", output)
}
if(dels > 0){
write.table(deletions, file.path(output, "deletions.csv"), col.names=TRUE, row.names=FALSE, sep="\t", quote=FALSE)
}
if(ins > 0){
write.table(insertions, file.path(output, "insertions.csv"), col.names=TRUE, row.names=FALSE, sep="\t", quote=FALSE)
}
if(invs > 0){
write.table(inversions, file.path(output, "inversions.csv"), col.names=TRUE, row.names=FALSE, sep="\t", quote=FALSE)
}
if(dups > 0){
write.table(tandemDups, file.path(output, "tandemDuplications.csv"), col.names=TRUE, row.names=FALSE, sep="\t", quote=FALSE)
}
if(trans > 0){
write.table(translocations, file.path(output, "translocations.csv"), col.names=TRUE, row.names=FALSE, sep="\t", quote=FALSE)
}
writeXStringSet(genome, file.path(output, "genome_rearranged.fasta"), append=FALSE, format="fasta")
}
idx = c(dels>0, ins>0, invs>0, dups>0, trans>0)
## names(svs) = c("deletions", "insertions", "inversions","tandemDuplications","translocations")[idx]
metadata(genome) = list(deletions=deletions, insertions=insertions, inversions=inversions, tandemDuplications=tandemDups, translocations=translocations)[idx]
return(genome)
})
.validateInput <- function(output, genome, chrs, dels, ins, invs, dups, trans, sizeDels, sizeIns, sizeInvs, sizeDups, regionsDels, regionsIns, regionsInvs, regionsDups, regionsTrans, maxDups, percCopiedIns, percBalancedTrans, percSNPs, indelProb, weightsMechanisms, weightsRepeats, bpSeqSize, random, seed){
  ## Validate all user-supplied simulation parameters.
  ## Stops with an informative error on the first invalid argument and
  ## returns NULL (invisibly) when everything checks out. Arguments guarded
  ## by missing() are optional; all others must be supplied by the caller.
  ## output: NA (no files written) or an existing directory
  if(!is.na(output)){
    if(!file.exists(output)){
      stop("Output directory does not exist")
    }
  }
  ## genome and chrs
  if(!missing(genome)){
    ## inherits() instead of class(x) == "...": robust when class() returns a
    ## vector and also accepts subclasses of DNAStringSet
    if(inherits(genome, "DNAStringSet")){
      if(is.null(names(genome))){
        stop("Please enter chromosome names in your genome DNAStringSet")
      }
      if(!missing(chrs)){
        if(!all(chrs %in% names(genome))){
          stop("Invalid argument: Specified chromosomes and chromosome names in the genome do not match")
        }
      }
    }
    if(inherits(genome, "BSgenome")){
      stop("Please extract the desired sequences from the BSgenome package and combine them into a named DNAStringSet")
    }
  }
  ## Number and size of SVs must be non-negative
  if(any(c(dels, ins, invs, dups, trans, sizeDels, sizeIns, sizeInvs, sizeDups) < 0)){
    stop("Invalid argument: Number of SVs and their size cannot be smaller than zero. Makes sense, doesn't it?")
  }
  ## percBalancedTrans, percCopiedIns, percSNPs, indelProb: scalar checks, so
  ## use short-circuiting || rather than vectorized |
  if(percBalancedTrans < 0 || percBalancedTrans > 1 || percCopiedIns < 0 || percCopiedIns > 1 || percSNPs < 0 || percSNPs > 1 || indelProb < 0 || indelProb > 1){
    stop("Invalid argument: Percentages have to be given as value between 0 and 1.")
  }
  ## random and regions
  if(length(random) != 1 && length(random) != 5){
    stop("Invalid argument: Give either one value for argument random for all SVs or five values for each SV in this order: deletions, insertions, inversions, tandem duplications, translocations")
  }
  ## Recycle a single flag to one flag per SV type. Without this, a length-1
  ## `random` makes random[2..5] evaluate to NA below, and if(NA & ...) fails
  ## with "missing value where TRUE/FALSE needed".
  if(length(random) == 1){
    random <- rep(random, 5)
  }
  if(random[1] == FALSE && missing(regionsDels)){
    stop("Missing argument: Please specifiy the regions for deletions")
  }
  if(random[2] == FALSE && missing(regionsIns)){
    stop("Missing argument: Please specifiy the regions for insertions")
  }
  if(random[3] == FALSE && missing(regionsInvs)){
    stop("Missing argument: Please specifiy the regions for inversions")
  }
  if(random[4] == FALSE && missing(regionsDups)){
    stop("Missing argument: Please specifiy the regions for tandem duplications")
  }
  if(random[5] == FALSE && missing(regionsTrans)){
    stop("Missing argument: Please specifiy the regions for translocations")
  }
  ## regions: user-given insertion/translocation regions need target columns
  if(!missing(regionsIns) && random[2] == FALSE){
    if(!all(c("chrB","startB") %in% colnames(mcols(regionsIns)))){
      stop("Invalid argument: regionsIns is missing columns chrB and startB")
    }
  }
  if(!missing(regionsTrans) && random[5] == FALSE){
    if(!all(c("chrB","startB","endB") %in% colnames(mcols(regionsTrans)))){
      stop("Invalid argument: regionsTrans is missing columns chrB, startB and endB")
    }
  }
  ## repeat weights: row/column names must come from these fixed vocabularies
  mechanisms = c("NAHR","NHR","TEI","VNTR","Other")
  bpTypes = c("L1","L2","Alu","MIR","SD","TR","Random")
  svTypes = c("dels", "invs", "ins", "dups", "trans")
  if(!missing(weightsMechanisms)){
    if(is.data.frame(weightsMechanisms)){
      if(!all(rownames(weightsMechanisms) %in% mechanisms) || !all(colnames(weightsMechanisms) %in% svTypes)){
        ## error message fixed: "Other" belongs to the row names (mechanisms),
        ## the column names are the SV types
        stop("Invalid argument: Please make sure that the row names of parameter weightsMechanisms equal \"NAHR\",\"NHR\",\"TEI\",\"VNTR\",\"Other\" and the column names equal \"dels\", \"invs\", \"ins\", \"dups\", \"trans\".")
      }
    }
  }
  if(!missing(weightsRepeats)){
    if(is.data.frame(weightsRepeats)){
      if(!all(rownames(weightsRepeats) %in% bpTypes) || !all(colnames(weightsRepeats) %in% mechanisms)){
        stop("Invalid argument: Please make sure that the row names of parameter weightsRepeats equal \"L1\",\"L2\",\"Alu\",\"MIR\",\"SD\",\"TR\",\"Random\" and the column names equal \"NAHR\",\"NHR\",\"TEI\",\"VNTR\",\"Other\".")
      }
    }
  }
}
## Deprecated: Transposons (maybe useful for future release)
#
# if(trans[3] > 0){
# ## 1.3 for intra-chromosomal translocations
#
# message("Calculating coordinates: ", trans[3], " intra-chromosomal translocations")
#
# intrachrom=TRUE
# subtrahend = RangedData(rbind(gaps[, c("space","start","end")], posTrans_1[, c("space","start","end")], posTrans_2[, c("space","start","end")]))
# regionsTrans = .subtractIntervals(regionsTrans, subtrahend)
# t = .simTranslocationPositions(trans[3], regionsTrans, genome, percInvertedTrans, percBalancedTrans, sizeTrans2, "intrachrom")
# translocations = rbind(translocations, t[[1]])
# posTrans_1 = rbind(posTrans_1, t[[2]])
# posTrans_2 = rbind(posTrans_2, t[[3]])
# }
## Maybe add gene annotation in future release (if it makes sense)
# require(biomaRt)
# ensembl=useMart("ensembl")
# dataset="hsapiens_gene_ensembl"
# ensembl=useDataset(dataset, mart=ensembl)
# bmAttributes = c(
# "hgnc_symbol",
# "chromosome_name",
# "start_position",
# "end_position"
# )
# bmFilter=c("chromosomal_region")
# bmValues = as.list(paste(substr(d$Chr, 4, nchar(d$Chr)), d$Start, d$End, sep=":"))
# genes = getBM(attributes=bmAttributes, filter=bmFilter, values=bmValues, mart=ensembl)
# d$Genes = ""
# for(i in 1:nrow(d)){
# gene_overlap = subset(genes, (chromosome_name == substr(d$Chr[i], 4, nchar(d$Chr))) & (IRanges(start_position, end_position) %in% IRanges(d$Start[i], d$End[i])) & hgnc_symbol != "")
# d$Genes[i] = paste(unique(gene_overlap$hgnc_symbol), collapse=",")
# }
|
e18ecce9b4705a66b77de4ba9a4e06ef70dcc7f2
|
92a0b69e95169c89ec0af530ed43a05af7134d45
|
/man/Load.Rd
|
d8d51053f28450154f1a71e661c8d2c246513466
|
[] |
no_license
|
gelfondjal/IT2
|
55185017b1b34849ac1010ea26afb6987471e62b
|
ee05e227403913e11bf16651658319c70c509481
|
refs/heads/master
| 2021-01-10T18:46:17.062432
| 2016-01-20T17:51:29
| 2016-01-20T17:51:29
| 21,449,261
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 317
|
rd
|
Load.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/Load.R
\name{Load}
\alias{Load}
\title{Loads a single R object from file}
\usage{
Load(file)
}
\arguments{
\item{file}{contains R object}
}
\value{
object for file that was read
}
\description{
Loads a single R object from file
}
|
719acd59bb164ba967dcd726b2923d164ce7a8ba
|
fc539b6d748f8b52daf555dccc823d499bd8409d
|
/MSc-SimSci/ACM41000-Uncertainty-Quantification/assignement3/Data2LD_Fix/Data2LD_Fix/R/DRarrayFn.R
|
a7b392224c6607f10d9ba8f12c81994f22e2a175
|
[] |
no_license
|
iantowey/sandbox
|
39cb5afbbbadea2e86f3ecac671286d9e3c331c8
|
7ea216e663b48e4f52d130656a14a527764a789a
|
refs/heads/master
| 2020-04-02T21:22:57.642105
| 2018-12-19T10:36:20
| 2018-12-19T10:36:20
| 154,797,142
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
r
|
DRarrayFn.R
|
# Thin wrapper around the compiled routine "DRarrayFn": allocates a
# zero-filled output buffer, marshals all inputs through .C, and reshapes
# the flat result into a (nXbasisw x nXbasisx x nWbasisw) array.
DRarrayFn <- function(nXbasisw, nWbasisw, nXbasisx, nWbasisx,
                      Bvecx, Btens) {
  # buffer the C routine fills in; same length as the final array
  out.buffer <- double(nXbasisw * nXbasisx * nWbasisw)
  # argument order must match the C routine's signature exactly
  c.result <- .C("DRarrayFn",
                 as.integer(nXbasisw),
                 as.integer(nWbasisw),
                 as.integer(nXbasisx),
                 as.integer(nWbasisx),
                 as.double(Bvecx),
                 as.double(as.vector(Btens)),
                 as.double(out.buffer))
  # element 7 of the returned list is the filled output buffer
  array(c.result[[7]], c(nXbasisw, nXbasisx, nWbasisw))
}
|
2f64baa8e418fc7f7fb1db20893a202bff5561f2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/aqp/examples/panel.depth_function.Rd.R
|
ce6bf366c0d67d9cabff5a43c41155db409603d8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
r
|
panel.depth_function.Rd.R
|
## Example script extracted from the aqp package's panel.depth_function Rd
## file. NOTE(review): requires the non-base packages 'aqp' and 'lattice'.
library(aqp)
### Name: panel.depth_function
### Title: Lattice Panel Function for Soil Profiles
### Aliases: panel.depth_function prepanel.depth_function make.segments
### Keywords: hplot
### ** Examples
library(lattice)
## sp1: example soil-profile data shipped with aqp
data(sp1)
# 1. plotting mechanism for step-functions derived from soil profile data
## cbind(top, bottom) supplies the depth interval for each horizon;
## id groups horizons belonging to the same profile
xyplot(cbind(top,bottom) ~ prop, data=sp1,id=sp1$id,
panel=panel.depth_function, ylim=c(250,-10),
scales=list(y=list(tick.number=10)), xlab='Property',
ylab='Depth (cm)', main='panel.depth_function() demo'
)
# 1.1 include groups argument to leverage lattice styling framework
## relabel the grouping factor so the legend reads "Group 1"/"Group2"
sp1$group <- factor(sp1$group, labels=c('Group 1', 'Group2'))
xyplot(cbind(top,bottom) ~ prop, groups=group, data=sp1, id=sp1$id,
panel=panel.depth_function, ylim=c(250,-10),
scales=list(y=list(tick.number=10)), xlab='Property',
ylab='Depth (cm)', main='panel.depth_function() demo',
auto.key=list(columns=2, points=FALSE, lines=TRUE),
par.settings=list(superpose.line=list(col=c('Orange','RoyalBlue')))
)
5eb0a3cec823488b81f1edc70c858589f2005b4e
|
6c53c630b849d988884ccf70c137e7f2584d423d
|
/creating_result_lists.R
|
c8558326d71813dc9e8e0054e0d051bed1b40183
|
[] |
no_license
|
MarMagn/VARA_rep
|
9abd6c875f32793dd8f7b2f1ce76098134f52263
|
a3b10a3389eb15375166db84545f805ea3f18791
|
refs/heads/master
| 2021-07-20T10:03:41.898816
| 2018-11-28T20:05:12
| 2018-11-28T20:05:12
| 138,162,921
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,180
|
r
|
creating_result_lists.R
|
## Result containers for the analysis runs.
##
## The original script re-defined the byte-identical S3 method
## print.results_class once per container (eight times, each definition
## overwriting the previous one). The method is defined a single time here
## and a small constructor builds each empty "results_class" list; every
## container carries exactly the same class attribute as before.
## NOTE(review): printing requires the non-base packages magrittr (%>%) and
## knitr, exactly as in the original.

## Pretty-printer for "results_class" lists. Each element is expected to be
## a named vector of numbers; values are formatted to three decimals,
## row-bound into a matrix, and rendered as a knitr table. Prints "None"
## for an empty list.
print.results_class <- function(l) {
  if (length(l) == 0) {
    cat('\n None')
    return()
  }
  sapply(l, USE.NAMES = TRUE, simplify = FALSE, function(vals) {
    sapply(vals, function(v) {
      sprintf("%.3f", v)
    })
  }) %>%
    do.call(rbind, .) %>%
    knitr::kable() %>%
    print
}

## Constructor: an empty list tagged with the "results_class" S3 class so
## that print() dispatches to print.results_class().
new_results_list <- function() {
  l <- list()
  class(l) <- c("results_class", class(l))
  l
}

all_results_30    <- new_results_list()
all_results_90    <- new_results_list()
all_results_fx_30 <- new_results_list()
all_results_fx_90 <- new_results_list()
all_results_el_30 <- new_results_list()
all_results_el_90 <- new_results_list()
code_results_30   <- new_results_list()
code_results_90   <- new_results_list()
|
cf79a59f4c0e51aec129c1ffb8d004365fa98dc3
|
cf46efb113cc46d497eb2f7a147bbbc1a0564ec8
|
/scripts/R/probeSelection/improbe_functions.R
|
05d6caeed67a441ba6b27e07f318f63fde869bef
|
[] |
no_license
|
MethylationWorkhorse/workhorse
|
5b4b053249cc61fde3a6cfe676749e8bfd0478fb
|
2e77c75608831190ac7a297c58238fed75ee703e
|
refs/heads/master
| 2020-09-09T20:16:09.604610
| 2020-03-12T07:55:04
| 2020-03-12T07:55:04
| 221,555,673
| 2
| 1
| null | 2019-11-18T15:04:28
| 2019-11-13T21:33:13
|
Perl
|
UTF-8
|
R
| false
| false
| 47,773
|
r
|
improbe_functions.R
|
suppressWarnings(suppressPackageStartupMessages(require("tidyverse")) )
suppressWarnings(suppressPackageStartupMessages(require("stringr")) )
suppressWarnings(suppressPackageStartupMessages(require("glue")) )
suppressWarnings(suppressPackageStartupMessages(require("scales")) )
suppressWarnings(suppressPackageStartupMessages(require("matrixStats")) )
suppressWarnings(suppressPackageStartupMessages(require("Biostrings")) )
COM <- ","
TAB <- "\t"
RET <- "\n"
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Infinium Methylation Probe toStrings::
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
srdsToBrac = function(tib,
                      beg1=1, end1=60, mid1=61,
                      beg2=63,end2=122,mid2=62) {
  ## Inserts the BNG delimiter around the query site of each design sequence
  ## and splits the annotated tibble by forward/reverse strand.
  ##
  ## tib: tibble with logical FR/CO columns and the four design-sequence
  ##      columns DesSeqN, DesBscU, DesBscM, DesBscD
  ## beg1..end2: substring coordinates; the query di-nucleotide sits at
  ##      positions mid1..mid2 (defaults assume 122 nt design sequences)
  ## returns: list of tibbles keyed by StrandFR ('F'/'R')
  ##
  # TBD:: Calculate alll data points based on sequence length
  ##
  ## Local helper replacing the previously quadruplicated substring/concat
  ## expression: [beg1..end1] BNG [mid1..mid2] BNG [beg2..end2].
  ## BNG is a file-level delimiter constant defined elsewhere in this file.
  brac <- function(seq) {
    paste0(stringr::str_sub(seq, beg1, end1), BNG,
           stringr::str_sub(seq, mid1, mid2), BNG,
           stringr::str_sub(seq, beg2, end2))
  }
  tib %>%
    dplyr::mutate(StrandFR=case_when(FR ~ 'F', !FR ~ 'R', TRUE ~ NA_character_),
                  StrandCO=case_when(CO ~ 'C', !CO ~ 'O', TRUE ~ NA_character_),
                  DesSeqN=brac(DesSeqN),
                  DesBscU=brac(DesBscU),
                  DesBscM=brac(DesBscM),
                  DesBscD=brac(DesBscD)) %>%
    dplyr::arrange(StrandFR, StrandCO) %>% split(.$StrandFR)
}
prbsToStr = function(tib, pr='cg',
                     verbose=0,vt=1,tc=1,tabsStr='') {
  ## Builds (and optionally prints) an aligned multi-line text diagram of the
  ## designed probe fragments for one query site. `tib` is expected to hold
  ## two rows (one per converted/opposite strand, StrandCO 'C'/'O'); the
  ## glue templates below position each fragment by hard-coded space counts
  ## so that both strands line up under the full design sequences.
  ##
  ## pr: probe-type prefix ('cg','ch','rs','rp'); rs/ch use a different
  ##     layout branch than cg/rp
  ## verbose/vt: the diagram is cat()ed when verbose >= vt
  ## tc: tab count used to build tabsStr
  ## NOTE(review): the tabsStr parameter is overwritten immediately below,
  ## so the passed-in value is never used -- confirm whether intentional.
  ## returns: the assembled glue string (also printed when verbose >= vt)
  funcTag <- 'prbsToStr'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  ## strand labels from the two rows: [1] = converted, [2] = opposite
  fr1Key <- tib$StrandFR[1]
  fr2Key <- tib$StrandFR[2]
  co1Key <- tib$StrandCO[1]
  co2Key <- tib$StrandCO[2]
  ## per-methylation-state (U/M/D) column-name lookups into `tib`
  mud <- list('U'='U','M'='M','D'='D')
  desSeq <- 'DesSeqN'
  bscKey <- lapply(mud, function(x) { paste('DesBsc',x,sep='')} )
  nxbKey <- lapply(mud, function(x) { paste('NXB',x,sep='_')} )
  cpnKey <- lapply(mud, function(x) { paste('CPN',x,sep='_')} )
  tarKey <- lapply(mud, function(x) { paste('TAR',x,sep='_')} )
  secKey <- lapply(mud, function(x) { paste('SEC',x,sep='_')} )
  bodKey <- lapply(mud, function(x) { paste('BOD',x,sep='_')} )
  endKey <- lapply(mud, function(x) { paste('END',x,sep='_')} )
  # TBD:: Note on the Opposite Strand we should reverse all framents, but currently fragLen==1 are left alone for effiecntcy...
  # Sketch Output::
  #
  # F_C_N DesSeqN[CG]DesSeqN
  #
  #           D2  22222
  #       N   M1 1111
  #       N   U1 1111
  # F_C_U DesBscU[tG]DesBscU
  # F_O_U DesBscU[Ca]DesBscU
  #   1111 1U   N
  #   1111 1M   N
  #  22222 2D
  #
  if (pr=='rs'||pr=='ch') {
    ## rs/ch layout branch (different per-strand offsets than cg/rp)
    if (fr1Key=='F' && fr2Key=='F') {
      bufC <- 0
      bufO <- 0
      str <- glue::glue(
        "{fr1Key}_{co1Key}_II{TAB}",paste0(rep(" ", 61+bufC), collapse=''),"{tib[[tarKey$D]][1]}{tib[[secKey$D]][1]}{BNG}{tib[[bodKey$D]][1]}{tib[[endKey$D]][1]}{RET}",
        "{fr1Key}_{co1Key}_IM{TAB}",paste0(rep(" ", 59+bufC), collapse=''),"{tib[[nxbKey$M]][1]}{BNG}{tib[[tarKey$M]][1]}{tib[[secKey$M]][1]}{BNG}{tib[[bodKey$M]][1]}{RET}",
        "{fr1Key}_{co1Key}_IU{TAB}",paste0(rep(" ", 59+bufC), collapse=''),"{tib[[nxbKey$U]][1]}{BNG}{tib[[tarKey$U]][1]}{tib[[secKey$U]][1]}{BNG}{tib[[bodKey$U]][1]}{RET}",
        "{fr1Key}_{co1Key}_U {TAB}{tib[[bscKey$U]][1]}{RET}",
        "{fr1Key}_{co1Key}_M {TAB}{tib[[bscKey$M]][1]}{RET}",
        "{fr1Key}_{co1Key}_D {TAB}{tib[[bscKey$D]][1]}{RET}",
        "{fr1Key}_{co1Key}_N {TAB}{tib[[desSeq]][1]}{RET}",
        # "FwdSeq{TAB}{tib$Forward_Sequence[1]}{RET}",
        "{RET}",
        "{fr1Key}_{co2Key}_N {TAB}{cmpl(tib[[desSeq]][2])}{RET}",
        "{fr2Key}_{co2Key}_D {TAB}{Biostrings::reverse(tib[[bscKey$D]][2])}{RET}",
        "{fr2Key}_{co2Key}_M {TAB}{Biostrings::reverse(tib[[bscKey$M]][2])}{RET}",
        "{fr2Key}_{co2Key}_U {TAB}{Biostrings::reverse(tib[[bscKey$U]][2])}{RET}",
        "{fr2Key}_{co2Key}_IU{TAB}",paste0(rep(" ", 11-bufO), collapse=''),"{Biostrings::reverse(tib[[bodKey$U]][2])}{tib[[secKey$U]][2]}{BNG}{tib[[tarKey$U]][2]}{tib[[nxbKey$U]][2]}{RET}",
        "{fr2Key}_{co2Key}_IM{TAB}",paste0(rep(" ", 11-bufO), collapse=''),"{Biostrings::reverse(tib[[bodKey$M]][2])}{tib[[secKey$M]][2]}{BNG}{tib[[tarKey$M]][2]}{tib[[nxbKey$M]][2]}{RET}",
        "{fr2Key}_{co2Key}_II{TAB}",paste0(rep(" ", 10-bufO), collapse=''),"{tib[[endKey$D]][2]}{Biostrings::reverse(tib[[bodKey$D]][2])}{tib[[secKey$D]][2]}{BNG}{tib[[tarKey$D]][2]}{RET}",
        "{RET}")
    } else if (fr1Key=='R' && fr2Key=='R') {
      bufO <- 0
      bufC <- 0
      str <- glue::glue(
        "{fr2Key}_{co2Key}_II{TAB}",paste0(rep(" ", 61+bufO), collapse=''),"{tib[[tarKey$D]][2]}{tib[[secKey$D]][2]}{BNG}{tib[[bodKey$D]][2]}{tib[[endKey$D]][2]}{RET}",
        "{fr2Key}_{co2Key}_IM{TAB}",paste0(rep(" ", 59+bufO), collapse=''),"{tib[[nxbKey$M]][2]}{BNG}{tib[[tarKey$M]][2]}{tib[[secKey$M]][2]}{BNG}{tib[[bodKey$M]][2]}{RET}",
        "{fr2Key}_{co2Key}_IU{TAB}",paste0(rep(" ", 59+bufO), collapse=''),"{tib[[nxbKey$U]][2]}{BNG}{tib[[tarKey$U]][2]}{tib[[secKey$U]][2]}{BNG}{tib[[bodKey$U]][2]}{RET}",
        "{fr2Key}_{co2Key}_D {TAB}{tib[[bscKey$D]][2]}{RET}",
        "{fr2Key}_{co2Key}_N {TAB}{revCmp(tib[[desSeq]][2])}{RET}",
        # "{fr2Key}_{co2Key}_N {TAB}{tib[[desSeq]][2]}{RET}",
        "{RET}",
        # "{fr1Key}_{co1Key}_N {TAB}{cmpl(tib[[desSeq]][1])}{RET}",
        "{fr1Key}_{co1Key}_N {TAB}{Biostrings::reverse(tib[[desSeq]][1])}{RET}",
        "{fr1Key}_{co1Key}_D {TAB}{Biostrings::reverse(tib[[bscKey$D]][1])}{RET}",
        "{fr1Key}_{co1Key}_IU{TAB}",paste0(rep(" ", 11-bufC), collapse=''),"{Biostrings::reverse(tib[[bodKey$U]][1])}{tib[[secKey$U]][1]}{BNG}{tib[[tarKey$U]][1]}{tib[[nxbKey$U]][1]}{RET}",
        "{fr1Key}_{co1Key}_IM{TAB}",paste0(rep(" ", 11-bufC), collapse=''),"{Biostrings::reverse(tib[[bodKey$M]][1])}{tib[[secKey$M]][1]}{BNG}{tib[[tarKey$M]][1]}{tib[[nxbKey$M]][1]}{RET}",
        "{fr1Key}_{co1Key}_II{TAB}",paste0(rep(" ", 10-bufC), collapse=''),"{tib[[endKey$D]][1]}{Biostrings::reverse(tib[[bodKey$D]][1])}{tib[[secKey$D]][1]}{BNG}{tib[[tarKey$D]][1]}{RET}",
        "{RET}")
    } else {
      stop(glue::glue("{RET}[{funcTag}]: ERROR: fr1Key={fr1Key}, fr2Key={fr2Key}, Allowed Values=[F,R]!{RET}{RET}"))
    }
  } else {
    ## cg/rp layout branch
    if (fr1Key=='F' && fr2Key=='F') {
      buf <- 0
      str <- glue::glue(
        "{fr1Key}_{co1Key}_II{TAB}",paste0(rep(" ", 61+buf), collapse=''),"{tib[[tarKey$D]][1]}{tib[[secKey$D]][1]}{BNG}{tib[[bodKey$D]][1]}{tib[[endKey$D]][1]}{RET}",
        "{fr1Key}_{co1Key}_IM{TAB}",paste0(rep(" ", 59+buf), collapse=''),"{tib[[nxbKey$M]][1]}{BNG}{tib[[tarKey$M]][1]}{tib[[secKey$M]][1]}{BNG}{tib[[bodKey$M]][1]}{RET}",
        "{fr1Key}_{co1Key}_IU{TAB}",paste0(rep(" ", 59+buf), collapse=''),"{tib[[nxbKey$U]][1]}{BNG}{tib[[tarKey$U]][1]}{tib[[secKey$U]][1]}{BNG}{tib[[bodKey$U]][1]}{RET}",
        "{fr1Key}_{co1Key}_D {TAB}{tib[[bscKey$D]][1]}{RET}",
        "{fr1Key}_{co1Key}_N {TAB}{tib[[desSeq]][1]}{RET}",
        "{RET}",
        "{fr1Key}_{co2Key}_N {TAB}{cmpl(tib[[desSeq]][2])}{RET}",
        "{fr2Key}_{co2Key}_D {TAB}{Biostrings::reverse(tib[[bscKey$D]][2])}{RET}",
        "{fr2Key}_{co2Key}_IU{TAB}",paste0(rep(" ", 11-buf), collapse=''),"{Biostrings::reverse(tib[[bodKey$U]][2])}{tib[[secKey$U]][2]}{BNG}{tib[[tarKey$U]][2]}{tib[[nxbKey$U]][2]}{RET}",
        "{fr2Key}_{co2Key}_IM{TAB}",paste0(rep(" ", 11-buf), collapse=''),"{Biostrings::reverse(tib[[bodKey$M]][2])}{tib[[secKey$M]][2]}{BNG}{tib[[tarKey$M]][2]}{tib[[nxbKey$M]][2]}{RET}",
        "{fr2Key}_{co2Key}_II{TAB}",paste0(rep(" ", 10-buf), collapse=''),"{tib[[endKey$D]][2]}{Biostrings::reverse(tib[[bodKey$D]][2])}{tib[[secKey$D]][2]}{BNG}{tib[[tarKey$D]][2]}{RET}",
        "{RET}")
    } else if (fr1Key=='R' && fr2Key=='R') {
      ## NOTE(review): the first assignment below is immediately overwritten
      ## (buf ends up 1) -- looks like a leftover from tuning the alignment.
      buf <- 0
      buf <- 1
      str <- glue::glue(
        "{fr2Key}_{co2Key}_II{TAB}",paste0(rep(" ", 61+buf), collapse=''),"{tib[[tarKey$D]][2]}{BNG}{tib[[secKey$D]][2]}{tib[[bodKey$D]][2]}{tib[[endKey$D]][2]}{RET}",
        "{fr2Key}_{co2Key}_IM{TAB}",paste0(rep(" ", 60+buf), collapse=''),"{tib[[nxbKey$M]][2]}{tib[[tarKey$M]][2]}{BNG}{tib[[secKey$M]][2]}{tib[[bodKey$M]][2]}{RET}",
        "{fr2Key}_{co2Key}_IU{TAB}",paste0(rep(" ", 60+buf), collapse=''),"{tib[[nxbKey$U]][2]}{tib[[tarKey$U]][2]}{BNG}{tib[[secKey$U]][2]}{tib[[bodKey$U]][2]}{RET}",
        "{fr2Key}_{co2Key}_D {TAB}{tib[[bscKey$D]][2]}{RET}",
        "{fr2Key}_{co2Key}_N {TAB}{revCmp(tib[[desSeq]][2])}{RET}",
        # "{fr2Key}_{co2Key}_N {TAB}{tib[[desSeq]][2]}{RET}",
        "{RET}",
        # "{fr1Key}_{co1Key}_N {TAB}{cmpl(tib[[desSeq]][1])}{RET}",
        "{fr1Key}_{co1Key}_N {TAB}{Biostrings::reverse(tib[[desSeq]][1])}{RET}",
        "{fr1Key}_{co1Key}_D {TAB}{Biostrings::reverse(tib[[bscKey$D]][1])}{RET}",
        "{fr1Key}_{co1Key}_IU{TAB}",paste0(rep(" ", 11+buf), collapse=''),"{Biostrings::reverse(tib[[bodKey$U]][1])}{BNG}{tib[[secKey$U]][1]}{tib[[tarKey$U]][1]}{BNG}{tib[[nxbKey$U]][1]}{RET}",
        "{fr1Key}_{co1Key}_IM{TAB}",paste0(rep(" ", 11+buf), collapse=''),"{Biostrings::reverse(tib[[bodKey$M]][1])}{BNG}{tib[[secKey$M]][1]}{tib[[tarKey$M]][1]}{BNG}{tib[[nxbKey$M]][1]}{RET}",
        "{fr1Key}_{co1Key}_II{TAB}",paste0(rep(" ", 10+buf), collapse=''),"{tib[[endKey$D]][1]}{Biostrings::reverse(tib[[bodKey$D]][1])}{BNG}{tib[[secKey$D]][1]}{tib[[tarKey$D]][1]}{RET}",
        "{RET}")
    } else {
      stop(glue::glue("{RET}[{funcTag}]: ERROR: fr1Key={fr1Key}, fr2Key={fr2Key}, Allowed Values=[F,R]!{RET}{RET}"))
    }
  }
  if (verbose>=vt) cat(str)
  str
}
prbsToStrMUD = function(tib, mu='U',
                        verbose=0,vt=1,tc=1,tabsStr='') {
  ## Builds (and optionally prints) an aligned text diagram for a SINGLE
  ## methylation state (mu in 'U'/'M'/'D') across the two strand rows of
  ## `tib` -- a simpler, single-state variant of prbsToStr().
  ##
  ## verbose/vt: the diagram is cat()ed when verbose >= vt
  ## tc: tab count used to build tabsStr
  ## NOTE(review): the tabsStr parameter is overwritten immediately below;
  ## funcTag is set to 'prbsToStr' (apparent copy-paste from the sibling
  ## function) -- confirm, though it is only used for labeling.
  ## returns: the assembled glue string
  funcTag <- 'prbsToStr'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  ## strand labels from the two rows: [1] = converted, [2] = opposite
  fr1Key <- tib$StrandFR[1]
  fr2Key <- tib$StrandFR[2]
  co1Key <- tib$StrandCO[1]
  co2Key <- tib$StrandCO[2]
  ## column-name lookups for the requested methylation state only
  bscKey <- paste('DesBsc',mu, sep='')
  nxbKey <- paste('NXB',mu, sep='_')
  cpnKey <- paste('CPN',mu, sep='_')
  tarKey <- paste('TAR',mu, sep='_')
  secKey <- paste('SEC',mu, sep='_')
  bodKey <- paste('BOD',mu, sep='_')
  endKey <- paste('END',mu, sep='_')
  ## hard-coded space counts align the fragments under the full sequences
  str <- glue::glue(
    paste0(rep(" ", 59+10), collapse=''),"{tib[[cpnKey]][1]}{tib[[secKey]][1]}{BNG}{tib[[bodKey]][1]}{tib[[endKey]][1]}{RET}",
    paste0(rep(" ", 59+8), collapse=''),"{tib[[nxbKey]][1]}{BNG}{tib[[cpnKey]][1]}{tib[[secKey]][1]}{BNG}{tib[[bodKey]][1]}{RET}",
    "{fr1Key}_{co1Key}_{mu}{TAB}{tib[[bscKey]][1]}{RET}",
    "{fr2Key}_{co2Key}_{mu}{TAB}{Biostrings::reverse(tib[[bscKey]][2])}{RET}",
    paste0(rep(" ", 9+10), collapse=''),"{Biostrings::reverse(tib[[bodKey]][2])}{tib[[secKey]][2]}{BNG}{tib[[cpnKey]][2]}{tib[[nxbKey]][2]}{RET}",
    paste0(rep(" ", 9+9), collapse=''),"{tib[[endKey]][2]}{Biostrings::reverse(tib[[bodKey]][2])}{tib[[secKey]][2]}{BNG}{tib[[cpnKey]][2]}{RET}",
    "{RET}")
  if (verbose>=vt) cat(str)
  str
}
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Infinium Methylation Probe Design Methods::
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
desAllPrbs = function(tib) {
  ## Designs probes for all three bisulfite-conversion states (U/M/D) of a
  ## design tibble. FR, CO and PRB_DES are assumed to be constant within
  ## `tib`: each is collapsed via distinct() to a single scalar.
  fr <- tib %>% dplyr::distinct(FR) %>% base::as.logical()
  co <- tib %>% dplyr::distinct(CO) %>% base::as.logical()
  pr <- tib %>% dplyr::distinct(PRB_DES) %>% base::as.character()
  ## NOTE(review): the three des2prbs() calls are CHAINED inside a single
  ## bind_rows() argument, so each call overwrites the probe columns added
  ## by the previous one and bind_rows() receives only one tibble. If the
  ## intent was one row-set per state, this would instead be
  ## bind_rows(tib %>% des2prbs(..., mu='U', ...),
  ##           tib %>% des2prbs(..., mu='M', ...),
  ##           tib %>% des2prbs(..., mu='D', ...)).
  ## Confirm against callers/des2prbs() before changing.
  dplyr::bind_rows(tib %>%
    des2prbs(fwd=fr, con=co, pr=pr, mu='U', strand='DesBscU') %>%
    des2prbs(fwd=fr, con=co, pr=pr, mu='M', strand='DesBscM') %>%
    des2prbs(fwd=fr, con=co, pr=pr, mu='D', strand='DesBscD') )
}
# Slice a bisulfite-converted design sequence into probe components.
#
# Cuts the design strand (column named by `strand`) at a probe-class- and
# strand-dependent offset into NXB (next base), CPN (query CpN base), TAR
# (query base forced to the U/M state), SEC (second base), BOD (probe body)
# and END (terminal base), then assembles the reverse-complemented Infinium I
# (PRB1 = revCmp(TAR+SEC+BOD)) and Infinium II (PRB2 = revCmp(SEC+BOD+END))
# probe sequences.  All derived columns are suffixed with `mu` so the three
# conversion states can coexist in one tibble.
#
# @param tib     design tibble containing the `strand` column.
# @param fwd     logical; TRUE for the forward (F) strand.
# @param con     logical; TRUE for the converted (C) strand.
# @param pr      probe class: 'cg', 'ch', 'rs' or 'rp'.
# @param mu      conversion-state suffix: 'N', 'U', 'M' or 'D'.
# @param strand  name of the column holding the design sequence.
# @param len     probe body extent; BOD spans len-1 bases, END is the next base.
# @param del     separator used when suffixing output column names.
# @param QC_CPN  for 'cg' probes, cross-check the CPN-based vs TAR-based probe.
# @return `tib` with mu-suffixed PRB1/PRB2/NXB/CPN/TAR/SEC/BOD/END columns.
des2prbs = function(tib, fwd, con, pr, mu, strand='DesSeqN', len=48, del='_',QC_CPN=TRUE,
                    verbose=0,vt=1,tc=1,tabsStr='') {
  funcTag <- 'des2prbs'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  stopifnot(is.logical(fwd))
  stopifnot(is.logical(con))
  # NOTE(review): the pr check mixes && and & -- equivalent for scalar pr, but
  # && is what is intended throughout.
  if (mu!='N' && mu!='U' && mu!='M' && mu!='D')
    stop(glue::glue("{RET}[{funcTag}]: ERROR: mu={mu} Only Supported=[N,U,M,D]!{RET}{RET}"))
  if (pr!='cg' && pr!='ch' & pr!='rs' & pr!='rp')
    stop(glue::glue("{RET}[{funcTag}]: ERROR: pr={pr} Only Supported=[cg,ch,rp,rs]!{RET}{RET}"))
  strand <- rlang::sym(strand)
  # Offset of the next-base (NXB) position depends on probe class and on the
  # FR/CO strand combination; the commented Perl excerpts below document the
  # original substr() coordinates these offsets were derived from.
  if (pr=='rs'|| pr=='ch') {
    # $prb_F_C_I = revCmp(substr($des_F_C, 60, 50));
    # $prb_R_C_I = revCmp(substr($des_R_C, 61, 50));
    # $prb_F_C_II = revCmp(substr($des_F_C, 60, 50));
    # $prb_R_C_II = revCmp(substr($des_R_C, 61, 50));
    #
    # $prb_F_O_I = revCmp(substr($des_F_O, 61, 50));
    # $prb_R_O_I = revCmp(substr($des_R_O, 62, 50));
    # $prb_F_O_II = revCmp(substr($des_F_O, 61, 50));
    # $prb_R_O_II = revCmp(substr($des_R_O, 62, 50));
    if ( fwd && con) nxb_pos <- 60
    else if (!fwd && con) nxb_pos <- 61
    else if ( fwd && !con) nxb_pos <- 61
    else if (!fwd && !con) nxb_pos <- 60
    # else if (!fwd && !con) nxb_pos <- 62 # Original
    else {
      stop(glue::glue("{RET}[{funcTag}]: ERROR: unsupported combination fwd={fwd}, con={con}!!!{RET}{RET}"))
    }
  } else if (pr=='cg' || pr=='rp') {
    # $prb_F_C_I = revCmp(substr($des_F_C, 60, 50));
    # $prb_F_O_I = revCmp(substr($des_F_O, 61, 50));
    # $prb_R_C_I = revCmp(substr($des_R_C, 60, 50));
    # $prb_R_O_I = revCmp(substr($des_R_O, 61, 50));
    #
    # $prb_F_C_II = revCmp(substr($des_F_C, 61, 50));
    # $prb_F_O_II = revCmp(substr($des_F_O, 62, 50));
    # $prb_R_C_II = revCmp(substr($des_R_C, 61, 50));
    # $prb_R_O_II = revCmp(substr($des_R_O, 62, 50));
    nxb_pos <- 60
    if (!con) nxb_pos <- 61
  } else {
    stop(glue::glue("[{funcTag}]: ERROR: NOT READY!!!{RET}{RET}"))
  }
  # Consecutive 1-based positions: NXB, CpN, second base, body start, body end.
  cpg_pos <- nxb_pos + 1
  sec_pos <- cpg_pos + 1
  bod_pos <- sec_pos + 1
  end_pos <- bod_pos + len
  # Special consideration is needed for U/M strands at the query site.
  # For CN (i.e. cg or ch) this is actually done naturally in U/M conversion
  # However, for non-CN probes (i.e. rs) this needs to be forced to U/M
  #
  # This is handled by the TAR (Target/Query Nucleotide). This should only change
  # for U/M (QMAP_U/QMAP_M) for D its just itself.
  #
  tib <- tib %>% dplyr::mutate(
    NXB=stringr::str_sub(!!strand, nxb_pos, nxb_pos),
    CPN=stringr::str_sub(!!strand, cpg_pos, cpg_pos),
    TAR=qmaps(CPN, mu=mu),
    SEC=stringr::str_sub(!!strand, sec_pos, sec_pos),
    BOD=stringr::str_sub(!!strand, bod_pos, end_pos-1),
    END=stringr::str_sub(!!strand, end_pos, end_pos)
  )
  # QC TEST:: for CpN (cg or ch) verify that the probes are equal. Well call this
  # PRB0 (CGN) and PRB1 (TAR). After testing remove PRB0
  #
  if (QC_CPN && (pr=='cg')) {
    tib <- tib %>%
      tidyr::unite(PRB0, CPN,SEC,BOD, sep='', remove=FALSE) %>%
      tidyr::unite(PRB1, TAR,SEC,BOD, sep='', remove=FALSE) %>%
      tidyr::unite(PRB2, SEC,BOD,END, sep='', remove=FALSE) %>%
      dplyr::mutate(PRB0=revCmp(PRB0), PRB1=revCmp(PRB1), PRB2=revCmp(PRB2))
    # Any row where the CPN- and TAR-derived probes disagree is a hard error.
    qc_tib <- tib %>% filter(PRB0!=PRB1)
    qc_len <- qc_tib %>% base::nrow()
    if (qc_len != 0) {
      qc_tib %>% dplyr::select(1,PRB0,PRB1) %>% print()
      stop(glue::glue("{RET}[{funcTag}]: ERROR: pr={pr}, qc_len={qc_len} != 0!!!{RET}{RET}"))
    }
  } else {
    tib <- tib %>%
      tidyr::unite(PRB1, TAR,SEC,BOD, sep='', remove=FALSE) %>%
      tidyr::unite(PRB2, SEC,BOD,END, sep='', remove=FALSE) %>%
      dplyr::mutate(PRB1=revCmp(PRB1), PRB2=revCmp(PRB2))
  }
  # Add suffix to sequences for merging later
  tib <- tib %>%
    dplyr::select(PRB1,PRB2, NXB,CPN,TAR,BOD,END, everything()) %>%
    dplyr::rename(!!paste('PRB1',mu, sep=del):=PRB1,
                  !!paste('PRB2',mu, sep=del):=PRB2,
                  !!paste('NXB', mu, sep=del):=NXB,
                  !!paste('CPN', mu, sep=del):=CPN,
                  !!paste('TAR', mu, sep=del):=TAR,
                  !!paste('SEC', mu, sep=del):=SEC,
                  !!paste('BOD', mu, sep=del):=BOD,
                  !!paste('END', mu, sep=del):=END)
  tib
}
# Reference notes transcribed from the original Perl implementation of the
# probe-slicing logic.  This function performs no computation and always
# returns NULL; it exists purely to preserve the legacy coordinates.
#
# Fixes: (1) `verbose` was read from the enclosing environment instead of
# being a parameter -- it is now an explicit trailing argument (backward
# compatible); (2) `for (i in c(1:tc))` ran twice when tc==0 because 1:0
# yields c(1, 0) -- replaced with a guarded rep().
#
# @param srd     strand index (unused; kept for interface compatibility).
# @param desSeq  design sequence (unused; kept for interface compatibility).
# @param verbose verbosity level; messages print when verbose >= vt.
# @return NULL, invisibly meaningful only as "no result".
des2prbsNOTES = function(srd, desSeq,
                         vt=1, tc=0, tabsStr='', verbose=0) {
  funcTag <- 'des2prbs'
  if (tc > 0) tabsStr <- paste0(tabsStr, paste0(rep(TAB, tc), collapse=''))
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Starting...{RET}"))
  # Legacy Perl coordinates (documentation only)::
  # my @desSetU = unpack("A11"."A48"."AAAA"."A47"."A12", $bscNewU);
  # my @desSetM = unpack("A11"."A48"."AAAA"."A47"."A12", $bscNewM);
  # my @desSetD = unpack("A11"."A48"."AAAA"."A47"."A12", $bscNewD);
  # return( $desSetU[2], $desSetU[3], $desSetU[4].$desSetU[5].$desSetU[6], $desSetU[7],
  #         $desSetM[2], $desSetM[3], $desSetM[4].$desSetM[5].$desSetM[6], $desSetM[7]) if ($desCO eq $C);
  # return( revCmpl($desSetU[4]), revCmpl($desSetU[3]), revCmpl($desSetU[1].$desSetU[2]), revCmpl($desSetU[0]),
  #         revCmpl($desSetM[4]), revCmpl($desSetM[3]), revCmpl($desSetM[1].$desSetM[2]), revCmpl($desSetM[0])) if ($desCO eq $O);
  # $$prbRef[$srd][$iU]=[ $desSetU[2], $desSetU[3], $desSetU[4].$desSetU[5].$desSetU[6], $desSetU[7] ];
  # $$prbRef[$srd][$iM]=[ $desSetM[2], $desSetM[3], $desSetM[4].$desSetM[5].$desSetM[6], $desSetM[7] ];
  # $$prbRef[$srd][$iD]=[ $desSetD[2], $desSetD[3], $desSetD[4].$desSetD[5].$desSetD[6], $desSetD[7] ];
  #
  # $srd++;
  # $$prbRef[$srd][$iU]=[ revCmpl($desSetU[4]), revCmpl($desSetU[3]), revCmpl($desSetU[1].$desSetU[2]), revCmpl($desSetU[0]) ];
  # $$prbRef[$srd][$iM]=[ revCmpl($desSetM[4]), revCmpl($desSetM[3]), revCmpl($desSetM[1].$desSetM[2]), revCmpl($desSetM[0]) ];
  # $$prbRef[$srd][$iD]=[ revCmpl($desSetD[4]), revCmpl($desSetD[3]), revCmpl($desSetD[1].$desSetD[2]), revCmpl($desSetD[0]) ];
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Done.{RET}{RET}"))
  NULL
}
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Output improbe Methods::
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Write an improbe input TSV and (optionally) build and run the improbe
# command line.
#
# Always writes `<dir>/<name>.improbe-input.tsv`.  When `exe`, `impTango`
# and `imp13mer` are all supplied, also writes an executable shell script
# that runs improbe, redirecting stdout to the output TSV and stderr to a
# log, and touching a .fin.txt sentinel on completion; the script is run
# synchronously when run=TRUE.
#
# @param tib       improbe-formatted input tibble.
# @param name      basename for all generated files.
# @param dir       output directory (created if missing).
# @param run       execute the generated script immediately?
# @param exe       path to the improbe executable.
# @param impTango  path to the improbe Tango address file (-a).
# @param imp13mer  path to the improbe 13-mer file (-n).
# @param tbVar,coVar  improbe -t/-c strand options.
# @return path of the expected improbe output TSV (whether or not it was run).
writeImprobeInput = function(tib, name, dir, run=FALSE,
                             exe=NULL, impTango=NULL, imp13mer=NULL,
                             tbVar='BOTH', coVar='BOTH',
                             verbose=0,vt=3,tc=1,tabsStr='') {
  funcTag <- 'writeImprobeInput'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} name={name}, dir={dir}.{RET}"))
  if (!dir.exists(dir)) dir.create(dir, recursive=TRUE)
  imp_run_sh  <- file.path(dir, paste0(name,'.improbe-input.sh'))
  imp_inp_tsv <- file.path(dir, paste0(name,'.improbe-input.tsv'))
  imp_out_tsv <- file.path(dir, paste0(name,'.improbe-output.tsv'))
  imp_log_txt <- file.path(dir, paste0(name,'.improbe-output.log'))
  imp_fin_txt <- file.path(dir, paste0(name,'.improbe-output.fin.txt'))
  imp_cmd_str <- ''
  readr::write_tsv(tib, imp_inp_tsv)
  if (!is.null(exe) && ! is.null(impTango) && !is.null(imp13mer)) {
    # stopifnot(file.exists(exe))
    # stopifnot(file.exists(impTango))
    # stopifnot(file.exists(imp13mer))
    # Template: ${CMD} -oASPE -tBOTH -cBoth -n${fn_13mer} -a${fn_tango} -V ${fn_in} >$fn_out 2>${fn_log}
    imp_cmd_str <- glue::glue("{exe} -oASPE -t{tbVar} -c{coVar} -n{imp13mer} -a{impTango} -V ",
                              "{imp_inp_tsv} >{imp_out_tsv} 2>{imp_log_txt}{RET}",
                              "touch {imp_fin_txt}{RET}")
    readr::write_file(imp_cmd_str, imp_run_sh)
    Sys.chmod(imp_run_sh, mode='0777')
    if (run) system(imp_run_sh, wait = TRUE)
  }
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Done.{RET}{RET}"))
  imp_out_tsv
}
#
# tibToImprobeInput = function(tib, verbose=0,vt=5,tc=1,tabsStr='') {
# funcTag <- 'tibToImprobeInput'
# tabsStr <- paste0(rep(TAB, tc), collapse='')
#
# if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Starting...{RET}"))
#
# imp_tib <- tib %>% dplyr::mutate(Sequence=stringr::str_replace(Forward_Sequence, '\\[[A-Za-z][A-Za-z]\\]', '[CG]'),
# CpG_Island='FALSE' ) %>%
# dplyr::select(IlmnID, Sequence, Genome_Build, CHR, MAPINFO, CpG_Island) %>%
# dplyr::rename(Seq_ID=IlmnID, Chromosome=CHR, Coordinate=MAPINFO)
#
# imp_tib
# }
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Load/Format Input File Methods::
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Load 'rs' (SNP) probes from a manifest CSV and join their swapped
# forward sequences.
#
# @param file      manifest CSV (readr::read_csv).
# @param swap      TSV of Seq_ID/Sequence records whose Seq_ID is
#                  "<IlmnID>_<diNUC>"; supplies the Forward_Sequence with the
#                  query site rewritten to '[<diNUC>]'.
# @param revAllele when TRUE, swap AddressA/B and AlleleA/B probe columns for
#                  Infinium I rows whose AlleleA probe ends in 'C' -- a legacy
#                  M/U manufacturing correction.  Discouraged (see note below).
# @return manifest tibble restricted to the standard columns plus CGN/diNUC.
loadManifestRS = function(file, swap, revAllele=FALSE, verbose=0,vt=1,tc=1,tabsStr='') {
  funcTag <- 'loadManifestRS'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Loading manifest(RS)={file}.{RET}"))
  snp_man_tib <- suppressMessages(suppressWarnings(readr::read_csv(file) ))
  snp_swp_tib <- suppressMessages(suppressWarnings(readr::read_tsv(swap) ))
  # NOTE: Rediculous Swapping Needed for Infinium I M/U -> U/M from old manufacturing mistake::
  #  - This method should not be used...
  if (revAllele) {
    snp_man_tib <- snp_man_tib %>%
      dplyr::mutate(
        TMP_ADD_A=AddressA_ID,
        TMP_ADD_B=AddressB_ID,
        TMP_SEQ_A=AlleleA_ProbeSeq,
        TMP_SEQ_B=AlleleB_ProbeSeq,
        IS_SWAP=case_when(
          Infinium_Design_Type=='I' & stringr::str_ends(AlleleA_ProbeSeq,'C') ~ TRUE,
          TRUE ~ FALSE
        ),
        AddressA_ID=case_when( IS_SWAP ~ TMP_ADD_B, TRUE ~ AddressA_ID),
        AddressB_ID=case_when( IS_SWAP ~ TMP_ADD_A, TRUE ~ AddressB_ID),
        AlleleA_ProbeSeq=case_when( IS_SWAP ~ TMP_SEQ_B, TRUE ~ AlleleA_ProbeSeq ),
        AlleleB_ProbeSeq=case_when( IS_SWAP ~ TMP_SEQ_A, TRUE ~ AlleleB_ProbeSeq )
      )
  }
  # CGN: probe name with any '_' suffix stripped.
  # NOTE(review): the pattern '_\\.*\\$' matches a literal '$' -- presumably
  # '_.*$' (strip everything after the first underscore) was intended; verify
  # against real Name values before changing.
  snp_man_tib <- snp_man_tib %>%
    dplyr::mutate(CGN=stringr::str_remove(Name, '_\\.*\\$') ) %>%
    dplyr::select(IlmnID, Name, AddressA_ID, AlleleA_ProbeSeq, AddressB_ID, AlleleB_ProbeSeq,
                  Infinium_Design_Type, Next_Base, Color_Channel, CGN)
  # Rewrite the bracketed query site to the record's dinucleotide.
  snp_swp_tib <- snp_swp_tib %>% tidyr::separate(Seq_ID, into=c('IlmnID', 'diNUC'), sep='_') %>%
    dplyr::rename(Forward_Sequence=Sequence, CHR=Chromosome, MAPINFO=Coordinate) %>%
    dplyr::mutate(Forward_Sequence=stringr::str_replace(Forward_Sequence,'\\[CG\\]', paste0('[',diNUC,']') ) ) %>%
    dplyr::select(-CpG_Island)
  snp_man_tib <- snp_man_tib %>% dplyr::inner_join(snp_swp_tib, by="IlmnID") %>%
    dplyr::select(IlmnID, Name, AddressA_ID, AlleleA_ProbeSeq, AddressB_ID, AlleleB_ProbeSeq,
                  Infinium_Design_Type, Next_Base, Color_Channel, Forward_Sequence,
                  Genome_Build, CHR, MAPINFO, CGN, diNUC)
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Done.{RET}{RET}"))
  snp_man_tib
}
# Load 'cg' (CpG) probes from an Illumina manifest CSV (7 header lines
# skipped) and derive CGN / diNUC columns.
#
# NOTE(review): diNUC is extracted with the ICU-style replacement "\\$1"
# (an escaped '$' followed by '1') rather than "$1" -- it is unclear whether
# this yields the captured group or a literal "$1" on real data; the
# follow-up str_remove('^\\\\') strips a leading backslash, suggesting this
# was ported from Perl.  Verify the extracted diNUC values before reuse.
loadManifestCG = function(file, pr='cg', verbose=0,vt=1,tc=1,tabsStr='') {
  funcTag <- 'loadManifestCG'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Loading manifest(CG)={file}.{RET}"))
  cpg_man_tib <- suppressMessages(suppressWarnings(readr::read_csv(file, skip=7) )) %>%
    dplyr::filter(stringr::str_starts(IlmnID,pr)) %>%
    dplyr::mutate(CGN=stringr::str_remove(Name, '_\\.*\\$'),
                  diNUC=stringr::str_remove(
                    stringr::str_replace(Forward_Sequence, '^.*\\[([A-Za-z][A-Za-z])].*$', "\\$1"),'^\\\\') ) %>%
    dplyr::select(IlmnID, Name, AddressA_ID, AlleleA_ProbeSeq, AddressB_ID, AlleleB_ProbeSeq,
                  Infinium_Design_Type, Next_Base, Color_Channel, Forward_Sequence,
                  Genome_Build, CHR, MAPINFO, CGN, diNUC)
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Done.{RET}{RET}"))
  cpg_man_tib
}
# Load 'ch' (CpH / non-CpG) probes from an Illumina manifest CSV.
#
# Bug fix: the body previously read the global `cph_man_csv` instead of the
# `file` argument, silently ignoring the caller's path.
#
# @param file manifest CSV path.
# @param pr   IlmnID prefix to keep (default 'ch').
# @param ry   IUPAC code prepended to the second query base when rewriting
#             the bracketed site (default 'R').
# @return tibble of ch probes with standard columns plus CGN/diNUC and a
#         Forward_Sequence whose '[NN]' site is rewritten to '[<ry><base>]'.
loadManifestCH = function(file, pr='ch', ry='R', verbose=0,vt=1,tc=1,tabsStr='') {
  funcTag <- 'loadManifestCH'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Loading manifest(CH)={file}.{RET}"))
  cph_man_tib <- suppressMessages(suppressWarnings(readr::read_csv(file) )) %>%
    dplyr::filter(stringr::str_starts(IlmnID,pr)) %>%
    # Strand/FR/CO/TP are computed for inspection but dropped by the final select.
    dplyr::mutate(Strand=stringr::str_sub(IlmnID, -1), FR=Strand, CO='O',
                  TP=case_when( is.na(AlleleB_ProbeSeq) ~ 'II', TRUE ~ 'I' )) %>%
    dplyr::mutate(CGN=stringr::str_remove(Name, '_\\.*\\$'),
                  diNUC=paste0(ry,stringr::str_remove(
                    stringr::str_replace(Forward_Sequence, '^.*\\[[A-Za-z]([A-Za-z])].*$', "\\$1"),'^\\\\')),
                  Forward_Sequence=stringr::str_replace(Forward_Sequence, '\\[[A-Za-z][A-Za-z]\\]', paste0('[',diNUC,']')) ) %>%
    dplyr::select(IlmnID, Name, AddressA_ID, AlleleA_ProbeSeq, AddressB_ID, AlleleB_ProbeSeq,
                  Infinium_Design_Type, Next_Base, Color_Channel, Forward_Sequence,
                  Genome_Build, CHR, MAPINFO, CGN, diNUC)
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Done.{RET}{RET}"))
  cph_man_tib
}
# Load 'ch' probes with the query site degenerated to 'Y<base>' -- the
# Y-prefixed variant of loadManifestCH().
#
# Bug fixes: (1) the body previously read the global `cph_man_csv` instead of
# the `file` argument; (2) funcTag was mislabeled 'loadManifestCH'.
#
# @param file manifest CSV path.
# @param pr   IlmnID prefix to keep (default 'ch').
# @return tibble of ch probes with standard columns plus CGN/diNUC and a
#         Forward_Sequence whose '[NN]' site is rewritten to '[Y<base>]'.
loadManifestYH = function(file, pr='ch', verbose=0,vt=1,tc=1,tabsStr='') {
  funcTag <- 'loadManifestYH'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Loading manifest(CH)={file}.{RET}"))
  cph_man_tib <- suppressMessages(suppressWarnings(readr::read_csv(file) )) %>%
    dplyr::filter(stringr::str_starts(IlmnID,pr)) %>%
    # Strand/FR/CO/TP are computed for inspection but dropped by the final select.
    dplyr::mutate(Strand=stringr::str_sub(IlmnID, -1), FR=Strand, CO='O',
                  TP=case_when( is.na(AlleleB_ProbeSeq) ~ 'II', TRUE ~ 'I' )) %>%
    dplyr::mutate(CGN=stringr::str_remove(Name, '_\\.*\\$'),
                  diNUC=paste0('Y',stringr::str_remove(
                    stringr::str_replace(Forward_Sequence, '^.*\\[[A-Za-z]([A-Za-z])].*$', "\\$1"),'^\\\\')),
                  Forward_Sequence=stringr::str_replace(Forward_Sequence, '\\[[A-Za-z][A-Za-z]\\]', paste0('[',diNUC,']')) ) %>%
    dplyr::select(IlmnID, Name, AddressA_ID, AlleleA_ProbeSeq, AddressB_ID, AlleleB_ProbeSeq,
                  Infinium_Design_Type, Next_Base, Color_Channel, Forward_Sequence,
                  Genome_Build, CHR, MAPINFO, CGN, diNUC)
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Done.{RET}{RET}"))
  cph_man_tib
}
# Legacy loader for 'ch' probes: keeps the raw two-letter diNUC rather than
# the R/Y-degenerated site used by loadManifestCH()/loadManifestYH().
#
# Bug fix: the body previously read the global `cph_man_csv` instead of the
# `file` argument, silently ignoring the caller's path.
#
# @param file manifest CSV path.
# @param pr   IlmnID prefix to keep (default 'ch').
# @return tibble of ch probes with standard columns plus CGN/diNUC.
loadManifestCH_OLD = function(file, pr='ch', verbose=0,vt=1,tc=1,tabsStr='') {
  funcTag <- 'loadManifestCH_OLD'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Loading manifest(CH)={file}.{RET}"))
  cph_man_tib <- suppressMessages(suppressWarnings(readr::read_csv(file) )) %>%
    dplyr::filter(stringr::str_starts(IlmnID,pr)) %>%
    # Strand/FR/CO/TP are computed for inspection but dropped by the final select.
    dplyr::mutate(Strand=stringr::str_sub(IlmnID, -1), FR=Strand, CO='O',
                  TP=case_when( is.na(AlleleB_ProbeSeq) ~ 'II', TRUE ~ 'I' )) %>%
    dplyr::mutate(CGN=stringr::str_remove(Name, '_\\.*\\$'),
                  diNUC=stringr::str_remove(
                    stringr::str_replace(Forward_Sequence, '^.*\\[([A-Za-z][A-Za-z])].*$', "\\$1"),'^\\\\') ) %>%
    dplyr::select(IlmnID, Name, AddressA_ID, AlleleA_ProbeSeq, AddressB_ID, AlleleB_ProbeSeq,
                  Infinium_Design_Type, Next_Base, Color_Channel, Forward_Sequence,
                  Genome_Build, CHR, MAPINFO, CGN, diNUC)
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Done.{RET}{RET}"))
  cph_man_tib
}
# Normalize an improbe design output table to this project's column scheme.
#
# Accepts either a TSV path (`file`) or an already-loaded tibble
# (`src_des_tib`); exactly one must be provided (file wins when both are).
# Builds a Probe_ID of the form "<Seq_ID>_<FR>_<T|B>_<CO>", converts the
# strand columns to logicals (F/TOP/C -> TRUE), reconciles the U/M next
# base and color channel (NA when they disagree), and renames the long
# improbe score columns to compact <METRIC>_{RAW,SCR}_{U,M} names.
#
# Color-channel encoding: FALSE = Red (next base A/T), TRUE = Grn (C/G).
loadImprobeDesign = function(file=NULL, src_des_tib=NULL, verbose=0,vt=1,tc=1) {
  funcTag <- 'loadImprobeDesign'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Loading improbe={file}.{RET}"))
  if (!is.null(file)) src_des_tib <- suppressMessages(suppressWarnings(readr::read_tsv(file)))
  stopifnot(!is.null(src_des_tib))
  src_des_tib <- src_des_tib %>%
    dplyr::rename(PRB1_U=UnMethyl_Probe_Sequence,
                  PRB1_M=Methyl_Probe_Sequence,
                  NXB_U=UnMethyl_Next_Base,
                  NXB_M=Methyl_Next_Base) %>%
    dplyr::mutate(Probe_ID=paste(Seq_ID, Methyl_Allele_FR_Strand, stringr::str_sub(Methyl_Allele_TB_Strand,1,1),
                                 Methyl_Allele_CO_Strand, sep='_'),
                  FR=case_when(Methyl_Allele_FR_Strand=='F' ~ TRUE, Methyl_Allele_FR_Strand=='R' ~ FALSE, TRUE ~ NA),
                  TB=case_when(Methyl_Allele_TB_Strand=='TOP' ~ TRUE, Methyl_Allele_TB_Strand=='BOT' ~ FALSE, TRUE ~ NA),
                  CO=case_when(Methyl_Allele_CO_Strand=='C' ~ TRUE, Methyl_Allele_CO_Strand=='O' ~ FALSE, TRUE ~ NA),
                  diNUC='CG',
                  # Consensus next base / color across U and M designs; NA if they differ.
                  NXB_IMP=case_when(NXB_U==NXB_M ~ NXB_U,
                                    TRUE ~ NA_character_),
                  COL_U=case_when(NXB_U=='A'|NXB_U=='T' ~ FALSE,  # Red == FALSE
                                  NXB_U=='C'|NXB_U=='G' ~ TRUE,   # Grn == TRUE
                                  TRUE ~ NA),
                  COL_M=case_when(NXB_M=='A'|NXB_M=='T' ~ FALSE,  # Red == FALSE
                                  NXB_M=='C'|NXB_M=='G' ~ TRUE,   # Grn == TRUE
                                  TRUE ~ NA),
                  COL_IMP=case_when(COL_U==COL_M ~ COL_U,
                                    TRUE ~ NA)
    ) %>%
    # Design Score Parameters::
    dplyr::rename(
      PRB_SCR_U=UnMethyl_Final_Score,
      PRB_SCR_M=Methyl_Final_Score,
      PRB_SCR_S=Probeset_Score,
      TM_RAW_M=Methyl_Tm,
      TM_SCR_M=Methyl_Tm_Score,
      GC_RAW_M=Methyl_GC_Percent,
      GC_SCR_M=Methyl_GC_Score,
      KM_RAW_M=Methyl_13mer_Count,
      KM_SCR_M=Methyl_13mer_Score,
      AD_RAW_M=Methyl_Address_Count,
      AD_SCR_M=Methyl_Address_Score,
      CM_RAW_M=Methyl_Self_Complementarity,
      CM_SCR_M=Methyl_Self_Complementarity_Score,
      MO_RAW_M=Methyl_Mono_Run,
      MO_SCR_M=Methyl_Mono_Run_Score,
      EP_RAW_M=Methyl_Ectopic_Count,
      EP_SCR_M=Methyl_Ectopic_Score,
      CG_RAW_M=Methyl_Underlying_CpG_Count,
      MD_RAW_M=Methyl_Underlying_CpG_Min_Dist,
      CG_SCR_M=Methyl_Underlying_CpG_Score,
      NB_SCR_M=Methyl_Next_Base_Score,
      TM_RAW_U=UnMethyl_Tm,
      TM_SCR_U=UnMethyl_Tm_Score,
      GC_RAW_U=UnMethyl_GC_Percent,
      GC_SCR_U=UnMethyl_GC_Score,
      KM_RAW_U=UnMethyl_13mer_Count,
      KM_SCR_U=UnMethyl_13mer_Score,
      AD_RAW_U=UnMethyl_Address_Count,
      AD_SCR_U=UnMethyl_Address_Score,
      CM_RAW_U=UnMethyl_Self_Complementarity,
      CM_SCR_U=UnMethyl_Self_Complementarity_Score,
      MO_RAW_U=UnMethyl_Mono_Run,
      MO_SCR_U=UnMethyl_Mono_Run_Score,
      EP_RAW_U=UnMethyl_Ectopic_Count,
      EP_SCR_U=UnMethyl_Ectopic_Score,
      CG_RAW_U=UnMethyl_Underlying_CpG_Count,
      MD_RAW_U=UnMethyl_Underlying_CpG_Min_Dist,
      CG_SCR_U=UnMethyl_Underlying_CpG_Score,
      NB_SCR_U=UnMethyl_Next_Base_Score
    ) %>%
    dplyr::select(Probe_ID, Seq_ID, FR, TB, CO, diNUC, NXB_IMP, COL_IMP, PRB1_U, PRB1_M,
                  Forward_Sequence,
                  Genome_Build, Chromosome, Coordinate,
                  dplyr::contains("_RAW_"), dplyr::contains("_SCR_"))
  if (verbose>=vt) cat(glue::glue("[{funcTag}]:{tabsStr} Done.{RET}{RET}"))
  src_des_tib
}
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Basic Bisulfite Conversion Methods::
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Strip the first '[' and the first ']' from each element of `x`
# (e.g. "AAC[CG]TTA" -> "AACCGTTA").
shearBrac = function(x) {
  no_open <- sub("[", "", x, fixed = TRUE)
  sub("]", "", no_open, fixed = TRUE)
}
# Unmethylated bisulfite conversion: upper-case the input, then translate
# every C-containing IUPAC code to its T-containing counterpart.  Converted
# positions are emitted in lower case unless uc=TRUE.
bscU = function(x, uc=FALSE) {
  seq_up <- stringr::str_to_upper(x)
  repl <- if (uc) 'TTKWKWD' else 'ttkwkwd'
  tr(seq_up, 'CYSMBHV', repl)
}
bscUs = function(x, uc=FALSE) { bscU(x) }
# Look up the methylated-conversion replacement for a CpN dinucleotide in the
# MAP_M table; keys not present fall through unchanged.
MAPM = function(x) {
  hit <- MAP_M[[x]]
  if (length(hit) == 0) return(x)
  hit
}
bscM = function(x) { stringr::str_replace_all(x, '([CYSMBHV][GRSKBDV])', MAPM) }
# Methylated bisulfite conversion over a character vector: protect CpN-context
# cytosines via bscM(), then convert all remaining C-codes to T-codes in lower
# case; uc=TRUE upper-cases the final result.
bscMs = function(x, uc=FALSE) {
  seqs <- stringr::str_to_upper(x)
  seqs <- lapply(seqs, bscM) %>% BiocGenerics::unlist()
  seqs <- tr(seqs, 'CYSMBHV', 'ttkwkwd')
  if (uc) {
    seqs <- stringr::str_to_upper(seqs)
  }
  seqs
}
# Look up the degenerate-conversion replacement for a CpN dinucleotide in the
# MAP_D table; keys not present fall through unchanged.
MAPD = function(x) {
  hit <- MAP_D[[x]]
  if (length(hit) == 0) return(x)
  hit
}
bscD = function(x) { stringr::str_replace_all(x, '([CYSMBHV][GRSKBDV])', MAPD) }
# Degenerate bisulfite conversion over a character vector: mark CpN-context
# sites via bscD(), then convert all remaining C-codes to T-codes in lower
# case; uc=TRUE upper-cases the final result.
bscDs = function(x, uc=FALSE) {
  seqs <- stringr::str_to_upper(x)
  seqs <- lapply(seqs, bscD) %>% BiocGenerics::unlist()
  seqs <- tr(seqs, 'CYSMBHV', 'ttkwkwd')
  if (uc) {
    seqs <- stringr::str_to_upper(seqs)
  }
  seqs
}
# Resolve a single query-site nucleotide to its U- or M-strand representative;
# any other `mu` value returns the input unchanged.
QMAP = function(x, mu) {
  switch(mu,
         U = QMAP_U[[x]],
         M = QMAP_M[[x]],
         x)
}
# Vectorized QMAP() over a character vector.
#
# Bug fix: the body previously ended in an assignment (`x <- ...`), which made
# the return value invisible; return the mapped vector directly instead.
qmaps = function(x, mu) {
  BiocGenerics::unlist(lapply(x, QMAP, mu))
}
# TRUE iff the two-letter IUPAC pair `x` is listed as compatible in the
# IUPAC_EQ table; pairs not in the table are incompatible (FALSE).
cmpIUPAC = function(x) {
  if (x %in% names(IUPAC_EQ)) {
    return(IUPAC_EQ[[x]])
  }
  FALSE
}
# Vectorized cmpIUPAC() over a character vector of two-letter pairs.
#
# Bug fix: the body previously ended in an assignment, which made the return
# value invisible; return the logical vector directly instead.
cmpIUPACs = function(x) {
  BiocGenerics::unlist(lapply(x, cmpIUPAC))
}
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# MisMatch Probe Comparison Methods::
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Count probe-body mismatches for one Infinium I allele (U or M).
#
# Splits each probe into body (all but the last base) and target (last base):
# BOD_NumMM_<mu> is the edit distance (adist) between the two upper-cased
# bodies; TAR_EQU_<mu> is TRUE when the two final bases are IUPAC-compatible
# per cmpIUPACs().  DI_NUC_AB (the concatenated final bases) is left in the
# output as a side product.
#
# @param fieldA,fieldB column names (strings or symbols) of the two probes.
# @param mu suffix ('U' or 'M') appended to the output columns.
cmpInfIMU_MisMatch = function(tib, fieldA, fieldB, mu, del='_',
                              verbose=0,vt=4,tc=1,tabsStr='') {
  funcTag <- 'cmpInfIMU_MisMatch'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]: fieldA={fieldA}, fieldB={fieldB} mu={mu}{RET}"))
  fieldA <- rlang::sym(fieldA)
  fieldB <- rlang::sym(fieldB)
  tib <- tib %>%
    dplyr::mutate(
      # Row-wise edit distance between probe bodies (adist is not vectorized
      # pairwise, hence mapply).
      BOD_NumMM=mapply(
        adist,
        stringr::str_sub(stringr::str_to_upper(!!fieldA),1,stringr::str_length(!!fieldA)-1),
        stringr::str_sub(stringr::str_to_upper(!!fieldB),1,stringr::str_length(!!fieldB)-1) ),
      DI_NUC_AB=paste0(
        stringr::str_to_upper(stringr::str_sub(!!fieldA,stringr::str_length(!!fieldA),stringr::str_length(!!fieldA)) ),
        stringr::str_to_upper(stringr::str_sub(!!fieldB,stringr::str_length(!!fieldB),stringr::str_length(!!fieldB)) )
      ),
      TAR_EQU=cmpIUPACs(DI_NUC_AB)
    ) %>%
    dplyr::rename(!!paste('BOD_NumMM',mu, sep=del):=BOD_NumMM,
                  !!paste('TAR_EQU', mu, sep=del):=TAR_EQU)
  tib
}
# Infinium I mismatch summary across both alleles.
#
# Runs cmpInfIMU_MisMatch() for the U and M probe pairs, then collapses the
# results: Man_MisMatch is the mean of the U and M body edit distances, and
# Man_TarMatch is TRUE only when BOTH target bases are IUPAC-compatible.
# The per-allele intermediate columns are dropped.
cmpInfI_MisMatch = function(tib, fieldAU, fieldBU, fieldAM, fieldBM, del='_',
                            verbose=0,vt=4,tc=1,tabsStr='') {
  funcTag <- 'cmpInfI_MisMatch'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]: fieldAU={fieldAU}, fieldBU={fieldBU}{RET}"))
  if (verbose>=vt) cat(glue::glue("[{funcTag}]: fieldAM={fieldAM}, fieldBM={fieldBM}{RET}"))
  tib <- tib %>% cmpInfIMU_MisMatch(fieldAU, fieldBU, mu='U', del=del,verbose=verbose, vt=vt+1,tc=tc+1)
  tib <- tib %>% cmpInfIMU_MisMatch(fieldAM, fieldBM, mu='M', del=del,verbose=verbose, vt=vt+1,tc=tc+1)
  tib <- tib %>% dplyr::mutate(
    Man_MisMatch=(BOD_NumMM_U+BOD_NumMM_M)/2, #, na.rm=TRUE),
    Man_TarMatch=case_when(TAR_EQU_U & TAR_EQU_M ~ TRUE, TRUE ~ FALSE) ) %>%
    dplyr::select(-c(BOD_NumMM_U,BOD_NumMM_M,TAR_EQU_U,TAR_EQU_M))
  tib
}
# Infinium II mismatch summary: a single degenerate (D) probe per locus.
#
# Delegates to cmpInfIMU_MisMatch() with mu='D' and renames the suffixed
# outputs to the generic Man_* columns used downstream.  `mu` is accepted for
# signature parity with cmpInfI_MisMatch() but the 'D' suffix is fixed, since
# the rename below depends on it.
#
# Fixes: funcTag was mislabeled 'cmpInfI'; the redundant rlang::sym()
# conversion is removed (cmpInfIMU_MisMatch() performs its own conversion
# and accepts strings or symbols).
cmpInfII_MisMatch = function(tib, fieldA, fieldB, mu='D', del='_',
                             verbose=0,vt=4,tc=1,tabsStr='') {
  funcTag <- 'cmpInfII_MisMatch'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  tib %>%
    cmpInfIMU_MisMatch(fieldA, fieldB, mu='D', del=del, verbose=verbose, vt=vt+1, tc=tc+1) %>%
    dplyr::rename(
      Man_MisMatch=BOD_NumMM_D,
      Man_TarMatch=TAR_EQU_D)
}
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Exact Probe Comparison Methods::
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Exact-match comparison for one Infinium I allele (U or M).
#
# Inf1_Match_<mu> is TRUE when the upper-cased probe bodies (all but the last
# base) are identical AND the two final bases are IUPAC-compatible.  All
# intermediate columns are dropped.
cmpInfIMU= function(tib, fieldA, fieldB, mu, del='_',
                    verbose=0,vt=4,tc=1,tabsStr='') {
  funcTag <- 'cmpInfMU'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]: fieldA={fieldA}, fieldB={fieldB} mu={mu}{RET}"))
  fieldA <- rlang::sym(fieldA)
  fieldB <- rlang::sym(fieldB)
  tib <- tib %>%
    dplyr::mutate(
      SUB_SEQ_A=stringr::str_to_upper(stringr::str_sub(!!fieldA,1,stringr::str_length(!!fieldA)-1)),
      SUB_SEQ_B=stringr::str_to_upper(stringr::str_sub(!!fieldB,1,stringr::str_length(!!fieldB)-1)),
      DI_NUC_AB=paste0(
        stringr::str_to_upper(stringr::str_sub(!!fieldA,stringr::str_length(!!fieldA),stringr::str_length(!!fieldA)) ),
        stringr::str_to_upper(stringr::str_sub(!!fieldB,stringr::str_length(!!fieldB),stringr::str_length(!!fieldB)) )
      ),
      BOD_EQU=case_when(SUB_SEQ_A==SUB_SEQ_B ~ TRUE, TRUE ~ FALSE),
      TAR_EQU=cmpIUPACs(DI_NUC_AB),
      # Body must match exactly and target bases must be IUPAC-compatible.
      Inf1_Match=case_when(BOD_EQU & BOD_EQU==TAR_EQU ~ TRUE, TRUE ~ FALSE)
    ) %>%
    dplyr::select(-c(SUB_SEQ_A,SUB_SEQ_B,BOD_EQU,DI_NUC_AB,TAR_EQU)) %>%
    dplyr::rename(!!paste('Inf1_Match',mu, sep=del):=Inf1_Match)
  if (verbose>=vt+1) print(tib)
  tib
}
# Infinium I exact-match summary across both alleles.
#
# Man_Match is TRUE only when BOTH the U and M probe pairs match exactly
# (per cmpInfIMU()); the per-allele flags are dropped from the output.
cmpInfI = function(tib, fieldAU, fieldBU, fieldAM, fieldBM, del='_',
                   verbose=0,vt=4,tc=1,tabsStr='') {
  funcTag <- 'cmpInfI'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  if (verbose>=vt) cat(glue::glue("[{funcTag}]: fieldAU={fieldAU}, fieldBU={fieldBU}{RET}"))
  if (verbose>=vt) cat(glue::glue("[{funcTag}]: fieldAM={fieldAM}, fieldBM={fieldBM}{RET}"))
  tib <- tib %>% cmpInfIMU(fieldAU, fieldBU, mu='U', del=del,verbose=verbose, vt=vt+1,tc=tc+1)
  tib <- tib %>% cmpInfIMU(fieldAM, fieldBM, mu='M', del=del,verbose=verbose, vt=vt+1,tc=tc+1)
  tib <- tib %>%
    dplyr::mutate(Man_Match=case_when(Inf1_Match_U & Inf1_Match_M ~ TRUE, TRUE ~ FALSE) ) %>%
    dplyr::select(-c(Inf1_Match_U,Inf1_Match_M))
  tib
}
# Infinium II exact-match comparison: case-insensitive equality of the two
# full probe sequences (no body/target split, unlike the Infinium I variant).
# `mu` and `del` are accepted for signature parity with cmpInfI() but are
# unused here.
#
# Fix: funcTag was mislabeled 'cmpInfI'.
cmpInfII = function(tib, fieldA, fieldB, mu='D', del='_',
                    verbose=0,vt=4,tc=1,tabsStr='') {
  funcTag <- 'cmpInfII'
  tabsStr <- paste0(rep(TAB, tc), collapse='')
  fieldA <- rlang::sym(fieldA)
  fieldB <- rlang::sym(fieldB)
  tib <- tib %>%
    dplyr::mutate(Man_Match=stringr::str_to_upper(!!fieldA)==stringr::str_to_upper(!!fieldB) )
  tib
}
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Basic Reverse/Complement Methods::
# ----- ----- ----- ----- ----- -----|----- ----- ----- ----- ----- ----- #
# Reverse-complement a (possibly IUPAC-ambiguous) sequence, preserving case.
revCmp = function(x) {
  Biostrings::reverse(x) %>% cmpl()
}
# Complement each nucleotide, including IUPAC ambiguity codes, in two passes:
# first the upper-case alphabet, then the lower-case one.  The upper-case pass
# only produces upper-case letters, so the second pass cannot re-map them;
# case is therefore preserved.
cmpl = function(x) {
  x <- tr(x, 'ACTGRYSWKMBDHV', 'TGACYRSWMKVHDB')
  tr(x, 'actgryswkmbdhv', 'tgacyrswmkvhdb')
}
# Thin wrapper over Biostrings::chartr(): translate each character of `x`
# found in `old` to the positionally-matching character in `new`.
tr = function(x, old, new) {
  Biostrings::chartr(old, new, x)
}
# mapA = function(x) {
# if (length(MAP_A[[x]])==0) return(x)
# MAP_A[[x]]
# }
# INIT_MAP_A = function() {
# MAP_A <- NULL
# MAP_A[['AA']] <- 'aa'
# MAP_A[['Aa']] <- 'aa'
# MAP_A[['aA']] <- 'aa'
# MAP_A[['aa']] <- 'aa'
#
# MAP_A
# }
# MAP_A <- INIT_MAP_A()
# Data generated with:: /Users/bbarnes/Documents/Projects/scripts/mapMD.pl
# Methylated (M) conversion table for CpN dinucleotides, built
# programmatically instead of 49 literal assignments: for every pair of a
# C-containing first code and a G-containing second code, the value is the
# first code lower-cased (marking it protected) followed by the second code
# unchanged.  Insertion order matches the original table (row-major over
# first codes).  Originally generated with mapMD.pl.
INIT_MAP_M = function() {
  c_codes <- c('C','Y','S','M','B','H','V')
  g_codes <- c('G','R','S','K','B','D','V')
  MAP <- list()
  for (a in c_codes) {
    for (b in g_codes) {
      MAP[[paste0(a, b)]] <- paste0(tolower(a), b)
    }
  }
  MAP
}
MAP_M <- INIT_MAP_M()
# Degenerate (D) conversion table for CpN dinucleotides, built
# programmatically: the first (C-containing) code is replaced by the
# ambiguity code that adds a T alternative (C,Y -> y; S,B -> b; M,H -> h;
# V -> n) while the second (G-containing) code is kept unchanged.
# Insertion order matches the original table.  Originally generated with
# mapMD.pl.
INIT_MAP_D = function() {
  deg1 <- c(C='y', Y='y', S='b', M='h', B='b', H='h', V='n')
  g_codes <- c('G','R','S','K','B','D','V')
  MAP <- list()
  for (a in names(deg1)) {
    for (b in g_codes) {
      MAP[[paste0(a, b)]] <- paste0(deg1[[a]], b)
    }
  }
  MAP
}
MAP_D <- INIT_MAP_D()
# Query-site nucleotide map for the UnMethylated strand: each IUPAC code is
# resolved to its bisulfite-converted representative (C alternatives collapse
# toward T/W).  All upper-case entries are inserted first, then the
# lower-case entries, which mirror the upper-case ones with lower-case
# values -- matching the original table's order and contents exactly.
INIT_QMAP_U = function() {
  upper <- c(A='A', C='C', G='G', T='T',
             R='A', Y='T', S='S', W='W',
             K='T', M='A', B='T', D='W',
             H='W', V='A', N='W')
  MAP <- list()
  for (k in names(upper)) MAP[[k]] <- upper[[k]]
  for (k in names(upper)) MAP[[tolower(k)]] <- tolower(upper[[k]])
  MAP
}
QMAP_U <- INIT_QMAP_U()
# Query-site nucleotide map for the Methylated strand: each IUPAC code is
# resolved to its methylation-preserving representative (alternatives
# collapse toward C/G/S).  Upper-case entries first, then lower-case entries
# mirroring them with lower-case values -- matching the original table's
# order and contents exactly.
INIT_QMAP_M = function() {
  upper <- c(A='A', C='C', G='G', T='T',
             R='G', Y='C', S='S', W='W',
             K='G', M='C', B='S', D='G',
             H='C', V='S', N='S')
  MAP <- list()
  for (k in names(upper)) MAP[[k]] <- upper[[k]]
  for (k in names(upper)) MAP[[tolower(k)]] <- tolower(upper[[k]])
  MAP
}
QMAP_M <- INIT_QMAP_M()
# Compatibility table for two-letter IUPAC pairs used by cmpIUPAC().
# A pair "XY" maps to TRUE when the two codes are considered equivalent at
# the target position; absent pairs are treated as incompatible.  The listed
# pairs appear to satisfy a one-base-set-contains-the-other relation, but the
# table is kept as an explicit enumeration to preserve the original contents
# exactly.
INIT_IUPAC_EQ = function() {
  keys <- c(
    "AA","AD","AH","AM","AN","AR","AV","AW",
    "BB","BC","BG","BK","BN","BS","BT","BY",
    "CB","CC","CH","CM","CN","CS","CV","CY",
    "DA","DD","DG","DK","DN","DR","DT","DW",
    "GB","GD","GG","GK","GN","GR","GS","GV",
    "HA","HC","HH","HM","HN","HT","HW","HY",
    "KB","KD","KG","KK","KN","KT",
    "MA","MC","MH","MM","MN","MV",
    "NA","NB","NC","ND","NG","NH","NK","NM","NN","NR","NS","NT","NV","NW","NY",
    "RA","RD","RG","RN","RR","RV",
    "SB","SC","SG","SN","SS","SV",
    "TB","TD","TH","TK","TN","TT","TW","TY",
    "VA","VC","VG","VM","VN","VR","VS","VV",
    "WA","WD","WH","WN","WT","WW",
    "YB","YC","YH","YN","YT","YY"
  )
  MAP <- list()
  for (k in keys) MAP[[k]] <- TRUE
  MAP
}
IUPAC_EQ <- INIT_IUPAC_EQ()
# End of file
|
69e6b902f57eaadcb0f2afd65cd9ac25d948a743
|
7f35d0df4d0552f1a8669f81add1aacc00b5bd1f
|
/man/DammDigit.Rd
|
1eb9295828d23ed998d644e046fd7c69603d550a
|
[] |
no_license
|
Dasonk/Damm
|
31d5428ab65c296e189fe0a91f75a9390d190bcb
|
b38ed2a47cd011c191ebe5f3954491dd3c552b9f
|
refs/heads/master
| 2021-01-18T11:15:45.337206
| 2013-03-25T04:31:40
| 2013-03-25T04:31:40
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 332
|
rd
|
DammDigit.Rd
|
\name{DammDigit}
\alias{DammDigit}
\title{Get Damm check digit}
\usage{
DammDigit(x)
}
\arguments{
\item{x}{Number that you want the check digit for.}
}
\description{
Get Damm check digit
}
\examples{
x <- 12345
DammDigit(x)
# so we need to add 9 to the end of the number
x <- 123459
DammDigit(x)
# The 0 tells us it works
}
|
d4637836a97b801ab7dc4b080441e0300e83c156
|
ff6df032ed4f8089c46db34bb3aaabf6b3a0fc66
|
/R/zzz.R
|
cc02a2a79e1331ddd9d7e2a470da8f135cc79cd2
|
[] |
no_license
|
chuanboguo/arrowtooth
|
57564591c1db9faa997fcf2d5dd757e334d6bc66
|
ba0afc6983f2b944d522f89c5800cda94c6f88e3
|
refs/heads/master
| 2022-11-10T17:02:21.738605
| 2020-06-29T18:32:26
| 2020-06-29T18:32:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 451
|
r
|
zzz.R
|
# Declare column and variable names used via non-standard evaluation (dplyr,
# ggplot2, etc.) so that `R CMD check` does not flag them as undefined global
# variables.  This has no runtime effect on package behavior.
globalVariables(c(
  "coef",
  "biomass",
  "vcov",
  "age",
  "rsum",
  "unique_models_dirs",
  "unique_models_dirs_full",
  "nongit_dir",
  "landed_kg",
  "discarded_kg",
  "survey_abbrev",
  "ct",
  "nsamp",
  "param_name",
  "base_model_dir_full",
  "Estimate",
  "Std. Error",
  "base_model_dir",
  "cv",
  "cv_lower",
  "cv_upper",
  "estimate",
  "models_dir",
  "n",
  "sample_id",
  "se",
  "sens_models_dirs",
  "unique_models",
  "year"
))
|
db338cd3654f9a220075ea1f0c4ac3ae33dab208
|
4459eb5432916b4ad6c5c5d911b50c9d2fec1ad5
|
/man/Modigliani.Rd
|
f505a692e498615ce803ec1419e1fa8724bdd26b
|
[] |
no_license
|
braverock/PerformanceAnalytics
|
057af55b0a4ddeb4befcc02e36a85f582406b95c
|
49a93f1ed6e2e159b63bf346672575f3634ed370
|
refs/heads/master
| 2023-08-03T10:18:27.115592
| 2023-03-29T09:23:17
| 2023-03-29T09:23:17
| 58,736,268
| 209
| 113
| null | 2023-05-23T17:46:08
| 2016-05-13T12:02:42
|
R
|
UTF-8
|
R
| false
| true
| 1,803
|
rd
|
Modigliani.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Modigliani.R
\name{Modigliani}
\alias{Modigliani}
\title{Modigliani-Modigliani measure}
\usage{
Modigliani(Ra, Rb, Rf = 0, ...)
}
\arguments{
\item{Ra}{an xts, vector, matrix, data frame, timeSeries or zoo object of
asset returns}
\item{Rb}{return vector of the benchmark asset}
\item{Rf}{risk free rate, in same period as your returns}
\item{\dots}{any other passthrough parameters}
}
\description{
The Modigliani-Modigliani measure is the portfolio return adjusted upward
or downward to match the benchmark's standard deviation. This puts the
portfolio return and the benchmark return on 'equal footing' from a standard
deviation perspective.
\deqn{MM_{p}=\frac{E[R_{p} - R_{f}]}{\sigma_{p}}=SR_{p} * \sigma_{b} +
E[R_{f}]}{MMp = SRp * sigmab + E[Rf]}
where \eqn{SR_{p}}{SRp} - Sharpe ratio, \eqn{\sigma_{b}}{sigmab} - benchmark
standard deviation
}
\details{
This is also analogous to some approaches to 'risk parity' portfolios, which
use (presumably costless) leverage to increase the portfolio standard
deviation to some target.
}
\examples{
data(managers)
Modigliani(managers[,1,drop=FALSE], managers[,8,drop=FALSE], Rf=.035/12)
Modigliani(managers[,1:6], managers[,8,drop=FALSE], managers[,8,drop=FALSE])
Modigliani(managers[,1:6], managers[,8:7], managers[,8,drop=FALSE])
}
\references{
J. Christopherson, D. Carino, W. Ferson. \emph{Portfolio
Performance Measurement and Benchmarking}. 2009. McGraw-Hill, p. 97-99. \cr
Franco Modigliani and Leah Modigliani, "Risk-Adjusted Performance: How to
Measure It and Why," \emph{Journal of Portfolio Management}, vol.23, no.,
Winter 1997, pp.45-54 \cr
}
\seealso{
\code{\link{SharpeRatio}}, \code{\link{TreynorRatio}}
}
\author{
Andrii Babii, Brian G. Peterson
}
|
c60fba5d4954160bb937a38e7767aba4cf3ea287
|
3139a2d91d491000ab7f90e253f501bb5aed0df9
|
/HW15/hw15.R
|
6de05b64c210291f1d4587e31917e8e2b156f0d4
|
[] |
no_license
|
duddlf23/CS564
|
1bcb236d5c13794364aa88779e6567682d11f968
|
ae8f5de93b206eb3f5d5abefc9919b1873a38123
|
refs/heads/master
| 2020-08-26T13:01:14.102155
| 2019-10-23T09:33:07
| 2019-10-23T09:33:07
| 217,018,586
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 422
|
r
|
hw15.R
|
#1.
data("attitude")
str(attitude)
library(rela)
paf_dat <- paf(as.matrix(attitude))
paf_dat$KMO
library(psych)
scree(attitude, factors = F, pc = T)
pca <- principal(attitude, nfactors = 2, rotate = 'none')
pca
fa.diagram(pca)
#2.
pca <- principal(USArrests, nfactors = 2, rotate = 'none')
pca
biplot.psych(pca, col = c('black', 'red'), cex = c(0.5, 1), arrow.len = 0.08, main = NULL, labels = rownames(USArrests))
|
70e3e489849223bf52c17e89d0d89c955b44622c
|
fed82a720cd8e651b517fafdd11bb1a7fd470d7a
|
/scripts/theme.R
|
6450c40a6fd8d0b04ccf91aa2e65c328a247af89
|
[
"MIT"
] |
permissive
|
The-Human-Protein-Atlas/HPA-SingleCellType
|
d68605420958fb800c5cebac8dfebbd1070ded5c
|
6788a4c1e80a586a823d2e8f76b724e4958acd7c
|
refs/heads/main
| 2023-03-17T03:22:50.545142
| 2021-03-08T15:24:42
| 2021-03-08T15:24:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 28,433
|
r
|
theme.R
|
library(viridis)
# ----- colors & factor levels -----
tissue_colors <-
c('abdominal adipose tissue' = '#A7DACD',
'subcutaneous adipose tissue' = '#A7DACD',
'adrenal gland' = '#7F6A9C',
'aorta' = '#A7DACD',
'cecum' = '#1280C4',
'bone marrow' = '#A1A8AA',
'breast' = '#F8BDD7',
'bronchus' = '#6AA692',
'bulbourethral gland' = '#95D4F5',
'ear cartilage' = '#A7DACD',
'joint cartilage' = '#A7DACD',
'cervix' = '#F8BDD7',
'colon' = '#1280C4',
'duodenum' = '#1280C4',
'endometrium' = '#F8BDD7',
'epididymis' = '#95D4F5',
'esophagus' = '#FBDAD9',
'fallopian tube' = '#F8BDD7',
'gallbladder' = '#D1CBE5',
"Heart muscle" = '#DE6C7D',
'heart atrium' = '#DE6C7D',
'heart valva' = '#DE6C7D',
'heart wall' = '#DE6C7D',
'ileum' = '#1280C4',
'jejunum' = '#1280C4',
'synovial tissue' = '#A7DACD',
'kidney cortex' = '#F9A266',
'kidney medulla' = '#F9A266',
'larynx' = '#6AA692',
'liver' = '#D1CBE5',
'lung' = '#6AA692',
'lymph node' = '#A1A8AA',
'urethral gland' = '#95D4F5',
'nasopharynx epithelium' = '#6AA692',
'olfactory epithelium' = '#6AA692',
'oral mucosa' = '#FBDAD9',
'ovary' = '#F8BDD7',
'pancreas' = '#96C08E',
'penis' = '#95D4F5',
'peritoneum' = '#A7DACD',
'pleura' = '#A7DACD',
'prostate' = '#95D4F5',
'rectum' = '#1280C4',
'salivary gland' = '#FBDAD9',
'seminal vesicle' = '#95D4F5',
'skin (groin)' = '#FCCAB3',
'skin (back)' = '#FCCAB3',
'lip' = '#FBDAD9',
'skeletal muscle' = '#DE6C7D',
'smooth muscle (intestine)' = '#DE6C7D',
'smooth muscle (uterus)' = '#DE6C7D',
'spleen' = '#A1A8AA',
'stomach lower' = '#1280C4',
'stomach upper' = '#1280C4',
'tongue' = '#FBDAD9',
'facial adipose' = '#A7DACD',
'testis' = '#95D4F5',
'thymus' = '#A1A8AA',
'thyroid gland' = '#7F6A9C',
'tonsil' = '#A1A8AA',
'trachea' = '#6AA692',
'urinary bladder' = '#F9A266',
'vagus nerve' = '#A7DACD',
'ductus deferens' = '#95D4F5',
'vein' = '#A7DACD',
'cingulate cortex' = '#53977F',
'occipital cortex' = '#53977F',
'insular cortex' = '#53977F',
'motor cortex' = '#53977F',
'prefrontal cortex' = '#53977F',
'somatosensory cortex' = '#53977F',
'temporal cortex' = '#53977F',
'retrosplenial cortex' = '#53977F',
'olfactory bulb' = '#99D0C1',
'dorsal hippocampus' = '#B4CE52',
'ventral hippocampus' = '#B4CE52',
'entorhinal cortex' = '#B4CE52',
'subiculum' = '#B4CE52',
'amygdala' = '#82B579',
'caudate' = '#53C2EA',
'putamen' = '#53C2EA',
'septum' = '#53C2EA',
'ventral pallidum' = '#53C2EA',
'thalamus' = '#EA9586',
'hypothalamus' = '#E6351D',
'substantia nigra' = '#B67AB3',
'periaqueductal gray' = '#B67AB3',
'midbrain' = '#B67AB3',
'medulla' = '#EA5699',
'pons' = '#EA5699',
'cerebellum' = '#FDCA43',
'corpus callosum' = '#878786',
'dorsal spinal cord' = '#008BCC',
'ventral spinal cord' = '#008BCC',
'superiour colliculi' = '#B67AB3',
'pituitary gland' = '#7F6A9C',
'retina' = '#FFEF78',
'eye' = '#FFEF78',
'cornea' = '#FFEF78',
'lens' = '#FFEF78',
'choroid plexus' = '#A7DACD',
'dura mater' = '#A7DACD',
'pineal gland' = '#7F6A9C',
'parathyroid gland' = '#7F6A9C',
'placenta' = '#F8BDD7',
'vagina' = '#F8BDD7',
'intermediate monocyte' = '#E64B35',
'non-classical monocyte' = '#E64B35',
'classical monocyte' = '#E64B35',
'neutrophil' = '#F39B7F',
'basophil' = '#F39B7F',
'eosinophil' = '#F39B7F',
'T-reg' = '#7D8DAF',
'MAIT T-cell' = '#7D8DAF',
'memory CD4 T-cell' = '#7D8DAF',
'naive CD4 T-cell' = '#7D8DAF',
'memory CD8 T-cell' = '#7D8DAF',
'naive CD8 T-cell' = '#7D8DAF',
'memory B-cell' = '#66287F',
'naive B-cell' = '#66287F',
'NK-cell' = '#AD1D78',
'gdTCR' = '#66287F',
'myeloid DC' = '#199985',
'plasmacytoid DC' = '#199985',
'total PBMC' = '#b30000',
'cerebral cortex' = '#53977F',
'olfactory region' = '#99D0C1',
'hippocampal formation' = '#B4CE52',
'basal ganglia' = '#53C2EA',
'pons and medulla' = '#EA5699',
'spinal cord' = '#008BCC',
'pitutiary' = '#7F6A9C',
'monocytes' = '#E64B35',
'granulocytes' = '#F39B7F',
'T-cells' = '#7D8DAF',
'B-cells' = '#66287F',
'NK-cells' = '#AD1D78',
'dendritic cells' = '#199985',
'blood' = '#b30000',
'adipose tissue' = '#A7DACD',
'large intestine' = '#1280C4',
'cartilage' = '#A7DACD',
'small intestine' = '#1280C4',
'heart' = '#DE6C7D',
'kidney' = '#F9A266',
'upper respiratory system' = '#6AA692',
'lymphoid tissue' = '#A1A8AA',
'mouth' = '#FBDAD9',
'mesothelial tissue' = '#A7DACD',
'skin' = '#FCCAB3',
'smooth muscle' = '#DE6C7D',
'stomach' = '#1280C4',
'brain' = '#FFDD00',
'Adipose & soft tissue' = '#A7DACD',
'Endocrine tissues' = '#7F6A9C',
'Gastrointestinal tract' = '#1280C4',
'Bone marrow & immune system' = '#A1A8AA',
'Breast and female reproductive system' = '#F8BDD7',
'Respiratory system' = '#6AA693',
'Male reproductive system' = '#95D4F5',
'Proximal digestive tract' = '#FBDAD9',
'Liver & gallbladder' = '#D1CBE5',
'Muscle tissues' = '#DE6C7D',
'Kidney & urinary bladder' = '#F9A266',
'Respiratory system' = '#6AA692',
'Pancreas' = '#96C08E',
'Skin' = '#FCCAB3',
'Brain' = '#FFDD00',
'Eye' = '#FFEF78',
'Blood' = '#b30000',
'Abdominal adipose tissue' = '#A7DACD',
'Subcutaneous adipose tissue' = '#A7DACD',
'Adrenal gland' = '#7F6A9C',
'Aorta' = '#A7DACD',
'Cecum' = '#1280C4',
'Bone marrow' = '#A1A8AA',
'Breast' = '#F8BDD7',
'Bronchus' = '#6AA692',
'Bulbourethral gland' = '#95D4F5',
'Ear cartilage' = '#A7DACD',
'Joint cartilage' = '#A7DACD',
'Cervix' = '#F8BDD7',
'Colon' = '#1280C4',
'Duodenum' = '#1280C4',
'Endometrium' = '#F8BDD7',
'Epididymis' = '#95D4F5',
'Esophagus' = '#FBDAD9',
'Fallopian tube' = '#F8BDD7',
'Gallbladder' = '#D1CBE5',
'Heart atrium' = '#DE6C7D',
'Heart valva' = '#DE6C7D',
'Heart wall' = '#DE6C7D',
'Ileum' = '#1280C4',
'Jejunum' = '#1280C4',
'Synovial tissue' = '#A7DACD',
'Kidney cortex' = '#F9A266',
'Kidney medulla' = '#F9A266',
'Larynx' = '#6AA692',
'Liver' = '#D1CBE5',
'Lung' = '#6AA692',
'Lymph node' = '#A1A8AA',
'Urethral gland' = '#95D4F5',
'Nasopharynx epithelium' = '#6AA692',
'Olfactory epithelium' = '#6AA692',
'Oral mucosa' = '#FBDAD9',
'Ovary' = '#F8BDD7',
'Pancreas' = '#96C08E',
'Penis' = '#95D4F5',
'Peritoneum' = '#A7DACD',
'Pleura' = '#A7DACD',
'Prostate' = '#95D4F5',
'Rectum' = '#1280C4',
'Salivary gland' = '#FBDAD9',
'Seminal vesicle' = '#95D4F5',
'Skin (groin)' = '#FCCAB3',
'Skin (back)' = '#FCCAB3',
'Lip' = '#FBDAD9',
'Skeletal muscle' = '#DE6C7D',
'Smooth muscle (intestine)' = '#DE6C7D',
'Smooth muscle (uterus)' = '#DE6C7D',
'Spleen' = '#A1A8AA',
'Stomach lower' = '#1280C4',
'Stomach upper' = '#1280C4',
'Tongue' = '#FBDAD9',
'Facial adipose' = '#A7DACD',
'Testis' = '#95D4F5',
'Thymus' = '#A1A8AA',
'Thyroid gland' = '#7F6A9C',
'Tonsil' = '#A1A8AA',
'Trachea' = '#6AA692',
'Urinary bladder' = '#F9A266',
'Vagus nerve' = '#A7DACD',
'Ductus deferens' = '#95D4F5',
'Vein' = '#A7DACD',
'Cingulate cortex' = '#53977F',
'Occipital cortex' = '#53977F',
'Insular cortex' = '#53977F',
'Motor cortex' = '#53977F',
'Prefrontal cortex' = '#53977F',
'Somatosensory cortex' = '#53977F',
'Temporal cortex' = '#53977F',
'Retrosplenial cortex' = '#53977F',
'Olfactory bulb' = '#99D0C1',
'Dorsal hippocampus' = '#B4CE52',
'Ventral hippocampus' = '#B4CE52',
'Entorhinal cortex' = '#B4CE52',
'Subiculum' = '#B4CE52',
'Amygdala' = '#82B579',
'Caudate' = '#53C2EA',
'Putamen' = '#53C2EA',
'Septum' = '#53C2EA',
'Ventral pallidum' = '#53C2EA',
'Thalamus' = '#EA9586',
'Hypothalamus' = '#E6351D',
'Substantia nigra' = '#B67AB3',
'Periaqueductal gray' = '#B67AB3',
'Midbrain' = '#B67AB3',
'Medulla' = '#EA5699',
'Pons' = '#EA5699',
'Cerebellum' = '#FDCA43',
'Corpus callosum' = '#878786',
'Dorsal spinal cord' = '#008BCC',
'Ventral spinal cord' = '#008BCC',
'Superiour colliculi' = '#B67AB3',
'Pituitary gland' = '#7F6A9C',
'Retina' = '#FFEF78',
'Cornea' = '#FFEF78',
'Lens' = '#FFEF78',
'Choroid plexus' = '#A7DACD',
'Dura mater' = '#A7DACD',
'Pineal gland' = '#7F6A9C',
'Parathyroid gland' = '#7F6A9C',
'Placenta' = '#F8BDD7',
'Vagina' = '#F8BDD7',
'Intermediate monocyte' = '#E64B35',
'Non-classical monocyte' = '#E64B35',
'Classical monocyte' = '#E64B35',
'Neutrophil' = '#F39B7F',
'Basophil' = '#F39B7F',
'Eosinophil' = '#F39B7F',
'T-reg' = '#7D8DAF',
'MAIT T-cell' = '#7D8DAF',
'Memory CD4 T-cell' = '#7D8DAF',
'Naive CD4 T-cell' = '#7D8DAF',
'Memory CD8 T-cell' = '#7D8DAF',
'Naive CD8 T-cell' = '#7D8DAF',
'Memory B-cell' = '#66287F',
'Naive B-cell' = '#66287F',
'NK-cell' = '#AD1D78',
'GdTCR' = '#66287F',
'Myeloid DC' = '#199985',
'Plasmacytoid DC' = '#199985',
'Total PBMC' = '#b30000',
'Cerebral cortex' = '#53977F',
'Olfactory region' = '#99D0C1',
'Hippocampal formation' = '#B4CE52',
'Basal ganglia' = '#53C2EA',
'Pons and medulla' = '#EA5699',
'Spinal cord' = '#008BCC',
'Pitutiary' = '#7F6A9C',
'Monocytes' = '#E64B35',
'Granulocytes' = '#F39B7F',
'T-cells' = '#7D8DAF',
'B-cells' = '#66287F',
'Nk-cells' = '#AD1D78',
'Dendritic cells' = '#199985',
'Blood' = '#b30000',
'Adipose tissue' = '#A7DACD',
'Large intestine' = '#1280C4',
'Cartilage' = '#A7DACD',
'Small intestine' = '#1280C4',
'Heart' = '#DE6C7D',
'Kidney' = '#F9A266',
'Upper respiratory system' = '#6AA692',
'Lymphoid tissue' = '#A1A8AA',
'Mouth' = '#FBDAD9',
'Mesothelial tissue' = '#A7DACD',
'Skin' = '#FCCAB3',
'Smooth muscle' = '#DE6C7D',
'Stomach' = '#1280C4',
'Brain' = '#FFDD00',
'Adipose & soft tissue' = '#A7DACD',
'Endocrine tissues' = '#7F6A9C',
'Gastrointestinal tract' = '#1280C4',
'Bone marrow & immune system' = '#A1A8AA',
'Breast and female reproductive system' = '#F8BDD7',
'Respiratory system' = '#6AA693',
'Male reproductive system' = '#95D4F5',
'Proximal digestive tract' = '#FBDAD9',
'Liver & gallbladder' = '#D1CBE5',
'Muscle tissues' = '#DE6C7D',
'Kidney & urinary bladder' = '#F9A266',
'Respiratory system' = '#6AA692',
'Pancreas' = '#96C08E',
'Skin' = '#FCCAB3',
'Brain' = '#FFDD00',
'Eye' = '#FFEF78',
'Blood' = '#b30000',
'Monocyte' = '#E64B35',
'Granulocyte' = '#F39B7F',
'T-cell' = '#7D8DAF',
'B-cell' = '#66287F',
'NK-cell' = '#AD1D78',
'Dendritic cell' = '#199985',
'T/NK-cell' = "#955593",
"Unknown" = "gray",
"B/DC" = "#3F6082",
"Endothelial" = "brown",
"Endothelium" = "brown",
"Monocyte/DC" = "#7F715D",
"Epithelium" = "tan1",
"B/T-cell" = "#715A97",
"Epithelium/B/T-cell" = "#B87F73",
"B/T/DC" = "#45798E",
"Endothelium/DC" = "#5F6157",
"Trophoblast" = "tan3",
"cervix, uterine" = '#F8BDD7',
'intestine' = '#1280C4',
"Not tissue enriched" = "black",
"heart muscle" = "#DE6C7D",
"Not enriched" = "darkgray",
"not enriched" = "darkgray")
cell_type_palette_old <-
c('?' = 'gray',
'Amacrine' = '#FFDD00',
'Atrial Cardiomyocyte' = '#DE6C7D',
"Cardiomyocytes" = '#DE6C7D',
'B cells' = '#66287F',
'Basal' = 'tan',
'Basal cells' = 'tan',
'Basal epithelia' = 'tan',
'Bipolar' = '#FFDD00',
'Cones' = 'orange',
'Dendritic cells' = '#199985',
'Endothelial cells' = 'brown',
'Enterocytes' = 'tan',
'Epithelial cells' = 'tan',
'Epithelial cells of prostatic glands' = 'tan',
"Prostate glands, basal cells" = 'tan',
"Prostate glands, eptithelial cells" = 'tan',
'Extravillous trophoblasts' = 'tan',
'Fibroblastss' = 'tan2',
'Fibroblasts' = 'tan2',
'Glomerular parietal Epithelial cells' = 'tan',
'Goblet' = 'tan',
'Granulocytes' = '#F39B7F',
'Hepatocytes' = '#D1CBE5',
'Hofbauer cells' = '#E64B35',
'Horizontal' = 'orange',
'Leydig and myoid-like cells' = '#95D4F5',
'Luminal 1' = 'brown',
'Luminal 2' = 'brown',
'Macrophages' = '#E64B35',
'Monocytes' = '#E64B35',
'Muller' = '#FFDD00',
'Myeloid' = '#E64B35',
'Myofibroblasts' = 'tan2',
'NK cells' = '#AD1D78',
'Paneth cells' = 'tan',
"Paneth-like cells" = 'tan',
'Progenitor cells' = 'green3',
'Proximal tubule' = '#F9A266',
"Renal tubules, distal cells" = '#F9A266',
"Renal tubules, proximal cells" = '#F9A266',
'Retinal ganglion cells' = 'orange',
'Retinal pigment Epithelial cells' = 'tan',
'Rods' = 'orange',
'Sertoli cells' = '#95D4F5',
'Smooth muscle cells' = '#DE6C7D',
'Spermatocytes' = '#95D4F5',
'Spermatogonia' = '#95D4F5',
"Sperm"= '#95D4F5',
"Peritubular cells" = '#F9A266',
"Late spermatids"= '#95D4F5',
"Spermatids"= '#95D4F5',
"Early spermatids"= '#95D4F5',
'Stellate cells' = '#F9A266',
'stem cells' = 'darkblue',
"Stem cells" = 'darkblue',
"Undifferentiated cells" = "darkblue",
'Syncytiotrophoblast' = 'tan',
'T cells' = '#7D8DAF',
'Transient amplifying cells' = '#1280C4',
"Transient-amplifying (TA) cells" = '#1280C4',
'Vascular' = 'brown',
'Ventricular Cardiomyocyte' = '#DE6C7D',
'Villous cytotrophoblast' = 'tan',
'Type II pneumocytes' = 'tan',
'Type I pneumocytes' = 'tan',
"Alveolar cells type 1" = 'tan',
"Alveolar cells type 2" = 'tan',
" " = "gray",
"Unknown cell type" = "gray",
'Bronchial epithelium, ciliated cells' = 'tan',
'Bronchial epithelium, mucus-secreting cells' = 'tan',
"Bronchial epithelium, Club cells" = 'tan',
"Ciliated cells" = 'tan',
'Syncytotrophoblasts' = 'tan',
'acinary' = 'tan',
'Inflammatory cells' = '#b30000',
"Immune cells" = '#b30000',
'Vesicula seminalis' = '#95D4F5',
'Vesicula seminalis transcripts' = '#95D4F5',
'Urothelium' = 'tan',
'Urothelium transcripts' = 'tan',
'Dendritic cell' = '#199985',
'Mono' = '#E64B35',
'Dendritic cell myeloid' = '#199985',
'Erythrocytes' = '#b30000',
'Dendritic cell plasmacytoid' = '#199985',
'Basophils' = '#F39B7F',
'NK' = '#AD1D78',
'MAIT' = '#7D8DAF',
'Neutrophils' = "#F39B7F",
'Spermatogonia and fibroblasts' = 'blue',
'Leydig cells and fibroblasts' = 'blue',
'Leydig cells and myoid cells' = 'blue',
'Leydig cells and Sertoli cells' = 'blue',
'Myoid cells' = 'blue',
'Spermatids and Sertoli cells' = 'blue',
'Fibroblasts and Leydig cells' = 'blue',
'Fibroblasts and myoid cells' = 'blue',
'Spermatogonia and myoid cells' = 'blue',
'Spermatocytes, spermatids and Sertoli cells' = 'blue',
'Myoid cells?' = 'blue',
'Myoid cells and Sertoli cells?' = 'blue',
'Leydig cells' = 'blue',
'Adipocytes' = '#A7DACD',
'Cytotrophoblasts' = 'tan',
'Fibroblasts' = 'tan2',
'Proximal tubule cells' = '#F9A266',
'Myoepithelial cells' = '#7F6A9C',
'Type i pneumocytes' = 'tan',
'B lymphocytes' = '#66287F',
'Dendritic cells' = '#199985',
'T lymphocytes' = '#7D8DAF',
'Granulocytes' = '#F39B7F',
'Hofbauer cells' = '#E64B35',
'Kupffer cells' = '#E64B35',
"Macrophages (Hofbauer cells)" = '#E64B35',
"Macrophages (Kupffer cells)" = '#E64B35',
'Monocytes' = '#E64B35',
"Erythroid cells" = '#b30000',
'Erthyroid cells' = '#b30000',
'Horizontal cells' = 'orange',
'Natural killer cells' = '#AD1D78',
'Type ii pneumocytes' = 'tan',
'Basal epithelial cells of prostatic glands' = 'tan',
'Decidual cells' = '#F8BDD7',
'Endothelial cells' = 'peru',
'Hepatic stellate cells (ito cells)' = '#D1CBE5',
"Ito cells" = '#D1CBE5',
'Muller glia cells' = '#FFEF78',
'Inflammatory cells' = '#b30000',
'Epithelial cells of prostatic glands' = '#95D4F5',
'Vesicula seminalis cells' = '#95D4F5',
'Atrial cardiomyocyte' = '#DE6C7D',
'Smooth muscle cells' = '#DE6C7D',
'Ventricular cardiomyocyte' = '#DE6C7D',
'Urothelial cells' = '#F9A266',
'Lens' = '#FFEF78',
'Pigment epithelial cells' = '#FFEF78',
'Bronchial epithelium, ciliated cells' = 'tan',
'Bronchial epithelium, mucus-secreting cells' = 'tan',
'Collecting duct cells' = '#F9A266',
'Distal tubule cells' = '#F9A266',
'Syncytotrophoblasts' = '#F8BDD7',
"Cholangiocytes" = '#D1CBE5',
'Bipolar cells' = '#FFDD00',
'Cone photoreceptor cells' = '#FFEF78',
'Rod photoreceptor cells' = '#FFEF78',
'Extravillous trophoblasts' = '#F8BDD7',
"Ductal epithelial cells" = "tan",
"islets of Langerhans" = '#96C08E',
"Islets of Langerhans" = '#96C08E',
'Acinar cells' = 'tan',
"Melanocytes" = "brown",
"Basal keratinocytes (undifferentiated)" = "tan",
"Suprabasal keratinocytes (differentiated)" = "tan",
'collecting duct cells' = '#F9A266',
'distal tubule cells' = '#F9A266',
'Hepatic stellate cells (Ito cells)' = '#D1CBE5',
'Hepatic stellate cells' = '#D1CBE5',
'myoepithelial cells' = '#7F6A9C',
'proximal tubule cells' = '#F9A266',
"Goblet cells" = "#1280C4",
"Goblet cells" = "#1280C4",
"Mucus-secreting cells" = "#1280C4",
"luminal epithelial cells" = "tan",
"luminal epithelial cells" = "tan",
"neuroendocrine cells" = '#7F6A9C',
"neuroendocrine cells" = '#7F6A9C',
"Neuroendocrine cells" = '#7F6A9C',
"Not annotated" = "gray",
"Not enriched" = "darkgray",
"not enriched" = "darkgray",
"Not cell enriched"= "darkgray")
spec_category_levels <-
c('tissue enriched',
'group enriched',
'tissue enhanced',
'low tissue specificity',
'not detected',
'Tissue enriched',
'Group enriched',
'Tissue enhanced',
'Low tissue specificity',
'Not detected')
dist_category_levels <-
c('detected in all',
'detected in many',
'detected in some',
'detected in single',
'not detected',
'Detected in all',
'Detected in many',
'Detected in some',
'Detected in single',
'Not detected')
enrichment_overlap_levels <-
c("full overlap",
"partial overlap",
"no overlap",
"Full overlap",
"Partial overlap",
"No overlap")
shared_category_levels <-
c("shared",
"minor difference",
"medium difference",
"major difference")
enrichment_overlap_pal <-
set_names(c(viridis(3), inferno(4),
viridis(3), inferno(4)),
c("full overlap",
"partial overlap",
"no overlap",
"shared",
"minor difference",
"medium difference",
"major difference",
"Full overlap",
"Partial overlap",
"No overlap",
"Shared",
"Minor difference",
"Medium difference",
"Major difference"))
spec_category_overlap_levels <-
c('tissue enriched full overlap',
'tissue enriched partial overlap',
'tissue enriched no overlap',
'group enriched full overlap',
'group enriched partial overlap',
'group enriched no overlap',
'tissue enhanced full overlap',
'tissue enhanced partial overlap',
'tissue enhanced no overlap',
'low tissue specificity no overlap',
'not detected no overlap')
spec_category_overlap_levels_short <-
c('tissue enriched FO',
'tissue enriched PO',
'tissue enriched NO',
'group enriched FO',
'group enriched PO',
'group enriched NO',
'tissue enhanced FO',
'tissue enhanced PO',
'tissue enhanced NO',
'low tissue specificity NO',
'not detected NO')
spec_category_overlap_pal <-
c('tissue enriched full overlap' = "#E41A1C",
'tissue enriched partial overlap' = "#ED6667" ,
'tissue enriched no overlap' = "#F6B2B3" ,
'group enriched full overlap' = "#FF9D00",
'group enriched partial overlap' = "#FFBD55",
'group enriched no overlap' = "#FFDEAA",
'tissue enhanced full overlap' = "#984EA3",
'tissue enhanced partial overlap' = "#BA89C1",
'tissue enhanced no overlap' = "#DCC3E0",
'low tissue specificity no overlap' = "grey40",
'not detected no overlap' = "grey")
spec_category_overlap_short_pal <-
c('tissue enriched FO' = "#E41A1C",
'tissue enriched PO' = "#ED6667" ,
'tissue enriched NO' = "#F6B2B3" ,
'group enriched FO' = "#FF9D00",
'group enriched PO' = "#FFBD55",
'group enriched NO' = "#FFDEAA",
'tissue enhanced FO' = "#984EA3",
'tissue enhanced PO' = "#BA89C1",
'tissue enhanced NO' = "#DCC3E0",
'low tissue specificity NO' = "grey40",
'not detected NO' = "grey")
gene_category_pal <-
c("tissue enriched" = "#e41a1c",
"group enriched" = "#FF9D00",
"tissue enhanced" = "#984ea3",
"low tissue specificity" = "grey40",
"detected in all" = "#253494",
"detected in many" = "#2c7fb8",
"detected in some" = "#41b6c4",
"detected in single" = "#a1dab4",
"not detected" = "grey",
"not detected " = "grey",
"Tissue enriched" = "#e41a1c",
"Group enriched" = "#FF9D00",
"Tissue enhanced" = "#984ea3",
"Low tissue specificity" = "grey40",
"Detected in all" = "#253494",
"Detected in many" = "#2c7fb8",
"Detected in some" = "#41b6c4",
"Detected in single" = "#a1dab4",
"Not detected" = "grey",
"Not detected " = "grey",
"cluster enriched" = "#e41a1c",
"cluster enhanced" = "#984ea3",
"low cluster specificity" = "grey40",
"Cluster enriched" = "#e41a1c",
"Cluster enhanced" = "#984ea3",
"Low cluster specificity" = "grey40")
gene_category_pal_human_pig <-
gene_category_pal %>%
enframe() %>%
do(bind_rows(mutate(., name = paste(name, "human")),
mutate(., name = paste(name, "pig")))) %$%
set_names(value, name)
gene_category_pal_comparison <-
gene_category_pal %>%
enframe() %>%
do(bind_rows(mutate(., name = paste(name, "canon")),
mutate(., name = paste(name, "comparison")))) %$%
set_names(value, name)
anova_pal <-
c("Residuals and species" = "gray",
"Residuals" = "gray",
"tissue_ID" = "#D95E37",
"comparison_tissue" = "#D95E37",
"Tissue" = "#D95E37",
"individual" = "#149684",
"species" = "#F9C770",
"Species" = "#F9C770",
"sex" = "#E8C9C7")
anova_levels <-
c("Residuals and species",
"Residuals",
"tissue_ID",
"comparison_tissue",
"Tissue",
"individual",
"species",
"Species",
"sex")
sex_palette <-
c("male" = "#D96767", "female" = "#214785")
elevation_identity_levels <-
c("identity", "overlapping tissues", "different tissues", "no enrichment")
elevation_identity_pal <-
set_names(rev(pals::ocean.haline(9)[c(3, 4, 6, 7)]), elevation_identity_levels)
elevation_overlap_levels <-
c("Pig", "Both", "Human")
elevation_overlap_pal <-
set_names(rev(pals::ocean.haline(9)[c(3, 4, 6)]), elevation_overlap_levels)
overlap_type_pal <-
c("Human" = "#DB1F48",
"Overlap" = "#004369",
"Pig" = "#01949A",
"Different tissues" = "orangered")
celltype_pal <-
c('B cells' = '#66287F',
'Cholangiocytes' = 'palegreen4',
'Endothelial' = 'peru',
'Erythroid cells' = 'darkred',
'Hepatic stellate cells' = 'salmon4',
'Hepatocytes' = 'saddlebrown',
'Hepatocytes sub' = 'sandybrown',
'Macrophages' = 'orangered',
'Plasma cells' = 'slateblue4',
'T cells' = '#7D8DAF')
celltype_levels <-
c('B cells',
'Plasma cells',
'T cells',
'Macrophages',
'Erythroid cells',
'Endothelial',
'Cholangiocytes',
'Hepatic stellate cells',
'Hepatocytes',
'Hepatocytes sub')
# protein_type_pal <-
# c("secreted" = '#911D51',
# "membrane" = '#6D4BAA',
# "other" = '#008490',
# "cd_marker" = '#318F1E',
# "transcription_factors" = '#B8801B',
# "mitochondrial" = '#E371B4',
# "ribosomal" = '#89A0F3',
# "none" = "black",
# '#00C9BC', '#97C542', '#FFA05E')
#
# protein.localization.palette2 <- c("membrane" = "#CE70A4",
# "secreted" = "#FCAC3B",
# "membrane and secreted isoforms" = "#755A85")
# protein.localization.palette <- c("intracellular and membrane isoforms" = "#858141",
# "membrane" = "#6DB9C6",
# "intracellular" = "#FCAC3B",
# "secreted" = "#CE70A4",
# "intracellular and secreted isoforms" = "#CF5734",
# "membrane and secreted isoforms" = "#755A85",
# "intracellular, membrane, secreted isoforms" = "#794A39")
cluster_levels <-
c("TOTAL",
paste0("Cluster-", 0:100))
# ----- themes -----
simple_theme <-
theme_bw() +
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
heatmap_palette = viridis::inferno(20, direction = -1)
theme_stripped <-
theme(panel.background = element_rect(fill = NA, colour = NA),
plot.background = element_rect(fill = NA, color = NA),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
legend.key = element_rect(colour = NA),
#legend.position = "bottom",
#legend.direction = "horizontal",
legend.key.size= unit(0.3, "cm"),
legend.title = element_text(face="italic"),
axis.line = element_line(colour="black",size=0.5))
theme_stripped_frame <-
theme(panel.background = element_rect(fill = NA, colour = "gray"),
plot.background = element_rect(fill = NA, color = "gray"),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
legend.key = element_rect(colour = NA),
#legend.position = "bottom",
#legend.direction = "horizontal",
legend.key.size= unit(0.3, "cm"),
legend.title = element_text(face="italic"),
axis.line = element_line(colour="black",size=0.5))
theme_angletext <- theme(axis.text.x = element_text(angle = 60, hjust = 1))
# Make plot theme
stripped_theme <-
theme(panel.background = element_rect(fill = NA, colour = NA),
plot.background = element_rect(fill = NA, color = NA),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
panel.border = element_blank(),
legend.key = element_rect(colour = NA),
#legend.position = "bottom",
#legend.direction = "horizontal",
legend.key.size= unit(0.3, "cm"),
legend.title = element_text(face="italic"),
axis.line = element_line(colour="black",size=0.5))
# stripped theme facet
stripped_theme_facet <-
stripped_theme+
theme(legend.position = "right",
panel.border = element_rect(color = "gray", fill = NA),
strip.background = element_rect(fill = "#003253",
color = "#003253"),
strip.text = element_text(color = "white"))
# stripped theme facet
stripped_theme_HPA <-
stripped_theme+
theme(legend.position = "right",
panel.border = element_rect(color = "gray", fill = NA),
strip.background = element_rect(fill = "#313131",
color = "#313131"),
strip.text = element_text(color = "white"),
plot.background = element_rect(fill = "#D9D9D9",
color = "#D9D9D9"),
panel.background = element_rect(fill = "white",
color = "black"))
|
0b9dcce7d5225e30425fc02187b6130fda2c1b2c
|
39f3d047487c268f8a6921e39e228846df8acefc
|
/cachematrix.R
|
ad9700f4dd00142fa81d792dbc5ec13af1cdfbe8
|
[] |
no_license
|
aaronbcrampton/ProgrammingAssignment2
|
242ed4d8d4c486b609bea483c33a9222b570bf23
|
b010a5d76ad62f4965b41f9e4bcd2ce81d678d45
|
refs/heads/master
| 2020-12-02T20:33:01.046215
| 2020-01-02T14:19:39
| 2020-01-02T14:19:39
| 231,112,974
| 0
| 0
| null | 2019-12-31T15:50:12
| 2019-12-31T15:50:11
| null |
UTF-8
|
R
| false
| false
| 1,659
|
r
|
cachematrix.R
|
## FORWARD: I am also one to avoid single letter variables so I made more descriptive names. In retrospect it complicated
## this particular assignment. It's still a good habit though in my belief. Thanks for reviewing and for any feedback!
makeCacheMatrix <- function(item = matrix()) { ##creates a matrix
inverse <- NULL ##establishes the empty version of the 'inverse' vector
set <- function(action){ ##creates set vector to store the new value of matrix 'item'
item <<- action ##stores in the global environment
inverse <<- NULL ##establishes the emply version of the inverse variable in global environment
}
get <- function() item ##defines 'get' as as function on 'item' matrix
setinverse <- function(storedinverse) inverse <<- inverse # stores 'storedinverse' results in parent environment 'inverse'
getinverse <- function() inverse ## gets the value of 'inverse'
list (inverse = inverse,
get = get,
setinverse = setinverse,
getinverse = getinverse) ##allows to be called with $
}
cacheSolve <- function(item, ...) { ##establish function for inverse
inverse <- item$makeCacheMatrix()
if(!is.null(inverse)) { ##retrieves cached data if it exists
message("getting cashed data")
return(m)
}
data <- item$get() ##runs function for inverse if it does not exist in cache
inverse <- solve(data, ...)
item$setinverse(inverse)
inverse
}
|
66f79ee7f92be05074fbbe95f160a762cb944f4a
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/labelled/examples/remove_labels.Rd.R
|
b7f55c0fd3f3d72a2a99ef49de631bc4b12bdd2d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 477
|
r
|
remove_labels.Rd.R
|
library(labelled)
### Name: remove_labels
### Title: Remove variable label, value labels and user defined missing
### values
### Aliases: remove_labels remove_var_label remove_val_labels
### remove_user_na user_na_to_na
### ** Examples
x1 <- labelled_spss(1:10, c(Good = 1, Bad = 8), na_values = c(9, 10))
var_label(x1) <- "A variable"
x1
x2 <- remove_labels(x1)
x2
x3 <- remove_labels(x1, user_na_to_na = TRUE)
x3
x4 <- remove_user_na(x1, user_na_to_na = TRUE)
x4
|
733a5993f77a8013a279bc9100d89745b40d387f
|
46e87270da7b545679f27a1761b05561fba55283
|
/R/f_package.R
|
aae967566358a344ff7650b38ffac6a203140fa6
|
[] |
no_license
|
cwickham/scope
|
cd579ae413b954c963764a692ae64b806eadbc05
|
e654e8b39681a3574f66c35b5ff35e8103e684a9
|
refs/heads/main
| 2023-01-30T04:48:42.167252
| 2020-12-08T01:18:04
| 2020-12-08T01:18:04
| 319,489,319
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 48
|
r
|
f_package.R
|
y <- 100
f_package <- function(x){
y <<- x
}
|
d043a2ac8442fa71d3b4f2e167c91b0cc2d79a4c
|
a2911052de412fe78e95bae137ab3253adae4522
|
/R/getArr.R
|
efa6106d23dd6759d3d88d7f9ed887ffaace6586
|
[] |
no_license
|
pyzhu/ajl
|
7f3895bc2643b76fd2a6fc68ba8c60a41c66ff86
|
e7b867b102316021af1b3310cd2ccb4ad39dc27f
|
refs/heads/master
| 2021-08-22T17:01:07.506079
| 2017-11-30T03:53:02
| 2017-11-30T03:53:02
| 111,603,307
| 0
| 1
| null | 2017-11-30T18:24:26
| 2017-11-21T21:28:53
|
R
|
UTF-8
|
R
| false
| false
| 696
|
r
|
getArr.R
|
#'
#'
#' @import testthat
getArr <- function(n, num.obs.nonwhite, a, b, mu, sigma, k, theta) {
# throw an error if num.obs.nonwhite is greater than n
assert_that(n > num.obs.nonwhite)
dt <- data.table(arr_id = seq.int(n), key="arr_id")
# setkey(dt, arr_id)
dt[, true_risk := rbeta(.N, a, b)]
# Set biases according to race, nonwhite = 1
# no systematic bias if white
dt[, nonwhite := c(rep(TRUE, num.obs.nonwhite), rep(FALSE, .N-num.obs.nonwhite))]
dt[, noise := rnorm(.N, mu * as.numeric(nonwhite), sigma), by=nonwhite]
# given the true risk, draw from binomial
# TODO: would give false if true_risk = 1
dt[, misbehave := runif(.N) < true_risk]
return(dt)
}
|
da826dfa3d1499a631c5b8e5965aaa824b77edfe
|
80ada99bd981917c00ef9c6e115e9e61dfa8c038
|
/man/googledrive.Rd
|
713dc1dd786116a32cbe6b1233bae6cf49b10caa
|
[] |
no_license
|
hturner/googledrive
|
10e513d1b68f92f3af100f2289bb6c1587528817
|
76af28ee782462aca678438d6d1017223054147d
|
refs/heads/master
| 2021-01-19T20:06:25.251238
| 2017-08-23T16:15:36
| 2017-08-23T16:15:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 235
|
rd
|
googledrive.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/googledrive-package.r
\docType{package}
\name{googledrive}
\alias{googledrive}
\alias{googledrive-package}
\title{googledrive.}
\description{
googledrive.
}
|
4ebac1878672f748e17e4d7dd7779ac01efa34ed
|
241c99c7dbd3eba623d72ffdfdaa1a652be1bafb
|
/man/computePageAsymptoticProbability.Rd
|
f71b29668021f0e30ac15876cca396c478942fa2
|
[] |
no_license
|
JacintoCC/rNPBST
|
29b64f3230f5fc905f358f65e2b176e29c517324
|
633e7d2ba56707c721c4e6f6e1cfa3c67a79c703
|
refs/heads/master
| 2022-06-07T22:32:56.036084
| 2022-04-12T15:31:45
| 2022-04-12T15:31:45
| 80,525,887
| 11
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 489
|
rd
|
computePageAsymptoticProbability.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/NP-Page.R
\name{computePageAsymptoticProbability}
\alias{computePageAsymptoticProbability}
\title{Compute asymptotic probability of distribution}
\usage{
computePageAsymptoticProbability(N, k, L)
}
\arguments{
\item{N}{number of columns}
\item{k}{number of rows}
\item{L}{Page statistic}
}
\value{
Exact p-value computed
}
\description{
Function to get the asymptotic probability given a distribution table
}
|
586e624bbb8230b2027fa5157f72abef0d65a06b
|
dba4e381f713a794ce7f455b4141fab6c2a863e2
|
/ch02/Chapter_2.r
|
aefc7ce2b88d7c8f477eead3efaac3c3c3291cbb
|
[
"MIT"
] |
permissive
|
chaoshunh/Hands-On-Exploratory-Data-Analysis-with-R
|
32639f65a28d241f9a338da42cfe441b146426d2
|
c6880753cb4b99a97b113f891dd4a0afb58de70c
|
refs/heads/master
| 2020-05-27T05:14:54.232399
| 2019-05-24T18:55:33
| 2019-05-24T18:55:33
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,109
|
r
|
Chapter_2.r
|
library(readr)
read_csv("mtcars.csv")
cols(
mpg = col_double(),
cyl = col_double(),
disp = col_double(),
hp = col_double(),
drat = col_double(),
wt = col_double(),
qsec = col_double(),
vs = col_double(),
am = col_double(),
gear = col_double(),
carb = col_double()
)
cars_data <- read_csv(readr_example("mtcars.csv"))
cols(
mpg = col_double(),
cyl = col_double(),
disp = col_double(),
hp = col_double(),
drat = col_double(),
wt = col_double(),
qsec = col_double(),
vs = col_double(),
am = col_double(),
gear = col_double(),
carb = col_double()
)
read_csv("data.csv", skip = 2)
read_csv("data.csv", col_names = FALSE)
cars_data <- read_csv(readr_example("mtcars.csv"), col_types="ddddddddd")
read_csv(file, col_names = TRUE, col_types = NULL,
locale = default_locale(), na = c("", "NA"), quoted_na = TRUE,
quote = "\"", comment = "", trim_ws = TRUE, skip = 0,
n_max = Inf, guess_max = min(1000, n_max),
progress = show_progress(), skip_empty_rows = TRUE)
read_tsv("data.tsv")
read_tsv(file, col_names = TRUE, col_types = NULL,
locale = default_locale(), na = c("", "NA"), quoted_na = TRUE,
quote = "\"", comment = "", trim_ws = TRUE, skip = 0,
n_max = Inf, guess_max = min(1000, n_max),
progress = show_progress(), skip_empty_rows = TRUE)
read_delim("data.del", delim = "|")
read_fwf("data.txt")
read_fwf(data.txt, fwf_widths(c(10, 20, 18), c("ID", "Revenue", "Region")))
read_table("table.csv")
read_log("data.log")
library(readxl)
read_excel("data.xls")
read_excel("data.xlsx")
excel_sheets("data.xlsx")
read_excel("data.xlsx", sheet= 1)
read_excel("data.xlsx", sheet= "sheet1")
library(jsonlite)
json_data <-
'[
{"Name" : "John", "Age" : 42, "City" : "New York"},
{"Name" : "Jane", "Age" : 41, "City" : "Paris"},
{},
{"Name" : "Bob", "City" : "London"}
]'
df <- fromJSON(json_data)
df
library(httr)
r <- GET("http://httpbin.org/get")
r
content(r, "raw")
library(DBI)
con <- dbConnect(RSQLite::SQLite(), dbname = ":memory:")
dbListTables(con)
|
b8d6dc63b7a05c5f90951e5b779da3853e25548a
|
d95961457e7c226e5df91bd94bd0f729234392a4
|
/WordCloud.R
|
3d0549ecef39d483917835ca365167d20e5fd651
|
[] |
no_license
|
DeepikaMudigonda/whatcloud
|
3119bd2ceba2743816c9571fc885de78dc4f3739
|
884d2526408dbec27b375f183681fb102d0ddcdf
|
refs/heads/master
| 2021-05-08T01:19:16.808272
| 2017-10-22T02:32:28
| 2017-10-22T02:32:28
| 107,803,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 669
|
r
|
WordCloud.R
|
library(tm)
library(wordcloud)
dirPath<-"C:/Users/Deepika/Desktop/wordcloud/"
txt<-Corpus(DirSource(dirPath))
toSpace <- content_transformer(function(x, pattern) gsub(pattern, " ", x))
txt <- tm_map(txt, content_transformer(tolower))
txt<- tm_map(txt, toSpace, "[^a-zA-Z]")
rmv<-c("griet","mudigonda","media","omitted","haa","haaa","haaaa")
txt<-tm_map(txt,removeWords,rmv)
dtm<-TermDocumentMatrix(txt)
mtx<-as.matrix(dtm)
smtx<-sort(rowSums(mtx),decreasing=TRUE)
frm<-data.frame(word=names(smtx),freq=smtx)
head(frm,100)
wordcloud(frm$word,frm$freq,rot.per = .2,min.freq = 1,max.words = 300,random.color = TRUE,colors = brewer.pal(8,"Dark2"),random.order = FALSE)
|
ec5e056ab5f03c8de1ee0c18330e904834d4f45c
|
7505f15b5e579e8d38b84e45ce55fb96f33b8ce2
|
/man/Duplicate.Rd
|
804c2fe30d7b0f4409301edb5d9f8cd0df4001aa
|
[] |
no_license
|
abuchmueller/Rpriori
|
9eaf823234760a7353cbf8bca770da128ac4763c
|
209b23c4596d641d86513381c2ea2c61b4b10880
|
refs/heads/master
| 2020-03-27T20:59:08.084554
| 2019-05-03T21:51:28
| 2019-05-03T21:51:28
| 147,108,083
| 1
| 0
| null | 2018-09-02T17:44:29
| 2018-09-02T17:44:29
| null |
UTF-8
|
R
| false
| true
| 587
|
rd
|
Duplicate.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/HelperFunctions.R
\name{Duplicate}
\alias{Duplicate}
\title{Find duplicate columns in a sparse matrix}
\usage{
Duplicate(mat)
}
\arguments{
\item{mat}{A sparse matrix of type ngTMatrix, lgCMatrix or ngCMatrix.}
}
\value{
A boolean vector that does have one element for each column in the matrix. True does
represent a column that is replicated, False means it is not replicated.
}
\description{
This function is based on a c function from the arules package that finds duplicated columns
in a sparse matrix
}
|
ced9755f390464f9de8c192b723f7f5a8d5cf2c2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/iemisc/examples/secd.Rd.R
|
5c3dc7f19f6766973ff5ef810454084d3c471d3f
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 255
|
r
|
secd.Rd.R
|
library(iemisc)
### Name: secd
### Title: Secant (in degrees) [GNU Octave/MATLAB compatible]
### Aliases: secd
### ** Examples
library(iemisc)
# Examples from GNU Octave secd
secd (seq(0, 80, by = 10))
secd (c(0, 180, 360))
secd (c(90, 270))
|
c532837c64b5c1edfb93755f7e29d87fefd5a8c0
|
c6aed201f17ea0305360df11e2d5aa23a8f6289f
|
/man/elbow.Rd
|
582105bbd1ff8a9f407b5a8cb283a485f3f7a280
|
[] |
no_license
|
songmm19900210/GMD
|
ede703077eeb9a2ac1dec6c19ab6cb62c46c6aac
|
7fa0ce70104fae31ae371714d1d2e36cdacd2fce
|
refs/heads/master
| 2021-01-17T22:54:32.355582
| 2014-08-26T00:00:00
| 2014-08-26T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,869
|
rd
|
elbow.Rd
|
% Generated by roxygen2 (4.0.1): do not edit by hand
\name{elbow}
\alias{elbow}
\alias{elbow.batch}
\alias{plot.elbow}
\title{The "Elbow" Method for Clustering Evaluation}
\usage{
## find a good k given thresholds of EV and its increment.
elbow(x,inc.thres,ev.thres,precision=3,print.warning=TRUE)
## a wrapper of `elbow' testing multiple thresholds.
elbow.batch(x,inc.thres=c(0.01,0.05,0.1),
ev.thres=c(0.95,0.9,0.8,0.75,0.67,0.5,0.33),precision=3)
\method{plot}{elbow}(x,elbow.obj=NULL,main,xlab="k",
ylab="Explained_Variance",type="b",pch=20,col.abline="red",
lty.abline=3,if.plot.new=TRUE,print.info=TRUE,
mar=c(4,5,3,3),omi=c(0.75,0,0,0),...)
}
\arguments{
\item{x}{a `css.multi' object, generated by \emph{\code{css.hclust}}}
\item{inc.thres}{numeric with value(s) from 0 to 1, the threshold of the increment of EV.
A single value is used in \code{elbow} while a vector of values in \code{elbow.batch}.}
\item{ev.thres}{numeric with value(s) from 0 to 1, the threshold of EV.
A single value is used in \code{elbow} while a vector of values in \code{elbow.batch}.}
\item{precision}{integer, the number of digits to round for numerical comparison.}
\item{print.warning}{logical, whether to print warning messages.}
\item{elbow.obj}{a `elbow' object, generated by \emph{\code{elbow}} or \emph{\code{elbow.batch}}}
\item{main}{an overall title for the plot.}
\item{ylab}{a title for the y axis.}
\item{xlab}{a title for the x axis.}
\item{type}{what type of plot should be drawn.\cr
See \code{help("plot", package="graphics")}.}
\item{pch}{Either an integer specifying a symbol or a single character
to be used as the default in plotting points (see \code{par}).}
\item{col.abline}{color for straight lines through the current plot
(see option \code{col} in \code{par}).}
\item{lty.abline}{line type for straight lines through the current plot
(see option \code{lty} in \code{par}).}
\item{if.plot.new}{logical, whether to start a new plot device or not.}
\item{print.info}{logical, whether to print the information of `elbow.obj'.}
\item{mar}{A numerical vector of the form 'c(bottom, left, top, right)'
which gives the number of lines of margin to be specified on
the four sides of the plot (see option \code{mar} in \code{par}).
The default is 'c(4, 5, 3, 3) + 0.1'.}
\item{omi}{A vector of the form 'c(bottom, left, top, right)' giving the
size of the outer margins in inches (see option \code{omi} in \code{par}).}
\item{...}{arguments to be passed to method \code{plot.elbow},
such as graphical parameters (see \code{par}).}
}
\value{
Both \code{elbow} and \code{elbow.btach} return a `elbow' object
(if a "good" \emph{\code{k}} exists),
which is a list containing the following components
\tabular{ll}{
k \tab number of clusters\cr
ev \tab explained variance given \emph{\code{k}}\cr
inc.thres \tab the threshold of the increment in EV\cr
ev.thres \tab the threshold of the EV\cr
}
, and with an attribute `meta' that contains
\tabular{ll}{
description \tab A description about the "good" \emph{\code{k}}\cr
}
}
\description{
Determining the number of clusters in a data set by the "elbow" rule.
}
\details{
Determining the number of clusters in a data set by the "elbow" rule and
thresholds in the explained variance (EV) and its increment.
}
\examples{
## load library
require("GMD")
## simulate data around 12 points in Euclidean space
pointv <- data.frame(x=c(1,2,2,4,4,5,5,6,7,8,9,9),
y=c(1,2,8,2,4,4,5,9,9,8,1,9))
set.seed(2012)
mydata <- c()
for (i in 1:nrow(pointv)){
mydata <- rbind(mydata,cbind(rnorm(10,pointv[i,1],0.1),
rnorm(10,pointv[i,2],0.1)))
}
mydata <- data.frame(mydata); colnames(mydata) <- c("x","y")
plot(mydata,type="p",pch=21, main="Simulated data")
## determine a "good" k using elbow
dist.obj <- dist(mydata[,1:2])
hclust.obj <- hclust(dist.obj)
css.obj <- css.hclust(dist.obj,hclust.obj)
elbow.obj <- elbow.batch(css.obj)
print(elbow.obj)
## make partition given the "good" k
k <- elbow.obj$k; cutree.obj <- cutree(hclust.obj,k=k)
mydata$cluster <- cutree.obj
## draw a elbow plot and label the data
dev.new(width=12, height=6)
par(mfcol=c(1,2),mar=c(4,5,3,3),omi=c(0.75,0,0,0))
plot(mydata$x,mydata$y,pch=as.character(mydata$cluster),
col=mydata$cluster,cex=0.75,main="Clusters of simulated data")
plot(css.obj,elbow.obj,if.plot.new=FALSE)
## clustering with more relaxed thresholds (, resulting a smaller "good" k)
elbow.obj2 <- elbow.batch(css.obj,ev.thres=0.90,inc.thres=0.05)
mydata$cluster2 <- cutree(hclust.obj,k=elbow.obj2$k)
dev.new(width=12, height=6)
par(mfcol=c(1,2), mar=c(4,5,3,3),omi=c(0.75,0,0,0))
plot(mydata$x,mydata$y,pch=as.character(mydata$cluster2),
col=mydata$cluster2,cex=0.75,main="Clusters of simulated data")
plot(css.obj,elbow.obj2,if.plot.new=FALSE)
}
\seealso{
\code{\link{css}} and \code{\link{css.hclust}} for computing Clustering Sum-of-Squares.
}
|
79c518beb61276b5068702d6f8b0e0ce4b2cd45e
|
095f8d82a843eb3fd7c7308c1ba14b301513c9ed
|
/scripts/t-testLOOP.r
|
7edb244600d24900ee0573125925904fcda1d9f1
|
[] |
no_license
|
DemonKiku/Internship
|
6c63c3d3b345cd36469accfc4f7bfcc13f57b609
|
6c1b47902bb78c83f917856efe7242e932efbbcd
|
refs/heads/master
| 2020-05-21T14:38:28.981538
| 2019-05-17T02:02:44
| 2019-05-17T02:02:44
| 186,085,791
| 0
| 0
| null | null | null | null |
GB18030
|
R
| false
| false
| 1,106
|
r
|
t-testLOOP.r
|
setwd("C:\\Users\\dell\\Desktop\\landmarks")
#读取所有符合要求的文件
myfiles0 <- Sys.glob("*.csv")
#myfiles <- list.files(pattern = "*.csv")
#循环进行t-test
example=read.csv("names.csv",header = T)
p=example[c("names")]
rownames(p)=p[,1]
for (i in 1:19){
a=read.csv(myfiles0[i],header = T)
#人工之间
p$mlx[i]=t.test(a$ml_x1,a$ml_x2,paired=T)$p.value
p$mly[i]=t.test(a$ml_y1,a$ml_y2,paired=T)$p.value
p$mlz[i]=t.test(a$ml_z1,a$ml_z2,paired=T)$p.value
#机器之间
p$aux[i]=t.test(a$au_x1,a$au_x2,paired=T)$p.value
p$auy[i]=t.test(a$au_y1,a$au_y2,paired=T)$p.value
p$auz[i]=t.test(a$au_z1,a$au_z2,paired=T)$p.value
#第一次人工与机器
p$ma1x[i]=t.test(a$ml_x1,a$au_x1,paired=T)$p.value
p$ma1y[i]=t.test(a$ml_y1,a$au_y1,paired=T)$p.value
p$ma1z[i]=t.test(a$ml_z1,a$au_z1,paired=T)$p.value
#第二次人工与机器
p$ma2x[i]=t.test(a$ml_x2,a$au_x2,paired=T)$p.value
p$ma2y[i]=t.test(a$ml_y2,a$au_y2,paired=T)$p.value
p$ma2z[i]=t.test(a$ml_z2,a$au_z2,paired=T)$p.value
}
write.table(p,file = "p-values.csv",sep=",")
|
403c0def157ce29283ef294f8feee1e1e91e8662
|
52cf2b700d2271b3346580b342252498762fc652
|
/Chicago/tests/testthat/testWeights.R
|
1e3bc548edc978c411e370dcdc461ae426598a80
|
[] |
no_license
|
dovetail-genomics/chicago
|
985e077782f7bcad5e8c8e65c62fdebe62a5aff0
|
81bbe3164b012a96f17af84aee04daade794a8c3
|
refs/heads/main
| 2023-06-15T17:57:23.506628
| 2021-07-03T20:00:22
| 2021-07-03T20:00:22
| 379,648,353
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,617
|
r
|
testWeights.R
|
library(Chicago)
data("cdUnitTest")
##modifications to cdUnitTest, ensuring it uses correct design directory
designDir <- file.path(system.file("extdata", package="Chicago"), "unitTestDesign")
cdUnitTest <- modifySettings(cd=cdUnitTest, designDir=designDir,
settings = list(brownianNoise.samples=1, brownianNoise.subset=NA))
cd <- copyCD(cdUnitTest) ##NB not cd <- cdUnitTest. this line is crucial!
weightsPath <- file.path(system.file("extdata", package="Chicago"), "weights")
context("Weights")
##move weights
##FIXME!
setnames(cd@x, old = "log.w", new="logW.cdUnitTest")
setnames(cd@x, old = "log.q", new="logQ.cdUnitTest")
GMweightSettings <- file.path(weightsPath, "GM12878-2reps.settings")
cd <- modifySettings(cd, settingsFile = GMweightSettings)
cd <- getScores(cd)
setnames(cd@x, old = "log.w", new="logW.GM")
setnames(cd@x, old = "log.q", new="logQ.GM")
hMweightSettings <- file.path(weightsPath, "humanMacrophage-7reps.settings")
cd <- modifySettings(cd, settingsFile = hMweightSettings)
cd <- getScores(cd)
setnames(cd@x, old = "log.w", new="logW.hM")
setnames(cd@x, old = "log.q", new="logQ.hM")
mESCweightSettings <- file.path(weightsPath, "mESC-2reps.settings")
cd <- modifySettings(cd, settingsFile = mESCweightSettings)
cd <- getScores(cd)
setnames(cd@x, old = "log.w", new="logW.mESC")
setnames(cd@x, old = "log.q", new="logQ.mESC")
test_that("using alternative weight settings results in differing weights", {
expect_true(all(cd@x[, logW.GM != logW.hM]))
expect_true(all(cd@x[, logW.GM != logW.mESC]))
expect_true(all(cd@x[, logW.hM != logW.mESC]))
})
|
512d7c3df9e3ed529209856eff1c123fa05a9a7a
|
02cf3319522b31765710ae33d43c1fb40bf870c6
|
/DataWrSlides/comics/comics.R
|
79c6a03fe9f63fcda966c0637352cce5118649b5
|
[] |
no_license
|
tjmckinley/AdVis
|
cbba658e99b8557efc3298f2984d08bc0e93d13c
|
661c80cfac2614e28a24bc72404928f43ed07ce0
|
refs/heads/master
| 2023-06-08T10:14:51.973351
| 2019-06-11T09:51:45
| 2019-06-11T09:51:45
| 381,625,467
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,046
|
r
|
comics.R
|
## extract subsets of comic book data
## load libraries
library(tidyverse)
## load data (data from https://github.com/fivethirtyeight/data/blob/master/comic-characters/marvel-wikia-data.csv)
marvel <- read_csv("marvel-wikia-data.csv")
## extract subsets
mM <- marvel %>%
filter(SEX == "Male Characters") %>%
slice(1)
mF <- marvel %>%
filter(SEX == "Female Characters") %>%
slice(1)
mNA <- marvel %>%
filter(is.na(Year)) %>%
slice(1)
marvel <- rbind(mM, mF, mNA)
rm(mM, mF, mNA)
## separate into tables
publisher <- marvel %>%
select(name) %>%
mutate(publisher = "Marvel")
year_published <- marvel %>%
select(name, Year) %>%
na.omit()
marvel <- marvel %>%
select(name, EYE, HAIR, SEX, APPEARANCES)
## load data (data from https://github.com/fivethirtyeight/data/blob/master/comic-characters/dc-wikia-data.csv)
dc <- read_csv("dc-wikia-data.csv") %>%
rename(Year = YEAR)
## extract subsets
mM <- dc %>%
filter(SEX == "Male Characters") %>%
slice(1)
mF <- dc %>%
filter(SEX == "Female Characters") %>%
slice(1)
mNA <- dc %>%
filter(is.na(Year)) %>%
slice(1)
dc <- rbind(mM, mF, mNA)
rm(mM, mF, mNA)
## separate into tables
publisher <- dc %>%
select(name) %>%
mutate(publisher = "DC") %>%
rbind(publisher)
year_published <- dc %>%
select(name, Year) %>%
na.omit() %>%
rbind(year_published)
comics <- dc %>%
select(name, EYE, HAIR, SEX, APPEARANCES) %>%
rbind(marvel) %>%
select(-SEX) %>%
mutate(name = map_chr(strsplit(name, "\\("), 1)) #%>%
#mutate_if(is.character, factor)
publisher <- publisher %>%
mutate(name = map_chr(strsplit(name, "\\("), 1)) #%>%
#mutate_if(is.character, factor)
year_published <- year_published %>%
mutate(name = map_chr(strsplit(name, "\\("), 1)) #%>%
#mutate_if(is.character, factor)
rm(marvel, dc)
## write tables out
saveRDS(as.data.frame(comics), "comics.rds")
saveRDS(as.data.frame(publisher), "publisher.rds")
saveRDS(as.data.frame(year_published), "year_published.rds")
|
14b1ca3ca1162651d24e15b463c5f8e0014ad5f6
|
fcb656a34dae2d69dc03842e1ec86352be0dc0a5
|
/Lesson-01/lesson_01_Hit_the_ground_running.R
|
b68bd8eed357cf25f79d81bd2f54b0de7e517302
|
[
"MIT"
] |
permissive
|
hossainlab/R4Biologists
|
24706f056485c4151e8965fb14f2ceffb6b14c7f
|
caeab4bd0c8bbcb083114eb0422a5753b55ba800
|
refs/heads/main
| 2023-04-12T23:04:57.533534
| 2021-04-17T10:42:21
| 2021-04-17T10:42:21
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,879
|
r
|
lesson_01_Hit_the_ground_running.R
|
# ==========================================================
#
# Lesson 1 -- Hit the ground running
# • Reading in data
# • Creating a quick plot
# • Saving publication-quality plots in multiple
# file formats (.png, .jpg, .pdf, and .tiff)
#
# ==========================================================
# Go to the packages tab in the bottom right part of Rstudio, click "Install" at the top, type in ggplot2, and hit Install
# Go to the Files tab in the bottom right part of Rstudio, navigate to where you can see the Lesson-01 folder.
# then click "More" and choose "Set As Working Directory"
library(ggplot2)
filename <- "Lesson-01/Encode_HMM_data.txt"
# Select a file and read the data into a data-frame
my_data <- read.csv(filename, sep="\t", header=FALSE)
# if this gives an error, make sure you have followed the steps above to set your working directory to the folder that contains the file you are trying to open
head(my_data)
# Rename the columns so we can plot things more easily without looking up which column is which
names(my_data)[1:4] <- c("chrom","start","stop","type")
# At any time, you can see what your data looks like using the head() function:
head(my_data)
# Now we can make an initial plot and see how it looks
ggplot(my_data,aes(x=chrom,fill=type)) + geom_bar()
# Save the plot to a file
# Different file formats:
png("Lesson-01/plot.png")
ggplot(my_data,aes(x=chrom,fill=type)) + geom_bar()
dev.off()
tiff("Lesson-01/plot.tiff")
ggplot(my_data,aes(x=chrom,fill=type)) + geom_bar()
dev.off()
jpeg("Lesson-01/plot.jpg")
ggplot(my_data,aes(x=chrom,fill=type)) + geom_bar()
dev.off()
pdf("Lesson-01/plot.pdf")
ggplot(my_data,aes(x=chrom,fill=type)) + geom_bar()
dev.off()
# High-resolution:
png("Lesson-01/plot_hi_res.png",1000,1000)
ggplot(my_data,aes(x=chrom,fill=type)) + geom_bar()
dev.off()
|
d105dfb4c708cf5c7cfa0eb9e8e6584c6b83b1f3
|
8caeff2957ae777eabbb17e92ac49a5f51f1937c
|
/Ryan_Additon_9:26:18.R
|
92e6eed0ba8ddc0ebae339ba804f98d0d4707d73
|
[] |
no_license
|
ayusharora99/2018_Umpires
|
fbe382e7c3d1b6fbafeb2503cb9a9bffc26103db
|
ea344d33ad55e732a22c33ab155842834ded4422
|
refs/heads/master
| 2020-04-23T18:22:49.316852
| 2019-03-04T00:43:47
| 2019-03-04T00:43:47
| 171,364,791
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,025
|
r
|
Ryan_Additon_9:26:18.R
|
# 9/26/18 : Giants vs Padres : Ryan Additon
# 125 pitches were called strikes/balls
# The robot-ump called 32 of those pitches as called strikes & 93 as balls
# Ryan Additon called 36 of those pitches as called strikes & 89 as balls
# Accuracy: 90%
# File --> Import Dataset --> From text (base) (YES TO HEADER) & import csv file downloaded from https://baseballsavant.mlb.com/statcast_search
Ryan_Additon <- read.csv("~/Desktop/Analyzing Baseball Data with R/2018 Giants Umpires/Ryan_Additon_9:26:18.csv")
# Packages needed for Analysis
install.packages(c("e1071","caret","rpart"))
library(e1071)
library(caret)
library(rpart)
# Getting Familiar With Dataset & removing any NULL values
dim(Ryan_Additon)
names(Ryan_Additon)
is.na(Ryan_Additon)
colSums(is.na(Ryan_Additon))
Ryan_Additon = Ryan_Additon[,colSums(is.na(Ryan_Additon)) == 0]
dim(Ryan_Additon)
# Subsetting Relevant Info
drops = c("event","des","hit_location","bb_type","on_3b","on_2b","on_1b","hc_x","hc_y","hit_distance_sc","launch_speed","launch_angle","estimated_ba_using_speedangle","estimated_woba_using_speedangle","woba_value","woba_denom","launch_speed_angle","iso_value","babip_value")
Ryan_Additon = Ryan_Additon[ , !(names(Ryan_Additon) %in% drops)]
dim(Ryan_Additon)
# Splitting data into Training (80% of data) & Testing (20% of data) sets
Ryan_Additon_train = Ryan_Additon[0:(0.8 * nrow(Ryan_Additon)),]
dim(Ryan_Additon_train)
prop.table(table(Ryan_Additon_train$type))
Ryan_Additon_test = Ryan_Additon[(0.8*nrow(Ryan_Additon)):nrow(Ryan_Additon),]
dim(Ryan_Additon_test)
prop.table(table(Ryan_Additon_test$type))
# Creating Decision Tree to Predict Umpire's Call
tree_model <-rpart(type~., data = Ryan_Additon_train)
plot(tree_model)
text(tree_model, use.n = T)
# Testing Decision Tree with Test Data
Prediction_UMP<-predict(tree_model, newdata = Ryan_Additon_test, type = 'class')
# Accuracy of Decision Tree created for specific Umpire
confusionMatrix(table(Prediction_UMP, Ryan_Additon_test$type))
# Subset for Borderline Calls
Ryan_Additon$Borderline = ifelse(((abs(Ryan_Additon$plate_x)> 0.748) & (abs(Ryan_Additon$plate_x)<0.914))
& (((Ryan_Additon$plate_z > Ryan_Additon$sz_top-0.83) & (Ryan_Additon$plate_z < Ryan_Additon$sz_top+0.83))
| (((Ryan_Additon$plate_z)<Ryan_Additon$sz_bot+0.83) & ((Ryan_Additon$plate_z) > Ryan_Additon$sz_bot-0.83))), 'T','F')
# Copy Pitch Calls into another data set and adjust type to the electronic strike zone calls
# Seperate Ball & Strike Types
Ryan_Additon_Strikes = subset(Ryan_Additon, Ryan_Additon$type == "S")
Ryan_Additon_Balls = subset(Ryan_Additon, Ryan_Additon$type == "B")
# Borderline
Ryan_Additon_Borderline = subset(Ryan_Additon, Ryan_Additon$Borderline == "T")
# Create new column for adjusted call based on electronic strike zone on Umpire's called strikes
# (plate_x < 0.833 & $plate_x > -0.833) & ($plate_z > sz_bot & plate_z < sz_top) == S
Ryan_Additon_Strikes$AdjustedCall = ifelse((Ryan_Additon_Strikes$plate_x < 0.833 & Ryan_Additon_Strikes$plate_x > -0.833) & (Ryan_Additon_Strikes$plate_z > Ryan_Additon_Strikes$sz_bot & Ryan_Additon_Strikes$plate_z < Ryan_Additon_Strikes$sz_top), 'S', 'B')
table(Ryan_Additon_Strikes$AdjustedCall)
# Create new column for adjusted call based on electronic strike zone on Umpire's called balls
# (plate_x > 0.833 | $plate_x < -0.833) | ($plate_z < sz_bot | plate_z > sz_top) == B
Ryan_Additon_Balls$AdjustedCall = ifelse((Ryan_Additon_Balls$plate_x > 0.833 | Ryan_Additon_Balls$plate_x < -0.833)|(Ryan_Additon_Balls$plate_z < Ryan_Additon_Balls$sz_bot | Ryan_Additon_Balls$plate_z > Ryan_Additon_Balls$sz_top),'B','S')
table(Ryan_Additon_Balls$AdjustedCall)
# Borderline
Ryan_Additon_Borderline$AdjustedCall = ifelse((Ryan_Additon_Borderline$plate_x < 0.833 & Ryan_Additon_Borderline$plate_x > -0.833) & (Ryan_Additon_Borderline$plate_z > Ryan_Additon_Borderline$sz_bot & Ryan_Additon_Borderline$plate_z < Ryan_Additon_Borderline$sz_top), 'S', 'B')
# Merge to create new dataset
Ryan_Additon_AdjustedCalls = rbind(Ryan_Additon_Strikes,Ryan_Additon_Balls)
Ryan_Additon_AdjustedCalls$OnFieldRuling = ifelse(Ryan_Additon_AdjustedCalls$type == "S","S","B")
# Re-create Decision Tree but this time with whole Data rather than just training set.
tree_model <-rpart(type~., data = Ryan_Additon)
plot(tree_model)
text(tree_model, use.n = T)
# Predict using Umpire's Decision Tree on the AdjustedCalls dataset & compare calls with adjusted_call to find Accuracy
Prediction_UMP<-predict(tree_model, newdata = Ryan_Additon_AdjustedCalls, type = 'class')
confusionMatrix(table(Prediction_UMP,Ryan_Additon_AdjustedCalls$AdjustedCall))
# Borderline
Prediction_BORDERLINE<-predict(tree_model, newdata = Ryan_Additon_Borderline, type = 'class')
confusionMatrix(table(Prediction_BORDERLINE,Ryan_Additon_Borderline$AdjustedCall))
# Correct vs InCorrect Call
# Correct Calls
Ryan_Additon_AdjustedCalls$Call = ifelse( ((Ryan_Additon_AdjustedCalls$type == 'B') & ( (Ryan_Additon_AdjustedCalls$AdjustedCall == "B") | (Ryan_Additon_AdjustedCalls$Borderline == "T") ) ), "C","I" )
Ryan_Additon_AdjustedCalls$Call = ifelse( ((Ryan_Additon_AdjustedCalls$type == 'S') & ((Ryan_Additon_AdjustedCalls$AdjustedCall == "S") | (Ryan_Additon_AdjustedCalls$Borderline == "T") ) ), "C","I")
# InCorrect Calls
Ryan_Additon_AdjustedCalls$Call = ifelse( ( (Ryan_Additon_AdjustedCalls$type == 'B') & ((Ryan_Additon_AdjustedCalls$AdjustedCall == "S") & (Ryan_Additon_AdjustedCalls$Borderline == "F") ) ), "I","C")
Ryan_Additon_AdjustedCalls$Call = ifelse( ( (Ryan_Additon_AdjustedCalls$type == 'S') & ((Ryan_Additon_AdjustedCalls$AdjustedCall == "B") & (Ryan_Additon_AdjustedCalls$Borderline == "F") ) ), "I","C")
table(Ryan_Additon_AdjustedCalls$Call)
# Which Pitchers Recieved the InCorrect Calls
Ryan_Additon_Incorrect = subset(Ryan_Additon_AdjustedCalls, Ryan_Additon_AdjustedCalls$Call == "I")
print(Ryan_Additon_Incorrect$player_name)
print(Ryan_Additon_Incorrect$AdjustedCall)
|
5c0af33419774ae044f241360f568ce9fa209996
|
2ca29aa14b47895f8f5f7ef1325503a784f52b3a
|
/cning0506_exercise2_1_b2.R
|
a45144a66ae3b0f3b702415f5603ecc37c831e13
|
[] |
no_license
|
cning0506/DATA_330_Applied_TimeSeries_Analysis
|
63ce9361d0e3e057a978b1a2a0b0a86437375a2d
|
6a0fb39df4275e2a47be61ffc62b118cc85a3aff
|
refs/heads/main
| 2023-04-01T05:07:32.452796
| 2021-04-07T21:37:29
| 2021-04-07T21:37:29
| 332,911,145
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,482
|
r
|
cning0506_exercise2_1_b2.R
|
# R-Version 4.0.2 "Taking Off Again"
# File Name: "Exercise 2.1.R"
# Case Study Description: Duplicate Analysis
## The goal of this project was to determine the efficacy of wireless sensor networks (WSN) under natural outdoor conditions for collecting high precision environmental data.
# Created by: Conrad Ning
# Last edited on: 3/12/2021
setwd("C:/Users/Winston_Ning/Desktop/Spring 2021/DATA 330//Module 2/Exercsie2_1/")
######Import Sources #####
source("Utility.R")
# Load all the Dataset using
id003 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2003.txt")
id015 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2015.txt")
id025 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2025.txt")
id045 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2045.txt")
id055 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2055.txt")
id065 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2065.txt")
id085 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2085.txt")
id095 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2095.txt")
id103 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2103.txt")
id115 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2115.txt")
id125 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2125.txt")
id135 <-load_data("https://raw.githubusercontent.com/ds-wm/ds-wm.github.io/master/course/atsa/data/MDA300BKUP_resultsConverted-2YR_2135.txt")
listofdf <- list(id003,id015,id025,id045,id055,id065,id085,id095,id103,id115,id125,id135)
ogdf <- aggregate(listofdf)
# De-duplication
duplicateVector = c()
numRow <- length(id003$result_time)
|
637e6f3bc375cd5c099895378fbaa134420df80d
|
86430fba0fbd2f422a90a4fced69fe7181f23147
|
/tests/testthat/test_evaluate.R
|
f72efdac5e145ef0c06fce6c0dc36849e55d1954
|
[] |
no_license
|
Karel-Kroeze/MultiGHQuad
|
3d26dff9975b3cc30786d24307abe1cef3204807
|
0527705142d40fc567f73d4ca6aa8dfac85ab251
|
refs/heads/master
| 2021-01-21T04:25:42.683975
| 2016-08-11T13:22:57
| 2016-08-11T13:22:57
| 25,973,058
| 1
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,074
|
r
|
test_evaluate.R
|
library(MultiGHQuad)
context("Expectation is evaluated correctly")
pos.def <- function(n, ev = runif(n, 0, 10))
{
if (n == 1) return(matrix(runif(1)))
Z <- matrix(ncol = n, rnorm(n^2))
decomp <- qr(Z)
Q <- qr.Q(decomp)
R <- qr.R(decomp)
d <- diag(R)
ph <- d / abs(d)
O <- Q %*% diag(ph)
Z <- t(O) %*% diag(ev) %*% O
return(Z)
}
for (q in 1:3) for (mu in list(rep(0,q), seq(-3,3,length.out = q))) for (Sigma in list(diag(q), pos.def(q))) {
qp <- init.quad(q, prior = list(mu = mu, Sigma = Sigma), ip = 20)
est <- eval.quad(X = qp)
var <- attr(est, 'variance')
attr(est, 'variance') <- NULL
# should be mean
test_that("Normal expectation", {
expect_equal(est, mu, tolerance = 1E-5)
})
# variance should be prior
test_that("Variance", {
expect_equivalent(var, Sigma)
})
# X^2
est <- eval.quad(dmvnorm, qp, mean = mu, sigma = Sigma, log = TRUE)
var <- attr(est, "variance")
attr(est, "variance") <- NULL
# should be mean
test_that("Normal expectation", {
expect_equal(est, mu, tolerance = 1E-5)
})
}
|
8aff43b5d8552699d77e1fd7e765294ad8e28bb8
|
cb46ebb26c03283012773860d143d77ff62043da
|
/code/13. scatterplot.r
|
43817b3b1aed14aeeb1bf258cda160f45b118212
|
[] |
no_license
|
KaranSShakya/honors-fert_pest
|
6b20916004b2ef107c6aa21f7dd6d4cdba6eb9fc
|
408ff13290abaf865e729442507238b3d37140f2
|
refs/heads/master
| 2022-10-18T23:54:11.172966
| 2020-06-10T15:07:52
| 2020-06-10T15:07:52
| 264,244,685
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,443
|
r
|
13. scatterplot.r
|
# Exploratory plots for the fertilizer/pesticide baseline data set:
#   * country-level scatterplots of input use vs GDP per capita (2016)
#   * regional time-series of log mean input use and related covariates
# Library
library(tidyverse)
library(readr)
library(ggplot2)
library(readxl)
library(ggpubr)
data0 <- read_csv("BASELINE.csv",
                  na = "NA")
# Scatterplot - Fertilizer + Pesticide (2016) ---------
# Columns are selected by position; 2016 rows only.
data1 <- data0 %>%
  select(2, 3, 5, 6, 13)
data2 <- data1 %>%
  filter(Year == "2016")
ggplot(data2, aes(x = log(GDP_capita), y = log(Fertilizer_use), color = Region)) +
  geom_point() +
  geom_text(aes(label = Country), hjust = 1, vjust = 0) +
  labs(title = "Fertilizer Use (2016)", x = "Log(GDP per Capita)", y = "Log(Fertilizer Use)") +
  theme_classic(base_size = 12) +
  coord_cartesian(xlim = c(5.5, 11.2))
data3 <- data0 %>%
  select(2, 3, 4, 6, 13)
data4 <- data3 %>%
  filter(Year == "2016")
ggplot(data4, aes(x = log(GDP_capita), y = log(Pesticide_use), color = Region)) +
  geom_point() +
  geom_text(aes(label = Country), hjust = 1, vjust = 0) +
  # BUG FIX: this plot shows pesticide use, but the y-axis label previously
  # said "Log(Fertilizer Use)".
  labs(title = "Pesticide Use (2016)", x = "Log(GDP per Capita)", y = "Log(Pesticide Use)") +
  theme_classic(base_size = 12) +
  coord_cartesian(xlim = c(5.5, 11.2))
# Other Variable Trend --------------
# Collapse the World Bank region labels into five coarse regions, then plot
# the log of each region-year mean over time.
region <- data0 %>%
  select(-1, -2, -10)
region$Region <- gsub("Europe & Central Asia", "Europe", region$Region)
region$Region <- gsub("Middle East & North Africa", "Africa", region$Region)
region$Region <- gsub("Sub-Saharan Africa", "Africa", region$Region)
region$Region <- gsub("Latin America & Caribbean", "America", region$Region)
region$Region <- gsub("East Asia & Pacific", "Asia", region$Region)
region$Region <- gsub("South Asia", "Asia", region$Region)
region$Region <- gsub("North America", "America", region$Region)
region1 <- region %>%
  group_by(Year, Region) %>%
  summarise(a_pest = mean(Pesticide_use), a_fert = mean(Fertilizer_use),
            a_area = mean(Crop_area), a_exp = mean(Crop_exp_quantity),
            a_rur = mean(Rural_a), a_emp = mean(Employ_per),
            a_yield = mean(Crop_yield)) %>%
  ungroup()
ggplot(region1, aes(x = Year, y = log(a_fert), color = Region)) +
  geom_line()
ggplot(region1, aes(x = Year, y = log(a_pest), color = Region)) +
  geom_line()
ggplot(region1, aes(x = Year, y = log(a_area), color = Region)) +
  geom_line()
ggplot(region1, aes(x = Year, y = log(a_exp), color = Region)) +
  geom_line()
ggplot(region1, aes(x = Year, y = log(a_rur), color = Region)) +
  geom_line()
ggplot(region1, aes(x = Year, y = log(a_emp), color = Region)) +
  geom_line()
ggplot(region1, aes(x = Year, y = log(a_yield), color = Region)) +
  geom_line()
|
d5e0018132943523f81d4c04e720c3efdfce42e6
|
21d5487563190c3b63a249eba7e0b456de498677
|
/simulation_functions.R
|
7f3bbaf796ca89c8226491907ccba1e20ba81e8f
|
[] |
no_license
|
brock-taute/edge-optimization
|
3d075b2d553f8d20073c01726bd92282de9ea4f9
|
1254a1e23c54a11256bc61957621ee8b995aea17
|
refs/heads/master
| 2022-06-09T14:37:28.309711
| 2020-04-30T14:37:54
| 2020-04-30T14:37:54
| 260,231,977
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 9,390
|
r
|
simulation_functions.R
|
# Functions to setup and run simulations with the optimization engine
get_forecast_sequences <- function(first_forecast_point = 1,
                                   first_forecast_length = 24,
                                   first_forecast_gap = 11,
                                   recurring_forecast_length = 37,
                                   recurring_forecast_gap = 24,
                                   trial_length = 8760) {
  # Build the schedule on which new forecast information becomes available
  # when simulating an operating solar + storage system; the result feeds
  # the optimize_dispatch-based simulation functions.
  #
  # Args:
  #   first_forecast_point: index (1-based) of the first optimization
  #   first_forecast_length: hours of data available at the first optimization
  #   first_forecast_gap: hours between the first and second optimizations
  #   recurring_forecast_length: hours of data at each recurring optimization
  #   recurring_forecast_gap: hours between recurring optimizations
  #   trial_length: total number of hours in the analysis
  #
  # The defaults model the NYSERDA day-ahead pricing release schedule over a
  # one-year simulation (day-ahead prices arrive at 11 AM covering the next
  # midnight-to-midnight day), e.g. with the defaults:
  #   forecast_sequence = 1, 12, 36, ..., 8700, 8724
  #   forecast_lengths  = 24, 37, 37, ..., 37, 36
  #
  # Returns a list with:
  #   forecast_sequence: indices where new forecast info is released
  #   forecast_lengths: number of data points covered by each forecast

  # No new information arrives after this point, so the last optimization is
  # always one recurring forecast length before the end of the data
  # (the +1 accounts for R's 1-based indexing).
  final_point <- trial_length - recurring_forecast_length + 1
  # First optimization, then one every recurring_forecast_gap hours starting
  # first_forecast_gap hours after the first.
  schedule <- c(first_forecast_point,
                seq(from = first_forecast_point + first_forecast_gap,
                    to = final_point,
                    by = recurring_forecast_gap))
  # When the stride does not land exactly on the final point (odd timestep
  # interval), append it so the tail of the data is still covered.
  if (schedule[length(schedule)] != final_point) {
    schedule <- c(schedule, final_point)
  }
  # All recurring forecasts share one length; the first differs, and the
  # last is one hour short so state of charge can be computed on the final
  # dispatch hour (also adds a degree of healthy conservatism).
  horizon <- rep(recurring_forecast_length, length(schedule))
  horizon[1] <- first_forecast_length
  horizon[length(horizon)] <- recurring_forecast_length - 1
  list(forecast_sequence = schedule, forecast_lengths = horizon)
}
simulate_remote_net_meter <- function(forecast_sequence, forecast_lengths,
                                      price_kWh, solar,
                                      storage_power, storage_energy, poi_limit,
                                      charge_efficiency, discharge_efficiency,
                                      soc_start) {
  # Model the optimal dispatch of a solar + storage system participating in
  # Remote Net Metering. The system can only charge when offsetting solar
  # production and serves no on-site load, so there are no utility costs;
  # the objective is to shift solar production into high-priced hours of the
  # Day Ahead electricity market.
  #
  # A fresh optimal dispatch is computed at each release of forecast data
  # (forecast_sequence / forecast_lengths, e.g. from
  # get_forecast_sequences()); storage follows each dispatch until it is
  # overwritten by the next one.
  #
  # Vector args:
  #   forecast_sequence: indices where each optimization occurs
  #   forecast_lengths: number of data points in each optimization
  #   price_kWh: time-series electricity price for the full period ($/kWh)
  #   solar: time-series solar production for the full period (kWh AC)
  # Scalar args:
  #   storage_power: AC capacity of the storage inverter (kW)
  #   storage_energy: nominal (DC) capacity of the storage (kWh)
  #   poi_limit: maximum power exportable to the grid (kW AC)
  #   charge_efficiency: fraction of AC power converted to DC charge
  #   discharge_efficiency: fraction of DC power converted to AC
  #   soc_start: state of charge at simulation start (kWh)
  #
  # Returns a list with:
  #   storage: dispatch per interval; charge positive, discharge negative
  #            (kWh AC)
  #   soc: interval-beginning state of charge per interval (nominal kWh DC)

  # Set timer and validate inputs ----
  start_time <- Sys.time()
  trial_length <- forecast_sequence[length(forecast_sequence)] +
    forecast_lengths[length(forecast_lengths)]
  # BUG FIX: the original comparison was inverted (`trial_length < length(...)`)
  # and fired when there was MORE data than needed — the safe case — while
  # letting a genuinely short data set through to fail later with NA
  # subscripts. The simulation needs at least trial_length points of both
  # price and solar data.
  if (trial_length > length(solar) || trial_length > length(price_kWh)) {
    stop('The analysis period is longer than the available data.')
  }
  # Loop through the forecast-release sequence ----
  storage <- rep(0, length(price_kWh))
  soc <- rep(0, length(price_kWh))
  # soc is hour-beginning, so it always leads storage by one index
  soc[forecast_sequence[1]] <- soc_start
  for (n in seq_along(forecast_sequence)) {
    i <- forecast_sequence[n]
    forecast_length <- forecast_lengths[n]
    # Keep track of progress
    print(paste0(n / length(forecast_sequence) * 100, '% Complete!'))
    # Slice out the data visible to this optimization
    # Note: -1 because of base-1 indexing
    forecast_range <- i:(i + forecast_length - 1)
    forecast_solar <- solar[forecast_range]
    forecast_price <- price_kWh[forecast_range]
    soc_i <- soc[i]
    # Get optimal storage dispatch given current information
    optimal_storage <- optimize_dispatch(storage_power, storage_energy,
                                         poi_limit, charge_efficiency,
                                         discharge_efficiency, soc_i,
                                         fees_kWh = 0, tax_kWh = 0,
                                         charge_solar_only = TRUE,
                                         forecast_price,
                                         forecast_solar)
    storage[forecast_range] <- optimal_storage
    # To track state of charge, convert the AC dispatch back to DC:
    # charging loses energy (multiply), discharging draws extra (divide).
    dc_storage <- optimal_storage
    dc_storage[dc_storage > 0] <- dc_storage[dc_storage > 0] *
      charge_efficiency
    dc_storage[dc_storage < 0] <- dc_storage[dc_storage < 0] /
      discharge_efficiency
    # SOC is start-of-hour and thus forward looking:
    # soc[i + 1] = soc[i] + dc_storage[i]
    soc[forecast_range + 1] <- soc[i] + cumsum(dc_storage)
  }
  print(Sys.time() - start_time)
  return(list(storage = storage, soc = soc))
}
|
06e9283020b87bae3fd47ea5ab8086ff6978f5e7
|
c3852480b1ccf9c53cc9777d9d6227b33881556a
|
/tests/testthat/test_normCytof.R
|
c57b9d7812a0f6a3466389bfa9d28fa4400c35d6
|
[] |
no_license
|
HelenaLC/CATALYST
|
f4506a621f81fef74911661d02330253d37aee9e
|
f3e294ed9a4d3f300feb994bb381df6a6b2c8309
|
refs/heads/main
| 2023-08-23T13:30:52.504230
| 2023-04-25T14:53:40
| 2023-04-25T14:53:40
| 75,288,871
| 60
| 34
| null | 2022-05-12T08:25:28
| 2016-12-01T12:06:28
|
R
|
UTF-8
|
R
| false
| false
| 3,444
|
r
|
test_normCytof.R
|
# Tests for CATALYST::normCytof bead-based normalization, run on the
# package's bundled raw_data example.
data(raw_data)
x <- prepData(raw_data)
test_that("normCytof()", {
# With remove_beads = TRUE, beads and doublets are split out of the data
# and returned separately alongside diagnostic plots.
y <- normCytof(x, "dvs", k = 101, remove_beads = TRUE, overwrite = FALSE)
sce <- "SingleCellExperiment"
expect_is(y$data, sce)
expect_is(y$beads, sce)
expect_is(y$removed, sce)
expect_is(y$scatter, "ggplot")
expect_is(y$lines, "ggplot")
expect_true(all(!y$data$remove))
expect_true(all(!y$data$is_bead))
expect_true(ncol(x) == ncol(y$data) + ncol(y$removed))
# With remove_beads = FALSE, all events are kept and only flagged.
z <- normCytof(x, "dvs", k = 101, remove_beads = FALSE, overwrite = FALSE)
expect_identical(dim(z$data), dim(x))
expect_identical(int_colData(z$data)$Time, int_colData(x)$Time)
expect_equivalent(counts(z$data), counts(x))
expect_lte(sum(z$data$is_bead), sum(z$data$remove))
expect_true(sum(z$data$is_bead) == ncol(y$beads))
expect_true(sum(z$data$is_bead | z$data$remove) == ncol(y$removed))
# construct some mock data w/
# signal decreasing over time
set.seed(42)
x <- prepData(raw_data)
chs <- channels(x)
bead_chs <- sample(chs, (n_beads <- 3))
bead_ms <- .get_ms_from_chs(bead_chs)
# amount time-drift (slope) & time points
s <- -0.5
t <- seq(0, 10, l = (n <- 2e3))
bl <- runif(n_beads, 5, 10) # baselines
# Linear drift around per-channel baselines: bead channel values decay
# with slope s, centered on bl at the midpoint of the time range.
z <- outer(s*t, bl-s*max(t)/2, "+")
# add time, DNA & some other channels
z <- cbind(z, t, 0, 0, replicate((n_chs <- 5), runif(n)))
# set random non-bead events
i <- sample(n, (n_cells <- 800))
j <- replicate(n_cells, sample(seq_len(n_beads), 1))
z[cbind(i, j)] <- 0
# set colnames to beads, dna, random channels
dna <- c("Ir191Di", "Ir193Di")
chs <- channels(x)
chs <- sample(setdiff(chs, c(bead_chs, dna)), n_chs)
colnames(z) <- c(bead_chs, "time", dna, chs)
# construct SCE & apply normalization
sce <- prepData(flowFrame(z))
assay(sce, "exprs", FALSE) <- assay(sce)
res <- normCytof(sce, bead_ms, k = 7, plot = FALSE, overwrite = TRUE)
# check number of identified beads
expect_equal(ncol(res$beads), n-n_cells)
# fit LM model through normalized beads
normed <- data.frame(t = t[setdiff(seq_len(n), i)],
t(assay(res$beads, "counts")[bead_chs, ]))
coefs <- vapply(bead_chs, function(u) {
f <- as.formula(paste(u, "~ t"))
coef(lm(f, data = normed))
}, numeric(2))
# LM fit intercepts should be similar to simulated baselines
expect_true(all(abs(coefs[1, ]-bl) < 0.25))
# LM fit slopes should be close to zero after normalization
expect_true(all(abs(coefs[2, ]) < 0.1))
})
# overwrite = TRUE should replace counts/exprs in place instead of adding
# separate normcounts/normexprs assays; the values must match either way.
test_that("normCytof() - overwrite = TRUE", {
i <- sample(ncol(x), 1e3)
y <- normCytof(x[, i], beads = "dvs", k = 21,
overwrite = FALSE, transform = TRUE, plot = FALSE)
z <- normCytof(x[, i], beads = "dvs", k = 21,
overwrite = TRUE, transform = TRUE, plot = FALSE)
expect_true(!"ncounts" %in% assayNames(z$data))
# overwriting keeps half as many assays as adding normalized copies
expect_true(2*length(assays(z$data)) == length(assays(y$data)))
expect_identical(assay(y$data, "normcounts"), assay(z$data, "counts"))
expect_identical(assay(y$data, "normexprs"), assay(z$data, "exprs"))
})
# cofactor = NULL should fall back to the per-channel cofactors already
# stored on the input object by prepData().
test_that("normCytof() - cofactor = NULL", {
cfs <- sample(10, nrow(x), TRUE)
names(cfs) <- channels(x)
x <- prepData(raw_data[[1]], cofactor = cfs)
i <- sample(ncol(x), 1e3)
y <- normCytof(x[, i], beads = "dvs", k = 21, cofactor = NULL)
expect_identical(int_metadata(y$data)$cofactor, cfs)
})
|
09397f2ec6e565a82ce5e594ee7d421f0e6f1c60
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/midasr/examples/expand_amidas.Rd.R
|
66aa4fe2dc8b527ed8ed4445f88bc5e118bfe108
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 233
|
r
|
expand_amidas.Rd.R
|
# Auto-generated example script extracted from the midasr::expand_amidas
# Rd examples; runs a single call with the "nealmon" weight function.
library(midasr)
### Name: expand_amidas
### Title: Create table of weights, lags and starting values for Ghysels
### weight schema
### Aliases: expand_amidas
### ** Examples
expand_amidas("nealmon","A",0,c(1,2),12,c(0,0,0))
|
a0e1681b1e4d58cc4652ac4f28411f865295163e
|
04d93fbc0fb3a160cdfbc8aa3d5c258df7b0d0af
|
/man/convertToSGFeatures.Rd
|
f28048b76bb0ea814a36c78ed251822cb9dd5360
|
[] |
no_license
|
ldg21/SGSeq
|
b279000a73e58514a681d3aa802cdf7ec91a3716
|
6c67388c39853ba5df50c94b5c3fd2457288e825
|
refs/heads/master
| 2021-01-24T08:49:50.563432
| 2020-10-14T19:36:26
| 2020-10-14T19:36:26
| 122,996,617
| 5
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,886
|
rd
|
convertToSGFeatures.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert.R
\name{convertToSGFeatures}
\alias{convertToSGFeatures}
\title{Convert transcript features to splice graph features}
\usage{
convertToSGFeatures(x, coerce = FALSE)
}
\arguments{
\item{x}{\code{TxFeatures} object}
\item{coerce}{Logical indicating whether transcript features
should be coerced to splice graph features without disjoining
exons and omitting splice donor and acceptor sites}
}
\value{
\code{SGFeatures} object
}
\description{
Convert transcript features (predicted from RNA-seq data or
extracted from transcript annotation) to splice graph features.
}
\details{
Splice junctions are unaltered. Exons are disjoined into
non-overlapping exon bins. Adjacent exon bins without a splice site
at the shared boundary are merged.
Entries for splice donor and acceptor sites (positions immediately
upstream and downstream of introns, respectively) are added.
In the returned \code{SGFeatures} object, column \code{type} takes
values \dQuote{J} (splice junction), \dQuote{E} (exon bin),
\dQuote{D} (splice donor) or \dQuote{A} (splice acceptor).
Columns \code{splice5p} and \code{splice3p} indicate mandatory
splices at the 5' and 3' end of exon bins, respectively
(determining whether reads overlapping exon boundaries must be
spliced at the boundary to be considered compatible).
\code{splice5p} (\code{splice3p}) is \code{TRUE} if the first (last)
position of the exon coincides with a splice acceptor (donor)
and it is not adjacent to a neighboring exon bin.
Each feature is assigned a unique feature and gene identifier,
stored in columns \code{featureID} and \code{geneID},
respectively. The latter indicates features that belong to the
same gene, represented by a connected component in the splice graph.
}
\examples{
sgf <- convertToSGFeatures(txf_ann)
}
\author{
Leonard Goldstein
}
|
3abb12ce87be74739c38f91d433738198cb30cbe
|
a636bc0e4e3ceb1fbc5e86e30ae0e41b467391d0
|
/r/variantkey/R/uint64.R
|
572bad8478a0bcd36511a813259a928ffa06f760
|
[
"MIT"
] |
permissive
|
Genomicsplc/variantkey
|
292a42e3de15d8b2bbee8aa05b5090790c00d20c
|
4ee491c11cd1990ca628eab207054da028f8910a
|
refs/heads/master
| 2023-06-09T18:56:56.935122
| 2023-06-08T12:53:06
| 2023-06-08T12:53:06
| 110,544,338
| 35
| 6
|
MIT
| 2023-06-08T12:53:07
| 2017-11-13T12:16:35
|
C
|
UTF-8
|
R
| false
| false
| 9,383
|
r
|
uint64.R
|
# S3 class tag for uint64 vectors. Values are carried in R double vectors
# and interpreted as unsigned 64-bit integers by the package's C routines
# (all conversion/comparison work is delegated to .Call entry points).
UINT64 <- "uint64"
setOldClass(UINT64)
#' Create a new uint64 vector.
#' @param length vector length
#' @return A zero-initialized uint64 vector of the requested length.
#' @export
uint64 <- function(length=0) {
ret <- double(length)
oldClass(ret) <- UINT64
return(ret)
}
#' Check if the object x is of uint64 class.
#' @param x object
#' @export
is.uint64 <- function(x) {
return(inherits(x, UINT64))
}
#' Identity function for class uint64.
#' @export
identical.uint64 <- function(x, y, num.eq=FALSE, single.NA=FALSE, attrib.as.set=TRUE, ignore.bytecode=TRUE) {
return(identical(x=x, y=y, num.eq=num.eq, single.NA=single.NA, attrib.as.set=attrib.as.set, ignore.bytecode=ignore.bytecode))
}
#' Coerce to uint64.
#' @param x vector
#' @export
as.uint64 <- function(x, ...) {
UseMethod("as.uint64")
}
#' Coerce from factor to uint64.
#' @param x factor vector
#' @export
as.uint64.factor <- function(x, ...) {
# unclass() exposes the underlying integer codes, so this converts the
# factor's codes (not its labels) via as.uint64.integer.
return(as.uint64(unclass(x), ...))
}
#' Coerce from NULL to uint64.
#' @param x NULL vector
#' @export
as.uint64.NULL <- function(x, ...) {
return(uint64())
}
#' Coerce from uint64 to uint64.
#' @param x uint64 vector
#' @export
as.uint64.uint64 <- function(x, ...) {
return(x)
}
#' Coerce double vector to uint64
#' @param x double vector
#' @useDynLib variantkey R_double_to_uint64
#' @export
as.uint64.double <- function(x, ...) {
# Output buffer is preallocated here and filled by the C routine.
ret <- uint64(length(x))
return(.Call("R_double_to_uint64", x, ret))
}
#' Coerce integer vector to uint64
#' @param x integer vector
#' @useDynLib variantkey R_integer_to_uint64
#' @export
as.uint64.integer <- function(x, ...) {
ret <- uint64(length(x))
return(.Call("R_integer_to_uint64", x, ret))
}
#' Coerce character vector to uint64
#' @param x character vector holding decimal number strings
#' @useDynLib variantkey R_decstr_to_uint64
#' @export
as.uint64.character <- function(x, ...) {
ret <- uint64(length(x))
return(.Call("R_decstr_to_uint64", x, ret))
}
setAs("character", UINT64, function(from)as.uint64.character(from))
#' Coerce uint64 vector to character
#' @param x uint64 vector
#' @return Character vector of decimal representations.
#' @useDynLib variantkey R_uint64_to_decstr
#' @export
as.character.uint64 <- function(x, ...) {
ret <- character(length(x))
return(.Call("R_uint64_to_decstr", x, ret))
}
setAs(UINT64, "character", function(from)as.character.uint64(from))
#' Convert hexadecimal character vector to uint64.
#' @param x hexadecimal character vector (16 characters per item)
#' @useDynLib variantkey R_parse_hex_uint64_t
#' @export
as.uint64.hex64 <- function(x, ...) {
ret <- uint64(length(x))
return(.Call("R_parse_hex_uint64_t", as.character(x), ret))
}
# Replication, length assignment and subsetting wrappers. Base `[`, `[[`
# and friends strip custom class attributes, so each wrapper delegates to
# NextMethod() and then restores the uint64 class on the result.
#' Replicate elements of uint64 vectors.
#' @param x uint64 vector to be replicated
#' @export
"rep.uint64" <- function(x, ...) {
cx <- oldClass(x)
ret <- NextMethod()
oldClass(ret) <- cx
return(ret)
}
#' Set the length of uint64 vector.
#' @param x uint64 vector
#' @param value value to set the new length
#' @export
"length<-.uint64" <- function(x, value) {
cx <- oldClass(x)
n <- length(x)
x <- NextMethod()
oldClass(x) <- cx
# When growing, fill the new tail positions with uint64 zero instead of
# the NA padding NextMethod() would leave behind.
if (value > n) {
x[(n + 1):value] <- as.uint64(0)
}
return(x)
}
#' Extract uint64 vector parts
#' @param x uint64 vector
#' @export
"[.uint64" <- function(x,...) {
cx <- oldClass(x)
ret <- NextMethod()
oldClass(ret) <- cx
return(ret)
}
#' Extract uint64 vector parts
#' @param x uint64 vector
#' @export
"[[.uint64" <- function(x,...) {
cx <- oldClass(x)
ret <- NextMethod()
oldClass(ret) <- cx
return(ret)
}
#' Replace parts of uint64 vector
#' @param x uint64 vector
#' @param value uint64 replacement value
#' @export
"[<-.uint64" <- function(x,...,value) {
cx <- oldClass(x)
# Replacement values are coerced so mixed-type assignment stays uint64.
value <- as.uint64(value)
ret <- NextMethod()
oldClass(ret) <- cx
return(ret)
}
#' Replace parts of uint64 vector
#' @param x uint64 vector
#' @param value uint64 replacement value
#' @export
"[[<-.uint64" <- function(x,...,value) {
cx <- oldClass(x)
value <- as.uint64(value)
ret <- NextMethod()
oldClass(ret) <- cx
return(ret)
}
# Element-wise comparison operators. Both operands are coerced to uint64
# and compared in C; the logical result buffer is preallocated to the
# longer operand's length (matching R's usual recycling length).
#' Returns true if x and y are equal.
#' @param x uint64 vector
#' @param y uint64 vector
#' @useDynLib variantkey R_EQ_uint64
#' @export
"==.uint64" <- function(x, y) {
ret <- logical(max(length(x), length(y)))
return(.Call("R_EQ_uint64", as.uint64(x), as.uint64(y), ret))
}
#' Returns true if x and y are different.
#' @param x uint64 vector
#' @param y uint64 vector
#' @useDynLib variantkey R_NE_uint64
#' @export
"!=.uint64" <- function(x, y) {
ret <- logical(max(length(x), length(y)))
return(.Call("R_NE_uint64", as.uint64(x), as.uint64(y), ret))
}
#' Returns true if x is less than y.
#' @param x uint64 vector
#' @param y uint64 vector
#' @useDynLib variantkey R_LT_uint64
#' @export
"<.uint64" <- function(x, y) {
ret <- logical(max(length(x), length(y)))
return(.Call("R_LT_uint64", as.uint64(x), as.uint64(y), ret))
}
#' Returns true if x is less or equal than y.
#' @param x uint64 vector
#' @param y uint64 vector
#' @useDynLib variantkey R_LE_uint64
#' @export
"<=.uint64" <- function(x, y) {
ret <- logical(max(length(x), length(y)))
return(.Call("R_LE_uint64", as.uint64(x), as.uint64(y), ret))
}
#' Returns true if x is greater than y.
#' @param x uint64 vector
#' @param y uint64 vector
#' @useDynLib variantkey R_GT_uint64
#' @export
">.uint64" <- function(x, y) {
ret <- logical(max(length(x), length(y)))
return(.Call("R_GT_uint64", as.uint64(x), as.uint64(y), ret))
}
#' Returns true if x is greater or equal than y.
#' @param x uint64 vector
#' @param y uint64 vector
#' @useDynLib variantkey R_GE_uint64
#' @export
">=.uint64" <- function(x, y) {
ret <- logical(max(length(x), length(y)))
return(.Call("R_GE_uint64", as.uint64(x), as.uint64(y), ret))
}
#' Format uint64 vector for pretty printing.
#' @param x uint64 vector
#' @export
format.uint64 <- function(x, ...) {
# Formatting and printing go through the decimal string representation.
return(format(as.character(x), ...))
}
#' Prints uint64 argument and returns it invisibly.
#' @param x uint64 vector
#' @export
print.uint64 <- function(x, ...) {
return(print(as.character(x), ...))
}
# Internal: combine any number of arguments into one uint64 result.
# Each argument is coerced to uint64 (recursing into lists when requested),
# its class attribute is stripped so the base combiner operates on plain
# doubles, and the uint64 class is restored on the combined result.
# `mode` is the name of the base combiner to use: "c", "cbind" or "rbind".
bindUint64 <- function(mode, recursive=FALSE, ...) {
x <- list(...)
n <- length(x)
for (i in 1:n) {
if (recursive && is.list(x[[i]])) {
x[[i]] <- do.call("c.uint64", c(x[[i]], list(recursive=TRUE)))
} else {
if (!is.uint64(x[[i]])) {
# Preserve element names across the coercion.
m <- names(x[[i]])
x[[i]] <- as.uint64(x[[i]])
names(x[[i]]) <- m
}
oldClass(x[[i]]) <- NULL
}
}
ret <- do.call(mode, x)
oldClass(ret) <- UINT64
return(ret)
}
#' Concatenate uint64 vectors.
#' @param ... Two or more vectors coerced to uint64
#' @param recursive If TRUE, descend into list arguments when combining.
#' @export
c.uint64 <- function(..., recursive=FALSE) {
return(bindUint64(mode="c", recursive=recursive, ...))
}
#' Combine uint64 vectors by columns.
#' @param ... Vectors coerced to uint64 and bound as columns.
#' @export
cbind.uint64 <- function(...) {
return(bindUint64(mode="cbind", recursive=FALSE, ...))
}
#' Combine uint64 vectors by rows.
#' @param ... Vectors coerced to uint64 and bound as rows.
#' @export
rbind.uint64 <- function(...) {
return(bindUint64(mode="rbind", recursive=FALSE, ...))
}
# Internal: drop the "uint64" entry from a class vector, if present.
remUint64Class <- function(x) {
if (length(x)) {
i <- (x == UINT64)
if (any(i)) {
return(x[!i])
}
}
return(x)
}
#' Coerce uint64 vector to data.frame.
#' @param x uint64 vector
#' @export
as.data.frame.uint64 <- function(x, ...) {
# Temporarily remove the uint64 class so base as.data.frame() does not
# dispatch back here, then restore it on each resulting column.
# NOTE(review): setattr() appears to come from data.table — confirm that
# the package imports it.
cx <- oldClass(x)
on.exit(setattr(x, "class", cx))
setattr(x, "class", remUint64Class(cx))
ret <- as.data.frame(x, ...)
n <- length(ret)
for (i in 1:n) {
setattr(ret[[i]], "class", cx)
}
return(ret)
}
# Ordering and set operations. Each wrapper preallocates the working and
# result buffers in R and hands them to the C routine to fill.
#' Sorts a uint64 vector in ascending order.
#' @param x uint64 vector
#' @useDynLib variantkey R_sort_uint64
#' @export
sort.uint64 <- function(x, ...) {
n <- length(x)
tmp <- uint64(n)
ret <- uint64(n)
return(.Call("R_sort_uint64", as.uint64(x), tmp, ret))
}
#' Returns a permutation which rearranges its first argument into ascending order.
#' @param x uint64 vector
#' @return Integer vector of 1-based positions.
#' @useDynLib variantkey R_order_uint64
#' @export
order.uint64 <- function(x) {
n <- length(x)
tmp <- uint64(n)
ret <- uint64(n)
idx <- integer(n)
tdx <- integer(n)
# The C routine produces 0-based indices; +1 converts to R's 1-based ones.
return(.Call("R_order_uint64", as.uint64(x), tmp, ret, idx, tdx) + 1)
}
#' Reverse a uint64 vector.
#' @param x uint64 vector
#' @useDynLib variantkey R_reverse_uint64
#' @export
rev.uint64 <- function(x, ...) {
ret <- uint64(length(x))
return(.Call("R_reverse_uint64", as.uint64(x), ret))
}
#' Eliminates all but the first element from every consecutive group of equal values.
#' @param x uint64 vector
#' @useDynLib variantkey R_unique_uint64
#' @export
unique.uint64 <- function(x, ...) {
ret <- uint64(length(x))
return(.Call("R_unique_uint64", as.uint64(x), ret))
}
#' Returns the intersection of two sorted uint64 vectors.
#' @param x uint64 vector
#' @param y uint64 vector
#' @useDynLib variantkey R_intersect_uint64
#' @export
intersect.uint64 <- function(x, y) {
# The intersection can contain at most min(length(x), length(y)) items;
# the C routine is presumably responsible for trimming the result —
# verify against the C source.
ret <- uint64(min(length(x), length(y)))
return(.Call("R_intersect_uint64", as.uint64(x), as.uint64(y), ret))
}
#' Returns the union of two sorted uint64 vectors.
#' @param x uint64 vector
#' @param y uint64 vector
#' @useDynLib variantkey R_union_uint64
#' @export
union.uint64 <- function(x, y) {
# Worst case (disjoint inputs) needs length(x) + length(y) slots.
ret <- uint64(length(x) + length(y))
return(.Call("R_union_uint64", as.uint64(x), as.uint64(y), ret))
}
|
222dc63f1d0a60da6b2e8be4c864e1add8f10164
|
03de17f27e1ab2dc3b5dc922bb782d589156737a
|
/coffeeBranch.R
|
580e777ab0db5d32e3be4b67ab9aec04e1c85851
|
[] |
no_license
|
morellybutton/Yayu.datacleaning
|
7cf9e9bf43a3f2b5169383d389ffba75758173c6
|
ed00e1fdfa72b84a4a51e7c7af5509ad3ca0b1be
|
refs/heads/master
| 2021-09-09T13:41:52.736490
| 2018-03-16T16:16:43
| 2018-03-16T16:16:43
| 114,379,994
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,013
|
r
|
coffeeBranch.R
|
#combination of monitored branches over year (from flowers to yield)
# For each year and plot, this script merges the flower/fruit-set survey
# (Yield workbooks) with the branch disease survey (Disease workbooks) into
# one long per-branch table, written to Yield/<year>/Branchdata_<plot>.csv.
# Both workbooks store repeated measurement blocks separated by "DATE" rows;
# the date is read from each header row and propagated to the rows below it.
library(gdata)
library(stringr)
library(tidyverse)
setwd("/Volumes/ELDS/ECOLIMITS/Ethiopia/Yayu")
years<-c("2014","2015","2016")
plts<-read.csv(paste0(getwd(),"/plotnums.csv"))
#remove Met station and Forest plots
plts<-plts[grep("Met_",plts$name,invert=T),]
plts<-plts[grep("FC",plts$name,invert=T),]
p<-as.character(plts$name)
p<-p[p!="H7"]
for(j in 1:length(years)){
for(i in 1:length(p)){
#open flower data
# No fruit-set workbook exists for 2014, so use an empty placeholder with
# the same columns instead.
if(years[j]!=2014) dataFs <- data.frame(read.xls(paste0(getwd(),"/Yield/",years[j],"/FruitSet_Combined.xlsx"), sheet=p[i]),stringsAsFactors = F) else dataFs<-data.frame(Plot=as.character(),Coordinate=as.character(),Shrub.id=as.character(),Branch.cm=as.character(),No.of.buds=as.character(),No.of.flower=as.character(),No.of.eberry=as.character(),No.of.leaves=as.character(),NoCLR=as.character(),iCLR=as.character(),NoChl=as.character(),NoHerb=as.character(),SoilMoist=as.character(),Notes=as.character())
#add column names
if(nrow(dataFs)!=0) {
colnames(dataFs)<-c("Plot","Coordinate","Shrub.id","Branch.cm","No.of.buds","No.of.flower","No.of.eberry","No.of.leaves","NoCLR","iCLR","NoChl","NoHerb","SoilMoist","Notes")
#find entry days
# d1 holds the row index of each "DATE" header (plus a sentinel for the
# last row) and the date value found beside it.
d1 <- data.frame(grep("DATE",dataFs[,4]))
d1[(nrow(d1)+1),1]<-nrow(dataFs)
d1$date<-as.Date(dataFs[d1[,1],5])
dataFs$Date<-NA
# Propagate each block's date down to its data rows (+2 skips the DATE
# header and the column-header row that follows it).
for(x in 1:(nrow(d1)-1)){
if(x < (nrow(d1)-1)) dataFs[(d1[x,1]+2):(d1[(x+1),1]-1),"Date"]<-d1[x,2] else dataFs[(d1[x,1]+2):(d1[(x+1),1]),"Date"]<-d1[x,2]
}
dataFs <- dataFs[!is.na(dataFs$Date),]
dataFs$Date<-as.Date(dataFs$Date,origin = "1970-01-01")
dataFs<-data.frame(sapply(dataFs,as.character),stringsAsFactors = F)
}
#open disease data
dataBr <- data.frame(read.xls(paste0(getwd(),"/Disease/",years[j],"/Disease Survey_Combined.xlsx"), sheet=paste0(p[i]," (branches)")),stringsAsFactors = F)
dataBr <- data.frame(lapply(dataBr, as.character), stringsAsFactors=FALSE)
dataBr<-dataBr[,!is.na(dataBr[1,])]
#add column names
colnames(dataBr)<-c("Plot","Coordinate","Shrub.id","Branch.cm","No.of.fruits","NoCBB","NoCBD","No.of.leaves","NoLM","NoCLR","iCLR","NoChl","NoWilt","NoHerb","Notes")
#find entry days
# Same date-block unpacking as for the fruit-set sheet above.
d1 <- data.frame(grep("DATE",dataBr[,4]))
d1[(nrow(d1)+1),1]<-nrow(dataBr)
d1$date<-as.Date(dataBr[d1[,1],5])
for(x in 1:(nrow(d1)-1)){
if(x < (nrow(d1)-1)) dataBr[(d1[x,1]+2):(d1[(x+1),1]-1),"Date"]<-d1[x,2] else dataBr[(d1[x,1]+2):(d1[(x+1),1]),"Date"]<-d1[x,2]
}
dataBr$Date<-as.Date(dataBr$Date,origin = "1970-01-01")
dataBr <- dataBr[!is.na(dataBr$Date),]
dataBr <- dataBr[!is.na(dataBr$Plot),]
dataBr<-data.frame(sapply(dataBr,as.character),stringsAsFactors = F)
# Stack the two surveys; columns absent from one survey are zero-filled
# for that survey's rows.
branch<-data_frame(plot=c(dataFs$Plot,dataBr$Plot),
Shrub.id=c(dataFs$Shrub.id,dataBr$Shrub.id),
Branch.cm=c(dataFs$Branch.cm,dataBr$Branch.cm),
No.of.buds=c(dataFs$No.of.buds,rep(0,nrow(dataBr))),
No.of.flower=c(dataFs$No.of.flower,rep(0,nrow(dataBr))),
No.of.eberry=c(dataFs$No.of.eberry,rep(0,nrow(dataBr))),
No.of.fruits=c(rep(0,nrow(dataFs)),dataBr$No.of.fruits),
NoCBB=c(rep(0,nrow(dataFs)),dataBr$NoCBB),
NoCBD=c(rep(0,nrow(dataFs)),dataBr$NoCBD),
No.of.leaves=c(dataFs$No.of.leaves,dataBr$No.of.leaves),
NoLM=c(rep(0,nrow(dataFs)),dataBr$NoLM),
NoCLR=c(dataFs$NoCLR,dataBr$NoCLR),
iCLR=c(dataFs$iCLR,dataBr$iCLR),
NoChl=c(dataFs$NoChl,dataBr$NoChl),
NoWilt=c(rep(0,nrow(dataFs)),dataBr$NoWilt),
NoHerb=c(dataFs$NoHerb,dataBr$NoHerb),
Notes=c(dataFs$Notes,dataBr$Notes),
Date=c(dataFs$Date,dataBr$Date))
tmp<- branch %>% arrange(Shrub.id,Branch.cm)
write.csv(tmp,paste0(getwd(),"/Yield/",years[j],"/Branchdata_",p[i],".csv"))
}
}
|
102bd8a0729df01f1acd7fe78c3c89642f019371
|
c780cd13e0c4d9d50ea776a2701a6a2b322bb548
|
/Proyecto_Final.R
|
2eeba01389bdf302d30b2ea2d71c4b3f57e54556
|
[
"MIT"
] |
permissive
|
RofoRojas/ProyectoFinal_DataWrangling
|
110c086a2deadf651238556e9a3dc7479ecb841f
|
0fecbe14553944e5eaf33ac50949cb082e817f25
|
refs/heads/master
| 2020-09-09T19:16:13.073915
| 2019-11-22T16:07:49
| 2019-11-22T16:07:49
| 221,539,585
| 0
| 1
|
MIT
| 2019-11-21T22:00:21
| 2019-11-13T19:51:45
|
HTML
|
UTF-8
|
R
| false
| false
| 28,488
|
r
|
Proyecto_Final.R
|
# ---------------------------------------------------------------------------
# Setup and data loading.
# Reads every Excel workbook in Data/ (skipping the 9-line header block) and
# materialises one data frame per file, prefixed "m_" (municipal) or "d_"
# (departamental): e.g. "poblacion_municipal.xlsx" -> m_poblacion.
# ---------------------------------------------------------------------------
library(readxl)
library(dplyr)
library(lubridate)
library(stringr)
library(tidyverse)
library(DataExplorer)
library(ggplot2)
library(plotly)
library(corrplot)
# NOTE(review): rm(list = ls()) wipes the caller's whole global environment;
# acceptable for a standalone script, hostile when sourced interactively.
rm(list = ls())
# Open every file that exists in the Data folder
archivos <- list.files("Data")
for (nombre in archivos) {
# X__1 is the auto-generated name readxl gives an unnamed Excel column --
# presumably a blank row-number column; confirm against the workbooks.
temp_df <- read_excel(path = paste("Data", nombre, sep = "/"), skip = 9) %>% select(-X__1)
# Keep only fully populated rows (drops trailing footnote/summary rows).
temp_df <- temp_df %>% filter(complete.cases(temp_df))
# Column 1 is the numeric area code; column 2 the area name (ordered factor).
temp_df[1] <- as.integer(pull(temp_df, 1))
temp_df[2] <- factor(pull(temp_df,2), ordered= TRUE)
# Prefix with m_/d_ and strip the geographic-level suffix from the name.
pre<- ifelse(str_detect(nombre, "municipal"), "m_", "d_")
nombre <- str_remove(nombre, "(_municipal|_departamental)\\.xlsx$")
nombre <- paste(pre,nombre, sep = "")
assign(nombre, temp_df)
}
rm(nombre, pre, archivos, temp_df)
# EDA: quick structural summary of the two population tables.
introduce(d_poblacion)
introduce(m_poblacion)
## Reshape the "caracteristicas" tables
# The characteristics tables are split into four long tables: place of birth,
# residence in 2013, disabilities, and children per woman of fertile age.
##### Departmental
# Columns 4:7 hold the place-of-birth counts; the percentage is relative to
# each department's total population.
d_lugar_nacimiento <- d_caracteristicas %>% select(1:7) %>%
gather(`Lugar de Nacimiento`, `Personas`, 4:7) %>%
mutate(`Porcentaje de la Poblacion`= round(`Personas`/`Total de personas`*100, 2))
# Columns 8:12 hold the 2013 place-of-residence counts; the '__1' suffix is
# readxl's de-duplication of repeated Excel headers and is stripped here.
d_recidencia_2013 <- d_caracteristicas %>% select(1:3, 8:12) %>%
gather(key=`Lugar de Residencia en 2013`, value = `Personas`, 4:8) %>%
mutate(`Porcentaje de la Poblacion`= round(`Personas`/`Total de personas`*100, 2)) %>%
mutate(`Lugar de Residencia en 2013`= str_remove(`Lugar de Residencia en 2013`, '__1$'))
# Rename columns: readxl de-duplicated the repeated Excel headers with "__n"
# suffixes, so give columns 14-31 (six disability domains x sin/con/NA)
# descriptive names in one vectorized assignment.
names(d_caracteristicas)[14:31] <- c(
  "Sin dificultad para Ver", "Con dificultad para Ver", "NA_Ver",
  "Sin dificultad para Oir", "Con dificultad para Oir", "NA_Oir",
  "Sin dificultad para Caminar", "Con dificultad para Caminar", "NA_Caminar",
  "Sin dificultad para Recordar", "Con dificultad para Recordar", "NA_Recordar",
  "Sin dificultad de Cuidado Personal", "Con dificultad de Cuidado Personal", "NA_Cuidado_Personal",
  "Sin dificultad para Comunicarse", "Con dificultad para Comunicarse", "NA_Comunicarse"
)
# Columns 33-45: children born / children surviving per woman of fertile age.
# Column 32 ("Total de mujeres en edad fértil") keeps its original name.
names(d_caracteristicas)[33:45] <- c(
  "Con 0 hijos nacidos", "Con 1 hijos nacidos", "Con 2 hijos nacidos",
  "Con 3 hijos nacidos", "Con 4 hijos nacidos", "Con 5 o más hijos nacidos",
  "NA_Hijos_Nacidos",
  "Con 0 hijos sobrevivientes", "Con 1 hijos sobrevivientes",
  "Con 2 hijos sobrevivientes", "Con 3 hijos sobrevivientes",
  "Con 4 hijos sobrevivientes", "Con 5 o más hijos sobrevivientes"
)
# Long table of disability indicators (columns 14:31 renamed above);
# percentage is relative to the population aged 4 or more.
d_dificultades <- d_caracteristicas %>% gather(key = "Dificultades", value = "Cantidad de Personas", 14:31) %>%
  select(1:2, 13, Dificultades, `Cantidad de Personas`) %>%
  mutate(`Porcentaje con Dificultad` = round(`Cantidad de Personas` / `Población de 4 años o más` * 100, 2))
# Long table of children born/surviving per woman of fertile age.
# FIX: the original omitted the "* 100" factor, so `Porcentaje de Mujeres`
# held a fraction while every other "Porcentaje" column in this script is a
# true percentage; scaled by 100 here for consistency.
d_hijos_x_mujeres <- d_caracteristicas %>% gather(key = 'Situación', value = 'Cantidad de Mujeres', 33:45) %>%
  select(1:2, 32, `Situación`, `Cantidad de Mujeres`) %>%
  mutate(`Porcentaje de Mujeres` = round(`Cantidad de Mujeres` / `Total de mujeres en edad fértil` * 100, 2))
# The wide source table is no longer needed.
rm(d_caracteristicas)
#### Municipal: same reshaping as the departmental block above.
m_lugar_nacimiento <- m_caracteristicas %>% select(1:7) %>%
gather(`Lugar de Nacimiento`, `Personas`, 4:7) %>%
mutate(`Porcentaje de la Poblacion`= round(`Personas`/`Total de personas`*100, 2))
# '__1' is the readxl duplicate-header suffix.
m_recidencia_2013 <- m_caracteristicas %>% select(1:3, 8:12) %>%
gather(key=`Lugar de Residencia en 2013`, value = `Personas`, 4:8) %>%
mutate(`Porcentaje de la Poblacion`= round(`Personas`/`Total de personas`*100, 2)) %>%
mutate(`Lugar de Residencia en 2013`= str_remove(`Lugar de Residencia en 2013`, '__1$'))
# Rename columns (municipal mirror of the departmental rename): columns 14-31
# are the six disability domains x sin/con/NA, assigned in one vectorized call.
names(m_caracteristicas)[14:31] <- c(
  "Sin dificultad para Ver", "Con dificultad para Ver", "NA_Ver",
  "Sin dificultad para Oir", "Con dificultad para Oir", "NA_Oir",
  "Sin dificultad para Caminar", "Con dificultad para Caminar", "NA_Caminar",
  "Sin dificultad para Recordar", "Con dificultad para Recordar", "NA_Recordar",
  "Sin dificultad de Cuidado Personal", "Con dificultad de Cuidado Personal", "NA_Cuidado_Personal",
  "Sin dificultad para Comunicarse", "Con dificultad para Comunicarse", "NA_Comunicarse"
)
# Columns 33-45: children born / children surviving per woman of fertile age;
# column 32 ("Total de mujeres en edad fértil") keeps its original name.
names(m_caracteristicas)[33:45] <- c(
  "Con 0 hijos nacidos", "Con 1 hijos nacidos", "Con 2 hijos nacidos",
  "Con 3 hijos nacidos", "Con 4 hijos nacidos", "Con 5 o más hijos nacidos",
  "NA_Hijos_Nacidos",
  "Con 0 hijos sobrevivientes", "Con 1 hijos sobrevivientes",
  "Con 2 hijos sobrevivientes", "Con 3 hijos sobrevivientes",
  "Con 4 hijos sobrevivientes", "Con 5 o más hijos sobrevivientes"
)
# Long table of disability indicators (municipal), as a share of the 4+
# population.
m_dificultades <- m_caracteristicas %>% gather(key = "Dificultades", value = "Cantidad de Personas", 14:31) %>%
  select(1:2, 13, Dificultades, `Cantidad de Personas`) %>%
  mutate(`Porcentaje con Dificultad` = round(`Cantidad de Personas` / `Población de 4 años o más` * 100, 2))
# Long table of children born/surviving per woman of fertile age.
# FIX: the original omitted "* 100", leaving a fraction in a column named
# "Porcentaje"; scaled by 100 for consistency with the rest of the script.
m_hijos_x_mujeres <- m_caracteristicas %>% gather(key = 'Situación', value = 'Cantidad de Mujeres', 33:45) %>%
  select(1:2, 32, `Situación`, `Cantidad de Mujeres`) %>%
  mutate(`Porcentaje de Mujeres` = round(`Cantidad de Mujeres` / `Total de mujeres en edad fértil` * 100, 2))
# The wide source table is no longer needed.
rm(m_caracteristicas)
## Reshape the "educacion" tables
# The education table is split into five tables: 1. education level,
# 2. causes of non-attendance, 3. literacy, 4. attendance, 5. place of study.
### Departmental
# Columns 12:20: reasons for not attending school (ages 4-29).
d_causas_inasistencia <- d_educacion %>%
gather(key = 'Causa de Inasistencia 4-29 años', value = 'Cantidad de Personas', 12:20) %>%
select(1:2, `Causa de Inasistencia 4-29 años`, `Cantidad de Personas`) %>% arrange(Código)
# Columns 22:23: literate / illiterate counts, as a share of the 7+ population.
d_alfabetizacion <- d_educacion %>%
gather(key = 'Situacion' ,value= 'Personas', 22, 23) %>% arrange(Código) %>%
select(1:2, 21, `Situacion`, `Personas`) %>%
mutate(`Porcentaje Alfabetizacion`= round(`Personas`/`Población de 7 años o más`*100,2))
# Columns 24:25: attends / does not attend an educational establishment.
d_asistencia <- d_educacion %>%
gather(key = 'Situacion' ,value= 'Personas', 24, 25) %>% arrange(Código) %>%
select(1:2, 21, `Situacion`, `Personas`) %>%
mutate(`Porcentaje Asistencia`= round(`Personas`/`Población de 7 años o más`*100,2))
# Columns 26:29: place of study. 'No especificado__1' is a readxl
# duplicate-header name, recoded to an explicit NA bucket.
d_lugar_estudio <- d_educacion %>%
gather(key = 'Lugar de Estudio' ,value= 'Cantidad de Personas x Lugar', 26:29) %>% arrange(Código) %>%
select(1:2, 21, `Lugar de Estudio`, `Cantidad de Personas x Lugar`) %>%
mutate(`Porcentaje por Lugar de Estudio`= round(`Cantidad de Personas x Lugar`/`Población de 7 años o más`*100,2)) %>%
mutate(`Lugar de Estudio` = recode(`Lugar de Estudio`, 'No especificado__1'= 'NA_Lugar_Estudio'))
# Finally overwrite d_educacion itself with the long education-level table
# (columns 3:11). NOTE(review): must run last -- the four tables above read
# the wide d_educacion.
d_educacion<- d_educacion %>% gather(key = 'Nivel Educativo', value = 'Cantidad de Personas', 3:11) %>%
select(1:2, `Nivel Educativo`,`Cantidad de Personas`) %>% arrange(Código)
### Municipal: same five-way education reshape as the departmental block.
m_causas_inasistencia <- m_educacion %>%
gather(key = 'Causa de Inasistencia 4-29 años', value = 'Cantidad de Personas', 12:20) %>%
select(1:2, `Causa de Inasistencia 4-29 años`, `Cantidad de Personas`) %>% arrange(Código)
m_alfabetizacion <- m_educacion %>%
gather(key = 'Situacion' ,value= 'Personas', 22, 23) %>% arrange(Código) %>%
select(1:2, 21, `Situacion`, `Personas`) %>%
mutate(`Porcentaje Alfabetizacion`= round(`Personas`/`Población de 7 años o más`*100,2))
m_asistencia <- m_educacion %>%
gather(key = 'Situacion' ,value= 'Personas', 24, 25) %>% arrange(Código) %>%
select(1:2, 21, `Situacion`, `Personas`) %>%
mutate(`Porcentaje Asistencia`= round(`Personas`/`Población de 7 años o más`*100,2))
m_lugar_estudio <- m_educacion %>%
gather(key = 'Lugar de Estudio' ,value= 'Cantidad de Personas x Lugar', 26:29) %>% arrange(Código) %>%
select(1:2, 21, `Lugar de Estudio`, `Cantidad de Personas x Lugar`) %>%
mutate(`Porcentaje por Lugar de Estudio`= round(`Cantidad de Personas x Lugar`/`Población de 7 años o más`*100,2)) %>%
mutate(`Lugar de Estudio` = recode(`Lugar de Estudio`, 'No especificado__1'= 'NA_Lugar_Estudio'))
# Overwrite m_educacion with the long education-level table last (see note in
# the departmental block).
m_educacion<- m_educacion %>% gather(key = 'Nivel Educativo', value = 'Cantidad de Personas', 3:11) %>%
select(1:2, `Nivel Educativo`,`Cantidad de Personas`) %>% arrange(Código)
## Reshape the "empleo" tables
# One long table per level: labour-force status (columns 5:7) as a share of
# the economically active population. Same table, just pivoted to long form.
# Departmental
d_empleo <- d_empleo %>% gather(key = 'Situación Laboral', value = 'Personas', 5:7) %>% arrange(`Código`) %>%
select(1:2, 4, `Situación Laboral`, `Personas`) %>%
mutate(`Porcentaje de Economicamente Activa`=round(`Personas`/`Población Económicamente Activa`*100, 2))
# Municipal
m_empleo <- m_empleo %>% gather(key = 'Situación Laboral', value = 'Personas', 5:7) %>% arrange(`Código`) %>%
select(1:2, 4, `Situación Laboral`, `Personas`) %>%
mutate(`Porcentaje de Economicamente Activa`=round(`Personas`/`Población Económicamente Activa`*100, 2))
## Reshape the "hogares" tables
# Household distribution by area (columns 3:4) goes to a long table; the
# remaining summary columns (5:6) stay behind in the *_hogares table.
# Departmental
d_distribucion_hogares <- d_hogares %>% gather(key = 'Area', value = 'Distribución de Hogares', 3:4) %>% arrange(`Código`) %>%
select(1:2, `Area`, `Distribución de Hogares`) %>%
mutate(Area= str_remove(`Area`, "Distribución de hogares por área"))
d_hogares <- d_hogares %>% select(1,2,5,6)
# Municipal
m_distribucion_hogares <- m_hogares %>% gather(key = 'Area', value = 'Distribución de Hogares', 3:4) %>% arrange(`Código`) %>%
select(1:2, `Area`, `Distribución de Hogares`) %>%
mutate(Area= str_remove(`Area`, "Distribución de hogares por área"))
m_hogares <- m_hogares %>% select(1,2,5,6)
## Reshape the "poblacion" tables
# The population table is split into seven tables: 1. gender, 2. 15-year age
# groups, 3. 5-year age groups, 4. demographic zone, 5. relation to the head
# of household, 6. people living on the street, and 7. civil status.
# Departmental
d_genero <- d_poblacion %>% gather(key = 'Genero' , value= 'Cantidad', 4:5) %>% arrange(`Código`) %>%
select(1:3, `Genero`, `Cantidad`)
# factor_key = TRUE keeps the age groups in their original column order.
d_edad_15 <- d_poblacion %>% gather(key = 'Grupo de Edades 15 años', value='Cantidad', 6:10, factor_key = TRUE) %>% arrange(`Código`) %>%
select(1:3, `Grupo de Edades 15 años`, `Cantidad`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad`/ `Total de personas`*100, 2))
d_edad_5 <- d_poblacion %>% gather(key = 'Grupo de Edades 5 años', value='Cantidad', 11:31, factor_key = TRUE) %>% arrange(`Código`) %>%
select(1:3, `Grupo de Edades 5 años`, `Cantidad`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad`/ `Total de personas`*100, 2))
d_zona <- d_poblacion %>% gather(key = 'Zona', value='Cantidad de Personas', 32:33) %>% arrange(`Código`) %>%
select(1:3, `Zona`, `Cantidad de Personas`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad de Personas`/ `Total de personas`*100, 2))
d_relacion_jefe <- d_poblacion %>% gather(key = 'Relacion con el Jefe', value = 'Cantidad de Personas', 34:44) %>% arrange(`Código`) %>%
select(1:3, `Relacion con el Jefe`, `Cantidad de Personas`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad de Personas`/ `Total de personas`*100, 2))
# Column 45: count of people living on the street (no reshape needed).
d_situacion_calle <- d_poblacion %>%
select(1:3, 45)
# Civil status (columns 47:52) as a share of the 10+ population (column 46).
d_estado_civil <- d_poblacion %>% gather(key = 'Estado Civil', value = 'Cantidad', 47:52) %>% arrange(`Código`) %>%
select(1:2,46, `Estado Civil`, `Cantidad`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad`/`Población de 10 años o más`*100, 2))
rm(d_poblacion)
# Municipal: same seven-way population reshape as the departmental block.
m_genero <- m_poblacion %>% gather(key = 'Genero' , value= 'Cantidad', 4:5) %>% arrange(`Código`) %>%
select(1:3, `Genero`, `Cantidad`)
m_edad_15 <- m_poblacion %>% gather(key = 'Grupo de Edades 15 años', value='Cantidad', 6:10, factor_key = TRUE) %>% arrange(`Código`) %>%
select(1:3, `Grupo de Edades 15 años`, `Cantidad`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad`/ `Total de personas`*100, 2))
m_edad_5 <- m_poblacion %>% gather(key = 'Grupo de Edades 5 años', value='Cantidad', 11:31, factor_key = TRUE) %>% arrange(`Código`) %>%
select(1:3, `Grupo de Edades 5 años`, `Cantidad`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad`/ `Total de personas`*100, 2))
m_zona <- m_poblacion %>% gather(key = 'Zona', value='Cantidad de Personas', 32:33) %>% arrange(`Código`) %>%
select(1:3, `Zona`, `Cantidad de Personas`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad de Personas`/ `Total de personas`*100, 2))
m_relacion_jefe <- m_poblacion %>% gather(key = 'Relacion con el Jefe', value = 'Cantidad de Personas', 34:44) %>% arrange(`Código`) %>%
select(1:3, `Relacion con el Jefe`, `Cantidad de Personas`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad de Personas`/ `Total de personas`*100, 2))
m_situacion_calle <- m_poblacion %>%
select(1:3, 45)
m_estado_civil <- m_poblacion %>% gather(key = 'Estado Civil', value = 'Cantidad', 47:52) %>% arrange(`Código`) %>%
select(1:2,46, `Estado Civil`, `Cantidad`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad`/`Población de 10 años o más`*100, 2))
rm(m_poblacion)
## Reshape the "pueblo" table
# The pueblo table is reformatted into two extra tables -- linguistic
# community and mother tongue -- leaving d_pueblo itself with only the
# people-of-belonging counts.
# Departmental
d_comunidad_linguistica <- d_pueblo %>% gather(key = 'Comunidad Linguistica', value = 'Cantidad', 10:31) %>% arrange(`Código`) %>%
  select(1:3, `Comunidad Linguistica`, `Cantidad`) %>%
  mutate(`Porcentaje de Total` = round(`Cantidad` / `Total de personas` * 100, 2))
# Mother tongue (columns 32:61).
# FIX: the original stripped the duplicate-header suffix with the pattern
# "...1", but in a regex '.' is a wildcard, so it deleted the first arbitrary
# three characters followed by a '1' anywhere in the name, corrupting language
# names. Anchor to the end of the string and escape the dots, accepting both
# readxl suffix conventions ("__1" and "...1").
d_idioma_materno <- d_pueblo %>%
  gather(key = 'Idioma Materno', value = 'Cantidad', 32:61) %>% arrange(`Código`) %>%
  select(1:3, `Idioma Materno`, `Cantidad`) %>%
  mutate(`Porcentaje de Total` = round(`Cantidad` / `Total de personas` * 100, 2)) %>%
  mutate(`Idioma Materno` = str_remove(`Idioma Materno`, "(__\\d+|\\.\\.\\.\\d+)$"))
# Overwrite d_pueblo with the long people-of-belonging table (columns 4:9).
d_pueblo <- d_pueblo %>% gather(key = 'Pueblo de Pertenencia', value = 'Cantidad', 4:9) %>% arrange(`Código`) %>%
  select(1:3, `Pueblo de Pertenencia`, `Cantidad`) %>%
  mutate(`Porcentaje de Total` = round(`Cantidad` / `Total de personas` * 100, 2))
## Reshape the "tecnologia" tables
# Each table is split in two: individual-use counts (long form) and an
# aggregate table (columns 13:16 kept wide).
#### Departmental
# The three "No Declarado" columns are readxl duplicates; disambiguate them.
d_tecnologia <- d_tecnologia %>% rename('NA_Celular'=`No Declarado`, 'NA_Computadora'=`No Declarado__1`, 'NA_Internet'=`No Declarado__2`)
d_tecnologia_agrupado <- d_tecnologia %>% select(1:3, 13:16)
d_tecnologia <-d_tecnologia %>% select(-(13:16)) %>%
gather(key= 'Uso de Tecnologias', value= 'Cantidad de Personas', `Usa Celular`:`NA_Internet`) %>% arrange(`Código`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad de Personas`/ `Población de 7 años o más`*100, 2))
#### Municipal
m_tecnologia <- m_tecnologia %>% rename('NA_Celular'=`No Declarado`, 'NA_Computadora'=`No Declarado__1`, 'NA_Internet'=`No Declarado__2`)
m_tecnologia_agrupado <- m_tecnologia %>% select(1:3, 13:16)
m_tecnologia <-m_tecnologia %>% select(-(13:16)) %>%
gather(key= 'Uso de Tecnologias', value= 'Cantidad de Personas', `Usa Celular`:`NA_Internet`) %>% arrange(`Código`) %>%
mutate(`Porcentaje de Poblacion`= round(`Cantidad de Personas`/ `Población de 7 años o más`*100, 2))
## Reshape the "vivienda" tables
# Split into: occupation type (13:16), predominant wall (17:27), roof (28:35)
# and floor (36:43) materials, and finally dwelling type (5:12). The "__\d"
# suffixes stripped below are readxl duplicate-header artifacts.
#### Departmental
d_tipo_ocupacion <- d_vivienda %>% gather(key = 'Tipo de Ocupacion', value = 'Cantidad de Viviendas', 13:16) %>% arrange(`Código`) %>%
select(1:2,4, `Tipo de Ocupacion`, `Cantidad de Viviendas`) %>%
mutate(`Porcentaje de Viviendas Particulares`= round(`Cantidad de Viviendas`/ `Total de viviendas particulares`*100, 2))
d_material_pared <- d_vivienda %>% gather(key = 'Material Predominante de la Pared', value = 'Cantidad de Viviendas', 17:27) %>% arrange(`Código`) %>%
select(1:2,4, `Material Predominante de la Pared`, `Cantidad de Viviendas`) %>%
mutate(`Porcentaje de Viviendas Particulares`= round(`Cantidad de Viviendas`/ `Total de viviendas particulares`*100, 2)) %>%
mutate(`Material Predominante de la Pared` = str_remove(`Material Predominante de la Pared`, "__\\d"))
d_material_techo <- d_vivienda %>% gather(key = 'Material Predominante del Techo', value = 'Cantidad de Viviendas', 28:35) %>% arrange(`Código`) %>%
select(1:2,4, `Material Predominante del Techo`, `Cantidad de Viviendas`) %>%
mutate(`Porcentaje de Viviendas Particulares`= round(`Cantidad de Viviendas`/ `Total de viviendas particulares`*100, 2)) %>%
mutate(`Material Predominante del Techo` = str_remove(`Material Predominante del Techo`, "__\\d"))
d_material_piso <- d_vivienda %>% gather(key = 'Material Predominante del Piso', value = 'Cantidad de Viviendas', 36:43) %>% arrange(`Código`) %>%
select(1:2,4, `Material Predominante del Piso`, `Cantidad de Viviendas`) %>%
mutate(`Porcentaje de Viviendas Particulares`= round(`Cantidad de Viviendas`/ `Total de viviendas particulares`*100, 2)) %>%
mutate(`Material Predominante del Piso` = str_remove(`Material Predominante del Piso`, "__\\d"))
# Overwrite d_vivienda with the long dwelling-type table last (the pipelines
# above read the wide table).
d_vivienda <- d_vivienda %>% gather(key = 'Tipo de Vivienda', value = 'Cantidad de Viviendas', 5:12) %>% arrange(`Código`) %>%
select(1:3, `Tipo de Vivienda`, `Cantidad de Viviendas`) %>%
mutate(`Porcentaje de Viviendas`= round(`Cantidad de Viviendas`/ `Total de viviendas`*100, 2))
#### Municipal
m_tipo_ocupacion <- m_vivienda %>% gather(key = 'Tipo de Ocupacion', value = 'Cantidad de Viviendas', 13:16) %>% arrange(`Código`) %>%
select(1:2,4, `Tipo de Ocupacion`, `Cantidad de Viviendas`) %>%
mutate(`Porcentaje de Viviendas Particulares`= round(`Cantidad de Viviendas`/ `Total de viviendas particulares`*100, 2))
m_material_pared <- m_vivienda %>% gather(key = 'Material Predominante de la Pared', value = 'Cantidad de Viviendas', 17:27) %>% arrange(`Código`) %>%
select(1:2,4, `Material Predominante de la Pared`, `Cantidad de Viviendas`) %>%
mutate(`Porcentaje de Viviendas Particulares`= round(`Cantidad de Viviendas`/ `Total de viviendas particulares`*100, 2)) %>%
mutate(`Material Predominante de la Pared` = str_remove(`Material Predominante de la Pared`, "__\\d"))
m_material_techo <- m_vivienda %>% gather(key = 'Material Predominante del Techo', value = 'Cantidad de Viviendas', 28:35) %>% arrange(`Código`) %>%
select(1:2,4, `Material Predominante del Techo`, `Cantidad de Viviendas`) %>%
mutate(`Porcentaje de Viviendas Particulares`= round(`Cantidad de Viviendas`/ `Total de viviendas particulares`*100, 2)) %>%
mutate(`Material Predominante del Techo` = str_remove(`Material Predominante del Techo`, "__\\d"))
m_material_piso <- m_vivienda %>% gather(key = 'Material Predominante del Piso', value = 'Cantidad de Viviendas', 36:43) %>% arrange(`Código`) %>%
select(1:2,4, `Material Predominante del Piso`, `Cantidad de Viviendas`) %>%
mutate(`Porcentaje de Viviendas Particulares`= round(`Cantidad de Viviendas`/ `Total de viviendas particulares`*100, 2)) %>%
mutate(`Material Predominante del Piso` = str_remove(`Material Predominante del Piso`, "__\\d"))
m_vivienda <- m_vivienda %>% gather(key = 'Tipo de Vivienda', value = 'Cantidad de Viviendas', 5:12) %>% arrange(`Código`) %>%
select(1:3, `Tipo de Vivienda`, `Cantidad de Viviendas`) %>%
mutate(`Porcentaje de Viviendas`= round(`Cantidad de Viviendas`/ `Total de viviendas`*100, 2))
# ---------------------------------------------------------------------------
# Export every reshaped table to "Base de Datos/" as CSV.
# FIX: the two *_tecnologia_agrupado calls were missing the ".csv" extension,
# producing extensionless files inconsistent with every other export.
# ---------------------------------------------------------------------------
write.csv(d_alfabetizacion, file = 'Base de Datos/d_alfabetizacion.csv', row.names = FALSE)
write.csv(d_asistencia, file = 'Base de Datos/d_asistencia.csv', row.names = FALSE)
write.csv(d_causas_inasistencia, file = 'Base de Datos/d_causas_inasistencia.csv', row.names = FALSE)
write.csv(d_dificultades, file = 'Base de Datos/d_dificultades.csv', row.names = FALSE)
write.csv(d_distribucion_hogares, file = 'Base de Datos/d_distribucion_hogares.csv', row.names = FALSE)
write.csv(d_edad_15, file = 'Base de Datos/d_edad_15.csv', row.names = FALSE)
write.csv(d_edad_5, file = 'Base de Datos/d_edad_5.csv', row.names = FALSE)
write.csv(d_educacion, file = 'Base de Datos/d_educacion.csv', row.names = FALSE)
write.csv(d_empleo, file = 'Base de Datos/d_empleo.csv', row.names = FALSE)
write.csv(d_estado_civil, file = 'Base de Datos/d_estado_civil.csv', row.names = FALSE)
write.csv(d_genero, file = 'Base de Datos/d_genero.csv', row.names = FALSE)
write.csv(d_hijos_x_mujeres, file = 'Base de Datos/d_hijos_x_mujeres.csv', row.names = FALSE)
write.csv(d_hogares, file = 'Base de Datos/d_hogares.csv', row.names = FALSE)
write.csv(d_lugar_estudio, file = 'Base de Datos/d_lugar_estudio.csv', row.names = FALSE)
write.csv(d_lugar_nacimiento, file = 'Base de Datos/d_lugar_nacimiento.csv', row.names = FALSE)
write.csv(d_material_pared, file = 'Base de Datos/d_material_pared.csv', row.names = FALSE)
write.csv(d_material_piso, file = 'Base de Datos/d_material_piso.csv', row.names = FALSE)
write.csv(d_material_techo, file = 'Base de Datos/d_material_techo.csv', row.names = FALSE)
write.csv(d_recidencia_2013, file = 'Base de Datos/d_recidencia_2013.csv', row.names = FALSE)
write.csv(d_relacion_jefe, file = 'Base de Datos/d_relacion_jefe.csv', row.names = FALSE)
write.csv(d_situacion_calle, file = 'Base de Datos/d_situacion_calle.csv', row.names = FALSE)
write.csv(d_tecnologia, file = 'Base de Datos/d_tecnologia.csv', row.names = FALSE)
write.csv(d_tecnologia_agrupado, file = 'Base de Datos/d_tecnologia_agrupado.csv', row.names = FALSE)
write.csv(d_tipo_ocupacion, file = 'Base de Datos/d_tipo_ocupacion.csv', row.names = FALSE)
write.csv(d_vivienda, file = 'Base de Datos/d_vivienda.csv', row.names = FALSE)
write.csv(d_zona, file = 'Base de Datos/d_zona.csv', row.names = FALSE)
write.csv(d_comunidad_linguistica, file = 'Base de Datos/d_comunidad_linguistica.csv', row.names = FALSE)
write.csv(d_pueblo, file = 'Base de Datos/d_pueblo.csv', row.names = FALSE)
write.csv(d_idioma_materno, file = 'Base de Datos/d_idioma_materno.csv', row.names = FALSE)
write.csv(m_alfabetizacion, file = 'Base de Datos/m_alfabetizacion.csv', row.names = FALSE)
write.csv(m_asistencia, file = 'Base de Datos/m_asistencia.csv', row.names = FALSE)
write.csv(m_causas_inasistencia, file = 'Base de Datos/m_causas_inasistencia.csv', row.names = FALSE)
write.csv(m_dificultades, file = 'Base de Datos/m_dificultades.csv', row.names = FALSE)
write.csv(m_distribucion_hogares, file = 'Base de Datos/m_distribucion_hogares.csv', row.names = FALSE)
write.csv(m_edad_15, file = 'Base de Datos/m_edad_15.csv', row.names = FALSE)
write.csv(m_edad_5, file = 'Base de Datos/m_edad_5.csv', row.names = FALSE)
write.csv(m_educacion, file = 'Base de Datos/m_educacion.csv', row.names = FALSE)
write.csv(m_empleo, file = 'Base de Datos/m_empleo.csv', row.names = FALSE)
write.csv(m_estado_civil, file = 'Base de Datos/m_estado_civil.csv', row.names = FALSE)
write.csv(m_genero, file = 'Base de Datos/m_genero.csv', row.names = FALSE)
write.csv(m_hijos_x_mujeres, file = 'Base de Datos/m_hijos_x_mujeres.csv', row.names = FALSE)
write.csv(m_hogares, file = 'Base de Datos/m_hogares.csv', row.names = FALSE)
write.csv(m_lugar_estudio, file = 'Base de Datos/m_lugar_estudio.csv', row.names = FALSE)
write.csv(m_lugar_nacimiento, file = 'Base de Datos/m_lugar_nacimiento.csv', row.names = FALSE)
write.csv(m_material_pared, file = 'Base de Datos/m_material_pared.csv', row.names = FALSE)
write.csv(m_material_piso, file = 'Base de Datos/m_material_piso.csv', row.names = FALSE)
write.csv(m_material_techo, file = 'Base de Datos/m_material_techo.csv', row.names = FALSE)
write.csv(m_recidencia_2013, file = 'Base de Datos/m_recidencia_2013.csv', row.names = FALSE)
write.csv(m_relacion_jefe, file = 'Base de Datos/m_relacion_jefe.csv', row.names = FALSE)
write.csv(m_situacion_calle, file = 'Base de Datos/m_situacion_calle.csv', row.names = FALSE)
write.csv(m_tecnologia, file = 'Base de Datos/m_tecnologia.csv', row.names = FALSE)
write.csv(m_tecnologia_agrupado, file = 'Base de Datos/m_tecnologia_agrupado.csv', row.names = FALSE)
write.csv(m_tipo_ocupacion, file = 'Base de Datos/m_tipo_ocupacion.csv', row.names = FALSE)
write.csv(m_vivienda, file = 'Base de Datos/m_vivienda.csv', row.names = FALSE)
write.csv(m_zona, file = 'Base de Datos/m_zona.csv', row.names = FALSE)
# ---------------------------------------------------------------------------
# Exploratory plots (plotly) and ad-hoc queries.
# ---------------------------------------------------------------------------
d_edad_5 %>% plot_ly(x =~`Grupo de Edades 5 años`, y = ~`Porcentaje de Poblacion`, color= ~Departamento,colors = "Accent", barmode = 'group', type = 'bar')%>%
layout(title = 'Grupos de Edades')
d_estado_civil %>% plot_ly(x =~`Estado Civil`, y = ~`Porcentaje de Poblacion`, color= ~Departamento,colors = "Accent", barmode = 'group', type = 'bar')%>%
layout(title = 'Estado Civil')
d_estado_civil %>% plot_ly(labels =~`Estado Civil`, values = ~`Porcentaje de Poblacion` ,colors = "Accent", type = 'pie')%>%
layout(title = 'Estado Civil Pie')
d_educacion %>% plot_ly(labels = ~`Nivel Educativo`, values = ~`Cantidad de Personas`, type = 'pie') %>%
layout(title = 'Personas por Nivel Educativo')
d_empleo %>% plot_ly(labels = ~`Situación Laboral`, values = ~`Personas`, type = 'pie') %>%
layout(title = 'Situación Laboral')
m_empleo %>% plot_ly(x = ~`Situación Laboral`, y = ~`Porcentaje de Economicamente Activa`, color= ~Municipio,colors = "Accent", barmode = 'group', type = 'bar')%>%
layout(title = 'Situación Laboral por Municipio')
# Lowest/highest shares of foreign-born population.
m_lugar_nacimiento %>% arrange(`Porcentaje de la Poblacion`) %>% head(3)
d_lugar_nacimiento %>% filter(`Lugar de Nacimiento`=="En otro paÍs")%>% arrange(desc(`Porcentaje de la Poblacion`)) %>% head(3)
m_lugar_nacimiento %>% filter(`Lugar de Nacimiento`=="En otro paÍs")%>%
arrange(desc(`Porcentaje de la Poblacion`)) %>% head(5)
# NOTE(review): the next line errors as written -- after the reshaping above,
# d_educacion only has `Nivel Educativo`/`Cantidad de Personas` columns (no
# `Lugar de Nacimiento` or `Porcentaje de la Poblacion`), and "Nive" looks
# like a truncated value. Probably d_lugar_nacimiento or a `Nivel Educativo`
# filter was intended -- confirm and fix.
d_educacion %>% filter(`Lugar de Nacimiento`=="Nive")%>% arrange(desc(`Porcentaje de la Poblacion`)) %>% head(3)
# ---------------------------------------------------------------------------
# Reload the raw (wide) tables for a correlation check: the reshaping above
# overwrote several of them, so the environment is cleared and re-read.
# NOTE(review): rm(list = ls()) also destroys the reshaped tables; this is
# intentional here but makes the script single-pass.
# ---------------------------------------------------------------------------
rm(list = ls())
# Open every file that exists in the Data folder (same loader as at the top).
archivos <- list.files("Data")
for (nombre in archivos) {
temp_df <- read_excel(path = paste("Data", nombre, sep = "/"), skip = 9) %>% select(-X__1)
temp_df <- temp_df %>% filter(complete.cases(temp_df))
temp_df[1] <- as.integer(pull(temp_df, 1))
temp_df[2] <- factor(pull(temp_df,2), ordered= TRUE)
pre<- ifelse(str_detect(nombre, "municipal"), "m_", "d_")
nombre <- str_remove(nombre, "(_municipal|_departamental)\\.xlsx$")
nombre <- paste(pre,nombre, sep = "")
assign(nombre, temp_df)
}
# Correlate employment counts against technology-use counts across areas.
d_empleo %>% full_join(d_tecnologia) %>%
select(`Población ocupada`,Cesante, Aspirante, `No declarado`, `Usa Celular`, `Usa Computadora`, `Usa Internet`) %>%
cor %>% corrplot()
# These correlations are not very useful: besides being a fallacy of
# interpretation, Guatemala's demographic data are broadly very similar
# across areas.
|
5fb117f3d6bf2713ded83730d32e53885c18176e
|
66e6159f726d04678c83d24b06856b60ddcd6e4f
|
/src/MovieLense.R
|
e613b21ce42d4707ea4fbd4eb2fd90a91858d064
|
[] |
no_license
|
nikita-goswami/Recommender-System-Design
|
d003763ef2b37e6dad52f7e21733879ada5c327e
|
7e247564807149b1e7564184c77c8f94cf918121
|
refs/heads/master
| 2022-06-08T08:49:07.339879
| 2020-05-07T02:10:38
| 2020-05-07T02:10:38
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,548
|
r
|
MovieLense.R
|
######################################################
### Recommender Lab
### Nikita Goswami
### Created: 5/25/2020
###
### Builds and inspects user-based collaborative
### filtering recommenders on the MovieLense data.
######################################################
# NOTE(review): rm()/setwd() make this script hostile to interactive use and
# to other machines; kept here for compatibility with the original workflow.
rm(list = ls())
setwd("~/R workingDir/Recommender Systems/")
# FIX: install packages only when missing instead of unconditionally
# reinstalling (and hitting the network/CRAN) on every run.
for (pkg in c("recommenderlab", "ggplot2")) {
  if (!requireNamespace(pkg, quietly = TRUE)) {
    install.packages(pkg)
  }
}
library(recommenderlab)
library(ggplot2)
# Load the bundled MovieLens 100k rating matrix shipped with recommenderlab.
data(MovieLense)
# Inspect the rating matrix dimensions and a corner of the data.
dim(getRatingMatrix(MovieLense)) #943 1664
getRatingMatrix(MovieLense)[1:10, 1:10]
# Normalization of the MovieLense matrix
MovieLense_Normalize <- normalize(MovieLense)
head(MovieLense_Normalize)
# Pull all ratings out of the sparse slot as a plain vector.
vector_ratings <- as.vector(MovieLense@data)
unique(vector_ratings)
table_ratings <- table(vector_ratings)
table_ratings
# NOTE(review): X11() opens a native display device and fails on headless /
# non-X11 sessions; consider dev.new() or just letting plots go to the
# default device.
X11()
barplot(table_ratings, main="Distribution of Ratings")
# In the sparse storage a stored 0 means "no rating", so drop zeros before
# tabulating the actual rating distribution.
vector_ratings <- vector_ratings[vector_ratings != 0] # rating == 0 are NA values
vector_ratings <- factor(vector_ratings)
table_ratings <- table(vector_ratings)
table_ratings
X11()
barplot(table_ratings, main="Distribution of Ratings")
#################################################
## Visualize raw ratings and normalized ratings
#################################################
X11()
image(MovieLense_Normalize[1:100,1:100],
main = "Normalized ratings")
X11()
image(MovieLense[1:100, 1:100], main = "Raw Ratings")
getRatingMatrix(MovieLense_Normalize)[1:10, 1:10]
# De-normalize back to the raw rating scale.
R_denormalize <- denormalize(MovieLense_Normalize)
# Create a binary matrix: TRUE where the (raw) rating is at least 4.
MovieLense_binarize <- binarize(R_denormalize, minRating = 4)
getRatingMatrix(MovieLense_binarize)
X11()
image(MovieLense_binarize[1:100,1:100], main = "Binarized ratings")
# Visualize the ratings in the form of a histogram
X11()
hist(getRatings(MovieLense_Normalize), breaks = 100, main = "Histogram of normalized ratings")
X11()
hist(rowCounts(MovieLense_Normalize), breaks = 100, main = "ratings given by users")
######################################
## Create a recommender system
######################################
# List the recommender algorithms registered for real-valued rating matrices.
?recommenderRegistry
recommender_models <- recommenderRegistry$get_entries(dataType = "realRatingMatrix")
lapply(recommender_models, "[[", "description")
# We will use UBCF (user-based collaborative filtering).
recommender_models$UBCF_realRatingMatrix$parameters
# The parameters for UBCF are : method = cosine, nn=25, sample=false and normalize= center
# Exploring similarity data: pairwise cosine similarity between the first
# four users (min_matching = 5 requires at least 5 co-rated items).
similarity_users <- similarity(MovieLense[1:4, ],
method = "cosine",
which = "users",min_matching=5)
as.matrix(similarity_users)
X11()
image(as.matrix(similarity_users), main = "User similarity")
######################################################
# User-based collaborative filtering
######################################################
# Split users ~80/20 into train and test sets.
# FIX: the original sampled `nrow(ratings_movies)`, but `ratings_movies` is
# never defined anywhere in this script (the data object is `MovieLense`),
# so the split errored before any model could be built.
which_train <- sample(x = c(TRUE, FALSE),
                      size = nrow(MovieLense),
                      replace = TRUE,
                      prob = c(0.8, 0.2))
# head(which_train)
train <- MovieLense[which_train, ]
test <- MovieLense[!which_train, ]
recommender_models <- recommenderRegistry$get_entries(dataType ="realRatingMatrix")
recommender_models$UBCF_realRatingMatrix$parameters
############## Experiment 1
# The default parameters for UBCF are:
#   similarity method = cosine
#   nn        = 25
#   sample    = FALSE
#   normalize = center
UBCFrecc_model1 <- Recommender(data = train, method = "UBCF")
UBCFrecc_model1
UBCFmodel_details1 <- getModel(UBCFrecc_model1)
names(UBCFmodel_details1)
UBCFmodel_details1$data
# Apply the recommender model to predict ratings for the held-out users.
# pred_model1 <- predict(object = UBCFrecc_model1, MovieLense[1:2], type="ratings")
pred_model1 <- predict(object = UBCFrecc_model1, test, type="ratings")
pred_model1
pred <- as(pred_model1, "matrix")[1:5, 1:5]
as(MovieLense[1:2], "matrix")[, 1:5]
##########################################################
## Use different parameters for UBCF
##########################################################
recommenderRegistry$get_entry("UBCF", dataType = "realRatingMatrix")
recom <- Recommender(train, method = "UBCF",
                     parameter = list(method = "cosine", nn = 10, normalize = "center"))
recom
# NOTE(review): a Recommender object has no "matrix"/"list" coercion method,
# so the next two inspection lines error as written -- confirm what was
# intended (probably getModel(recom)).
as(recom, "matrix")[, 1:5]
as(recom, "list")
# NOTE(review): "k-nearest_neighbors" is not a similarity method name used
# elsewhere in this script (cf. "cosine" above); verify against the
# recommenderlab documentation before relying on this model.
recom <- Recommender(train, method = "UBCF",
                     parameter = list(method = "k-nearest_neighbors", nn = 10, normalize = "center"))
recom
#####################################################
## Testing the performance of recommender system
#####################################################
# Define Test and Train set
eval <- evaluationScheme(MovieLense,method = "split", given = 15, train=0.5, goodRating=4)
eval
##########################################################
### Building a recommender model using user based collaborative filtering
#######################################################
userbased_model<- Recommender(getData(eval,"train"), "UBCF")
userbased_model
P1<- predict(userbased_model, getData(eval, "known"), type="ratings")
################################################################################
### calculating the error between prediction and the unknown part of test set
################################################################################
?calcPredictionAccuracy
ERROR<- rbind(UBCF = calcPredictionAccuracy(P1, getData(eval,"unknown")))
ERROR
# RMSE MSE MAE
# UBCF 1.060867 1.125439 0.8420556
#################################################################################
### evaluation of top-N recommender algorithm using the Given-3 protocol
###i.e, for the test users all but 3 are withheld for evaluation.
#################################################################################
scheme<- evaluationScheme(MovieLense, method="cross",k=4, given=3, goodRating=4) ##?
scheme
results<- evaluate(scheme, method = "POPULAR", type="topNList", n=c(1,3,5,10,15,20))
results
getConfusionMatrix(results)[[1]]
##################################################################
### Plotting the ROC curve
#####################################################################
x11()
plot(results, annotate=TRUE)
##################################################################
###precision and recall plot
#####################################################################
x11()
plot(results, "prec/rec", annotate=TRUE)
graphics.off()
######################################################
## Experimentation with Model parameters
######################################################
set.seed(42) # What other seed is there!
movie <- evaluationScheme(MovieLense, method = "split", train = .8, given = 5, goodRating = 3)
movie
# 1. Neighbourhood Size
user_nn <- list(
"5 NN" = list(name="UBCF", param=list(normalize = "Z-score",
method="Cosine",
nn=5)),
"10 NN" = list(name="UBCF", param=list(normalize = "Z-score",
method="Cosine",
nn=10)),
"20 NN" = list(name="UBCF", param=list(normalize = "Z-score",
method="Cosine",
nn=20)),
"30 NN" = list(name="UBCF", param=list(normalize = "Z-score",
method="Cosine",
nn=30)),
"40 NN" = list(name="UBCF", param=list(normalize = "Z-score",
method="Cosine",
nn=40)),
"50 NN" = list(name="UBCF", param=list(normalize = "Z-score",
method="Cosine",
nn=50))
)
# Running the recommendation system and predicting n movies for evaluation
recs <- c(1,5, 10, 15, 20, 25)
user_nn_results <- evaluate(movie, user_nn, n = recs, progress = FALSE)
# Drawing the ROC plot
X11()
plot(x = user_nn_results, y = "ROC", annotate = 4, legend="topleft", main = "Z-score normalizarion and cosine distance")
# Draw the precision / recall curve
X11()
plot(x = user_nn_results, y = "prec/rec", annotate = 5)
#calculating RMSE with neighbor = 40
model <- Recommender(getData(movie, "train"), method = "UBCF",
param=list(normalize = "Z-Score", method="Cosine", nn=40))
prediction <- predict(model, getData(movie, "known"), type="ratings")
rmse_ubcf <- calcPredictionAccuracy(prediction, getData(movie, "unknown"))[1]
rmse_ubcf
# 2. Normalization - Mean or Z-score
norm <- list(
"Center" = list(name="UBCF", param=list(normalize = "center",
method="Cosine",
nn=40)),
"Z-score" = list(name="UBCF", param=list(normalize = "Z-score",
method="Cosine",
nn=40))
)
norm_results <- evaluate(movie, norm, n = recs, progress = FALSE)
X11()
plot(x = norm_results, y = "ROC", legend="topleft")
X11()
plot(x = norm_results, y = "prec/rec", annotate = 1)
# 3. Distance Methods - Pearson, Cosine and Jaccard
dist <- list(
"Pearsons" = list(name="UBCF", param=list(normalize = "z-score",
method="pearson",
nn=40)),
"Cosine" = list(name="UBCF", param=list(normalize = "Z-score",
method="Cosine",
nn=40)),
"Jaccard" = list(name="UBCF", param=list(normalize = "Z-score",
method="jaccard",
nn=40))
)
distresults <- evaluate(movie, dist, n = recs, progress = FALSE)
X11()
plot(x=distresults, y = "ROC", annotate = 3, legend="topleft")
X11()
plot(x =distresults, y = "prec/rec", annotate = c(1,3))
# Calculating RMSe with pearson distance measure
model <- Recommender(getData(movie, "train"), method = "UBCF",
param=list(normalize = "Z-Score", method="Pearson", nn=40))
prediction <- predict(model, getData(movie, "known"), type="ratings")
rmse_ubcf <- calcPredictionAccuracy(prediction, getData(movie, "unknown"))[1]
rmse_ubcf
####################################################
## Test the performance of the Recommender System using hold-out or cross-validation approach
####################################################
# Setting evaluation scheme
eval_sets <- evaluationScheme(data = MovieLense,
method = "cross-validation",
k = 5,
given = 15,
goodRating = 4)
model_to_evaluate <- "UBCF"
# Setting model parameters
model_parameters <- list(normalize = "Z-Score", method="Pearson", nn=40)
eval_recommender <- Recommender(data = getData(eval_sets, "train"),
method = model_to_evaluate,
parameter = model_parameters)
items_to_recommend <- 10
eval_prediction <- predict(object = eval_recommender,
newdata = getData(eval_sets, "known"),
n = items_to_recommend,
type = "ratings")
qplot(rowCounts(eval_prediction)) +
geom_histogram(binwidth = 10) +
ggtitle("Distribution of movies per user")
eval_accuracy <- calcPredictionAccuracy(x = eval_prediction,
data = getData(eval_sets, "unknown"),
byUser = TRUE)
X11()
qplot(eval_accuracy[, "RMSE"]) +
geom_histogram(binwidth = 0.1) +
ggtitle("Distribution of the RMSE by user")
# Evaluating the Recommendations
results <- evaluate(x = eval_sets,
method = model_to_evaluate,
n = seq(10, 100, 10))
head(getConfusionMatrix(results)[[1]])
columns_to_sum <- c("TP", "FP", "FN", "TN")
indices_summed <- Reduce("+", getConfusionMatrix(results))[, columns_to_sum]
head(indices_summed)
X11()
plot(results, annotate = TRUE, main = "ROC curve")
X11()
plot(results, "prec/rec", annotate = TRUE, main = "Precision-recall")
|
fe0f343c9c518a62c6dcced4c30b595b7b67cefe
|
2c4e38480f522f3762b6967d7cfbf8aae0a948aa
|
/R/h5ls.R
|
d152305f71ebf01f79ff875aad141157070f1298
|
[] |
no_license
|
grimbough/rhdf5
|
097f8c48f322172415308491b603ff5b6cb4521a
|
4d430edf2a95cc9a2e4dfbf857ff6ff41cdef61d
|
refs/heads/devel
| 2023-07-27T14:14:36.645790
| 2023-07-12T09:09:12
| 2023-07-12T09:09:12
| 101,306,379
| 51
| 24
| null | 2023-04-05T08:13:17
| 2017-08-24T14:51:27
|
R
|
UTF-8
|
R
| false
| false
| 4,446
|
r
|
h5ls.R
|
h5lsConvertToDataframe <- function(L, all = FALSE, native) {
  # Recursively post-process the raw listing returned by the C-level h5ls:
  # data.frames get their HDF5 constant codes mapped to readable strings
  # (and are trimmed to the short column set unless `all` is requested);
  # anything else is treated as a nested list and walked element-wise.
  if (!is.data.frame(L)) {
    for (idx in seq_along(L)) {
      L[idx] <- list(h5lsConvertToDataframe(L[[idx]], all = all, native = native))
    }
    return(L)
  }
  L$ltype <- h5const2String("H5L_TYPE", L$ltype)
  L$otype <- h5const2String("H5I_TYPE", L$otype)
  if (!all) {
    # Short form: keep only the user-facing columns.
    L <- L[, c("group", "name", "otype", "dclass", "dim")]
  }
  L
}
#' List the content of an HDF5 file.
#'
#' @param file The filename (character) of the file in which the dataset will
#' be located. You can also provide an object of class [H5IdComponent-class]
#' representing a H5 location identifier (file or group). See [H5Fcreate()],
#' [H5Fopen()], [H5Gcreate()], [H5Gopen()] to create an object of this kind.
#' @param recursive If `TRUE`, the content of the whole group hierarchy is
#' listed. If `FALSE`, Only the content of the main group is shown. If a positive
#' integer is provided this indicates the maximum level of the hierarchy that
#' is shown.
#' @param all If `TRUE`, a longer list of information on each entry is provided.
#' @param datasetinfo If `FALSE`, datatype and dimensionality information is not
#' provided. This can speed up the content listing for large files.
#' @param index_type See `h5const("H5_INDEX")` for possible arguments.
#' @param order See `h5const("H5_ITER")` for possible arguments.
#' @param s3 Logical value indicating whether the file argument should be
#' treated as a URL to an Amazon S3 bucket, rather than a local file path.
#' @param s3credentials A list of length three, providing the credentials for
#' accessing files in a private Amazon S3 bucket.
#' @param native An object of class `logical`. If TRUE, array-like objects
#' are treated as stored in HDF5 row-major rather than R column-major
#' orientation. Using `native = TRUE` increases HDF5 file portability
#' between programming languages. A file written with `native = TRUE`
#' should also be read with `native = TRUE`
#'
#' @return \code{h5ls} returns a `data.frame` with the file content.
#'
#' @author Bernd Fischer, Mike L. Smith
#' @seealso [h5dump()]
#' @references \url{https://portal.hdfgroup.org/display/HDF5}
#' @keywords programming interface IO file
#' @examples
#'
#' h5File <- tempfile(pattern = "ex_dump.h5")
#' h5createFile(h5File)
#'
#' # create groups
#' h5createGroup(h5File,"foo")
#' h5createGroup(h5File,"foo/foobaa")
#'
#' # write a matrix
#' B = array(seq(0.1,2.0,by=0.1),dim=c(5,2,2))
#' attr(B, "scale") <- "liter"
#' h5write(B, h5File,"foo/B")
#'
#' # list content of hdf5 file
#' h5ls(h5File,all=TRUE)
#'
#' # list content of an hdf5 file in a public S3 bucket
#' \donttest{
#' h5ls(file = "https://rhdf5-public.s3.eu-central-1.amazonaws.com/h5ex_t_array.h5", s3 = TRUE)
#' }
#'
#' @export
h5ls <- function( file, recursive = TRUE, all=FALSE, datasetinfo=TRUE,
                  index_type = h5default("H5_INDEX"), order = h5default("H5_ITER"),
                  s3 = FALSE, s3credentials = NULL, native = FALSE) {

  # Open the location (local file or S3 URL) read-only and register cleanup
  # immediately so handles are released even if validation below fails.
  if (isTRUE(s3)) {
    fapl <- H5Pcreate("H5P_FILE_ACCESS")
    on.exit(H5Pclose(fapl))
    H5Pset_fapl_ros3(fapl, s3credentials)
    loc <- h5checktypeOrOpenLocS3(file, readonly = TRUE, fapl = fapl, native = native)
  } else {
    loc <- h5checktypeOrOpenLoc(file, readonly = TRUE, fapl = NULL, native = native)
  }
  on.exit(h5closeitLoc(loc), add = TRUE)

  if (length(datasetinfo) != 1 || !is.logical(datasetinfo))
    stop("'datasetinfo' must be a logical of length 1")
  index_type <- h5checkConstants( "H5_INDEX", index_type )
  order <- h5checkConstants( "H5_ITER", order )

  # Translate 'recursive' into the depth limit understood by the C code:
  # a negative value means unlimited recursion, a positive integer caps the
  # number of hierarchy levels listed.
  if (is.logical(recursive)) {
    depth <- if (recursive) -1L else 1L
  } else if (is.numeric(recursive)) {
    # is.numeric() is TRUE for both integer and double, so no separate
    # is.integer() test is needed.
    if (length(recursive) > 1) {
      warning("'recursive' must be of length 1. Only using first value.")
      # Fix: actually use only the first value (previously the whole vector
      # was coerced and passed through as 'depth').
      recursive <- recursive[1]
    }
    if (recursive == 0) {
      # Fix: this check now also applies when a vector was supplied.
      stop("value 0 for 'recursive' is undefined, either a positive integer or negative (maximum recursion)")
    }
    depth <- as.integer(recursive)
  } else {
    stop("'recursive' must be number or a logical")
  }

  # Scalar flag for the C interface (scalar if/else, not ifelse()).
  di <- if (datasetinfo) 1L else 0L
  L <- .Call("_h5ls", loc$H5Identifier@ID, depth, di, index_type, order, loc$H5Identifier@native, PACKAGE='rhdf5')
  h5lsConvertToDataframe(L, all=all, native = loc$H5Identifier@native)
}
|
094f870925429b4d341ae22539729f07868909af
|
b8735a2bc8e6e2f759d2fc50c73d2506c894a713
|
/DEVILSTAZ/R/Correlations.R
|
f0c4a4e75b5d32049af31f13e0a763743e329a7c
|
[] |
no_license
|
ICRAR/DEVILS-TAZ
|
3698ba8e1120d9d2c9f68a01a325f90f5fa7c259
|
e1bc67bfdf6ad0e196d612bb8023ae134ac6c2d0
|
refs/heads/master
| 2023-05-29T23:43:42.947950
| 2019-11-08T04:08:00
| 2019-11-08T04:08:00
| 111,745,574
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,256
|
r
|
Correlations.R
|
#' Compute cross correlations in AutoZ
#'
#' @description Function to compute cross-correlation strength between a given
#' spectrum and a set of templates. Function is internal to AutoZ.
#'
#' @param spec input spectrum containing spec$lambda and spec$flux
#' @param gap logarithmic step between successive trial shifts in the cross correlation
#' @param tempData structure of template data (templateNumbers plus per-template specData)
#' @param helioVel heliocentric correction velocity, in km/s
#' @param plan fast-Fourier-transform plan; passed through to CrossCorr, which
#'   currently ignores it (base R fft() needs no plan)
#' @param z_prior redshift priors, c(lo, hi)
#' @param highZ if TRUE, use a wider correlation window for high-redshift targets
#' @return a list with one entry per template, each containing the template
#'   number, the trial shifts, the raw and normalised cross correlations, a
#'   bit-mask encoding the selection criteria, and the turning-point count
#' @examples
#' None applicable as internal function....
#' @export
DoCrossCorr = function(spec, gap, tempData, helioVel, plan, z_prior, highZ=FALSE){
  # Half-width (in samples) of the correlation window returned by CrossCorr.
  corrSize <- 17000
  if (highZ==T){corrSize <- 24999}
  # prepare output data: one slot per template
  dataout <- vector('list',length(tempData$templateNumbers))
  count = 1
  #plan = planFFT(length(spec$flux), effort = 2)
  for( tempNum in tempData$templateNumbers ){
    # Find index of certain template number
    tempID <- which(tempData$templateNumbers == tempNum)
    #takes about 0.224 to next section
    crossCorrRaw <- CrossCorr(template = tempData$specData[[tempID]]$flux, spec = spec$flux,
                              plan = plan, corrSize = corrSize)
    #takes about 0.046 to next section
    # calculate redshifts: convert each sample shift into a trial redshift,
    # folding in the template's own redshift and the heliocentric correction
    # (2.998E5 is the speed of light in km/s).
    shifts <- 10^((( 0:(2*corrSize) ) - corrSize) * gap) *
      (1 + tempData$specData[[tempID]]$redshift) * (1 + helioVel/2.998E5) - 1.0
    #takes about 0.002 to next section
    # Per-template redshift windows (RMS window and allowed search window).
    r <- GetRange(tempNum, z_prior)
    rmsZRange <- r[[1]]
    allowedZRange <- r[[2]]
    #takes about 0.048 to next section
    # set criteria: which shifts fall in each window, and where the raw
    # correlation has positive/negative local peaks.
    criteriaSearch <- CriteriaBetween(shifts, allowedZRange)
    criteriaRMS <- CriteriaBetween(shifts, rmsZRange)
    #takes about 0.214 to next section
    criteriaPospeak <- CriteriaPeak(crossCorrRaw) # positive peaks
    criteriaNegpeak <- CriteriaPeak(-crossCorrRaw) # negative peaks
    # takes about 0.028 to next section
    criteriaTP <- criteriaPospeak | criteriaNegpeak
    # takes about 0.049 to next section
    # Subtract trimmed mean excluding top and bottom 4% of points.
    # This brings more symmetry to positive and negative peaks.
    useRMS <- which(criteriaRMS)
    countRMS <- length(useRMS)
    crossCorr <- crossCorrRaw - MeanReject(crossCorrRaw[useRMS], countRMS/25)
    # takes about 0.034 to next section
    # normalisation using turning points - divide by root mean square.
    useNorm <- which(criteriaRMS & criteriaTP)
    numTurningpoints <- length(useNorm)
    crossCorr <- crossCorr / sqrt( mean((crossCorr[useNorm])^2 ) )
    # this all takes about 0.081 to next section
    # TODO this is commented out in Ivans code, not sure to include
    # normalization using values of positive peaks and
    # negative values of negative peaks.
    usePos <- which(criteriaRMS & criteriaPospeak)
    countPos <- length(usePos)
    useNeg <- which(criteriaRMS & criteriaNegpeak)
    countNeg <- length(useNeg)
    numTurningpoints <- countPos + countNeg
    testVals <- c(crossCorr[usePos], -crossCorr[useNeg])
    trimmedMean <- MeanReject(testVals, numTurningpoints/25)
    sdEstimate <- rms(testVals - trimmedMean)
    crossCorr <- (crossCorr - trimmedMean) / sdEstimate
    # this take 0.041 seconds to next section
    # assign information to structure for output function.
    # maskInfo packs the five criteria into one integer bit-mask per shift.
    maskInfo <- 1*criteriaSearch + 2*criteriaRMS + 4*criteriaPospeak +
      8*criteriaNegpeak + 16*criteriaTP
    dataout[[count]] <- list("templateNumber" = tempNum,"shifts"=shifts, "crossCorrRaw" = crossCorrRaw,
                             "crossCorr"=crossCorr,"maskInfo" = maskInfo,"numTurningpoints" = numTurningpoints)
    count <- count + 1
  }
  #time <- proc.time()
  #cat("\nCROSS_CORR totalCorrTime is:", totalCorrTime)
  # NOTE(review): `return = dataout` creates a local variable named `return`;
  # as the last expression of the function it still yields `dataout`
  # (invisibly).  An explicit `return(dataout)` would be clearer.
  return = dataout
}
# Cross correlate one spectrum with a template.
# "template" and "spec" must be the same length (an even number).
# Returns a window of length 2*corrSize + 1 centred on zero shift
# (zero shift sits at index corrSize + 1 of the result).
# Written ages ago in IDL by Ivan Baldry.
# Translated to R by Leon Drygala.
CrossCorr = function(template, spec, plan, corrSize = 14000){
  # NOTE(review): 'plan' is accepted for interface compatibility but unused;
  # base R's fft() needs no precomputed plan.
  tempLength <- length(template)
  # Cross-correlation theorem: FFT(spec) * Conj(FFT(template)) is the FFT of
  # the circular cross correlation.  The 1/tempLength factors reproduce the
  # original scaling.
  fftTemplate <- Conj(fft(template) / tempLength)
  fftSpec <- fft(spec) / tempLength
  # Real part of the (deliberately unscaled) inverse FFT gives the raw
  # cross correlation as a function of shift.
  crC <- Re(fft(fftSpec * fftTemplate, inverse = TRUE))
  len <- length(crC)
  halfLength <- len / 2
  # Rotate so that zero shift moves from index 1 to the centre of the vector.
  crC <- c(crC[(halfLength + 1):len], crC[1:halfLength])
  # Return a window of 2*corrSize + 1 samples around zero shift
  # (explicit value instead of the old `return = crossCorr` assignment).
  crC[(halfLength - corrSize + 1):(halfLength + corrSize + 1)]
}
# Flag local peaks in a numeric vector: position i is a peak when it is
# >= its left neighbour and strictly > its right neighbour.  Only indices
# 2..(n-3) are examined; the first entry and the last three stay FALSE,
# matching the IDL original.
# Written by Ivan Baldry.
# Translated by Leon Drygala.
CriteriaPeak = function(values){
  n <- length(values)
  flags <- rep(FALSE, n)
  idx <- 2:(n - 3)
  flags[idx] <- (values[idx] >= values[idx - 1]) & (values[idx] > values[idx + 1])
  flags
}
# Determine where local peak values are in data.
# Attempt to increase performance over original CriteriaPeak.
# Intended contract per the author: returns 1 for a negative peak, 2 for a
# positive peak, 0 for no peak.
# Written by Leon Drygala.
# NOTE(review): the final line indexes `values` with the logical mask `out`
# and with a shifted copy of that mask (`c(out[-1],F)`), which selects
# peak-positions rather than each peak's neighbour — it is unclear this
# implements the stated 0/1/2 contract; verify against CriteriaPeak before
# relying on it.  Assigning a numeric into the logical vector `out` also
# silently coerces the whole result to numeric.
CriteriaPeakFast = function(values){
  num <- length(values)
  out <- vector(mode = 'logical',length = num)
  # TRUE where the "rising on the left" and "falling on the right" tests agree,
  # i.e. candidate turning points (either sign).
  out[2:(num-1)] = ( (values[2:(num-1)] >= values[1:(num-2)]) ==
                       (values[2:(num-1)] > values[3:(num)]) )
  # Attempts to upgrade positive peaks from 1 to 2 — see NOTE above; uses
  # T/F literals (F) where TRUE/FALSE would be safer.
  out[out] <- out[out] + (values[out] > values[c(out[-1],F)])
  # `return = out` assigns to a variable named `return`; the function still
  # yields `out` (invisibly) as its last expression.
  return = out
}
# Trimmed (truncated) mean: drop the numReject smallest and the numReject
# largest values before averaging.  With numReject == 0 this is a plain mean.
# Written by Ivan Baldry.
# Translated by Leon Drygala.
MeanReject = function(data, numReject){
  if (!numReject) {
    return(mean(data))
  }
  ordered <- sort(data)
  # Keep everything strictly inside the rejected tails (fractional
  # numReject values truncate on indexing, as in the original).
  keep <- seq.int(numReject + 1, length(ordered) - numReject)
  mean(ordered[keep])
}
# Take the mean of a set of values after rejecting the numReject lowest
# and numReject highest values ('trimmed'/'truncated' mean), using two
# priority queues to avoid a full sort.
# Written by Leon Drygala.
# NOTE(review): assumes numReject is small relative to length(data) so the
# two rejected index sets cannot overlap — verify for new callers.
MeanRejectFast = function(data, numReject){
  # Nothing to reject: plain mean (the original crashed here because
  # `for (i in 1:0)` iterates over c(1, 0)).
  if (!numReject) {
    return(mean(data))
  }
  # tooLarge keeps the numReject largest seen so far (smallest on top so it
  # can be evicted); tooSmall symmetrically keeps the numReject smallest.
  tooLarge <- PriorityQueue(decreasing = FALSE)
  tooSmall <- PriorityQueue(decreasing = TRUE)
  for (i in 1:numReject) {
    tooLarge[['insert']](data[i], i)
    tooSmall[['insert']](data[i], i)
  }
  for (i in (numReject + 1):length(data)) {
    if (data[i] > tooLarge[['peak']]()[1]) {
      tooLarge[['insert']](data[i], i)
      tooLarge[['pop']]()
    }
    if (data[i] < tooSmall[['peak']]()[1]) {
      tooSmall[['insert']](data[i], i)
      tooSmall[['pop']]()
    }
  }
  # Indices of the values to discard from both tails.
  rejectIndex <- c(tooSmall[['dump']]()[[2]], tooLarge[['dump']]()[[2]])
  # FIX: return the trimmed MEAN.  The original computed the mean but then
  # returned the trimmed data vector instead.
  mean(data[-rejectIndex])
}
# Closure-based priority queue.  `insert` keeps (key, value) pairs sorted by
# key (direction set by `decreasing`); `peak`/`pop` read/remove the head.
# FIX: state is now initialised with local assignment.  The original used
# `keys <<- values <<- NULL`, which superassigns into the GLOBAL environment,
# so every queue instance shared (and clobbered) the same global state —
# e.g. MeanRejectFast's two queues would corrupt each other.
PriorityQueue <- function(decreasing = TRUE) {
  # Per-instance state, captured by the closures below via <<-.
  keys <- NULL
  values <- NULL
  insert <- function(key, value) {
    merged <- c(keys, key)
    ord <- order(merged, decreasing = decreasing)
    keys <<- merged[ord]
    values <<- c(values, c(value))[ord]
  }
  peak <- function() {
    # Head (key, value) without removal; c() coerces to a common type.
    c(keys[[1]], values[[1]])
  }
  pop <- function() {
    head <- c(keys[[1]], values[[1]])
    values <<- values[-1]
    keys <<- keys[-1]
    head
  }
  queueLength <- function() {
    length(values)
  }
  dump <- function() {
    # Raw (keys, values) in current order.
    list(keys, values)
  }
  empty <- function() length(keys) == 0
  list(insert = insert, pop = pop, empty = empty, dump = dump,
       peak = peak, queueLength = queueLength)
}
# Map a template number to its RMS redshift window and allowed search window,
# then clamp the allowed window to the caller's prior.
# Defaults apply when no branch matches (e.g. tNum == 73 falls through every
# branch, as in the original — NOTE(review): possibly unintended; confirm).
GetRange = function(tNum, z_prior){
  rmsZRange <- c(-0.1, 0.5)
  allowedZRange <- c(-0.002, 0.002)
  if (tNum %in% c(11, 12, 13, 14, 15, 17, 19, 22)) {
    # late-type stellar templates - power is at red end
    rmsZRange <- c(-0.2, 0.4)
    allowedZRange <- c(-0.002, 0.002)
  } else if (tNum <= 22) {
    # remaining stellar templates
    rmsZRange <- c(-0.1, 0.5)
    allowedZRange <- c(-0.002, 0.002)
  } else if (tNum >= 23 && tNum <= 28) {
    # original galaxy templates
    rmsZRange <- c(-0.1, 1.5)
    allowedZRange <- c(-0.005, 1.5)
  } else if (tNum >= 29 && tNum <= 32) {
    # QSO templates - not working reliably with GAMA - needs highz set
    rmsZRange <- c(-0.1, 5)
    allowedZRange <- c(0.8, 5.5)
  } else if (tNum >= 33 && tNum <= 49) {
    # other galaxy templates
    rmsZRange <- c(-0.1, 1.5)
    allowedZRange <- c(-0.005, 1.5)
  } else if (tNum >= 50 && tNum < 60) {
    rmsZRange <- c(-0.1, 2.0)
    allowedZRange <- c(-0.005, 2.0)
  } else if (tNum >= 60 && tNum <= 72) {
    rmsZRange <- c(-0.1, 5)
    allowedZRange <- c(-0.005, 2.0)
  } else if (tNum > 73 && tNum <= 80) {
    rmsZRange <- c(-0.1, 10.0)
    allowedZRange <- c(-0.005, 2.0)
  } else if (tNum > 80) {
    rmsZRange <- c(-0.1, 2.0)
    allowedZRange <- c(-0.005, 2.0)  # TODO: should be z_prior per do_crossvorr.pro
  }
  # High-z override for selected templates (same set as the original's
  # per-number if chain).
  if (tNum %in% c(64, 65, 66, 67, 76, 77, 78, 79, 80)) {
    allowedZRange <- c(2.0, 6.5)
  }
  # Clamp the allowed window to the prior.
  allowedZRange[1] <- max(allowedZRange[1], z_prior[1])
  allowedZRange[2] <- min(allowedZRange[2], z_prior[2])
  list(rmsZRange, allowedZRange)
}
# Root mean square of a numeric vector.
rms = function(x){
  meanSquare <- sum(x^2) / length(x)
  sqrt(meanSquare)
}
|
d3425c9f453dba038261eabe49b722687865e6d1
|
ae5805f9864f273b198e62a3e5a11d13f543f325
|
/R/utils_list.R
|
6a20dff0611735cf51f2a3844f9eabc257bb2d09
|
[] |
no_license
|
jacintoArias/exreport
|
a5a457e5fe594fabff2667279844b86aeead0048
|
ac61cb5a88a64458ced16b47f28f3b1abc97f0e8
|
refs/heads/master
| 2021-07-03T13:40:40.524529
| 2021-06-03T10:48:36
| 2021-06-03T10:48:36
| 37,213,723
| 9
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,115
|
r
|
utils_list.R
|
.createDepthStructure <- function(l){
  # Flatten a nested list into a flat list of records with two fields:
  #   $depth — nesting level (0 for top level)
  #   $str   — the leaf element at that position
  # Returns NULL for NULL or empty input.
  if (is.null(l) || length(l) == 0)
    return(NULL)
  s <- list()
  for (i in seq_along(l)) {           # seq_along: safe for any length
    el <- l[[i]]
    if (is.list(el)) {
      # Recurse, then bump every returned record one level deeper.
      nested <- lapply(.createDepthStructure(el), function(x) {
        x$depth <- x$depth + 1
        x
      })
      s <- c(s, nested)
    } else {
      s <- c(s, list(list(depth = 0, str = el)))
    }
  }
  s
}
.nestedList2String <- function(l, numbered=TRUE){
  # A toString method for nested lists: one line per element, indented by one
  # tab per nesting level, prefixed with "i ) " (numbered) or "*) ".
  x <- .createDepthStructure(l)
  if (is.null(x))
    return("")
  s <- ""
  for (i in seq_along(x)) {
    # Scalar choice: plain if/else instead of ifelse().
    marker <- if (numbered) paste0(i, " ) ") else "*) "
    # strrep/paste0 replace the original do.call(paste, rep('\t', ...)) dance.
    s <- paste0(s, strrep("\t", x[[i]]$depth), marker, x[[i]]$str, "\n")
  }
  s
}
.nestedList2HTML <- function(l, numbered=TRUE){
  # A toString method for nested lists, rendering an HTML <ol>/<ul> tree.
  # NOTE(review): when depth changes by more than one level between
  # consecutive elements, only ONE opening/closing tag is emitted, and the
  # final line closes a single level regardless of the current depth — the
  # markup may be unbalanced for deeply nested input; verify before reuse.
  x <- .createDepthStructure(l)
  if (is.null(x))
    return("")
  depth <- 0
  # Ordered vs unordered list tags (scalar ifelse; if/else would also work).
  sepA <- ifelse(numbered, "<ol>\n", "<ul>\n")
  sepB <- ifelse(numbered, "\n</ol>", "\n</ul>")
  s <- sepA
  for (i in 1:length(x)){
    # Open/close one list level when the nesting depth changes.
    if (x[[i]]$depth > depth)
      s <- paste0(s, sepA)
    if (x[[i]]$depth < depth)
      s <- paste0(s, sepB)
    depth <- x[[i]]$depth
    s <- paste0(s, sprintf("<li>%s</li>\n", x[[i]]$str))
  }
  s <- paste0(s, sepB)
  s
}
.nestedList2Latex <- function(l){
  # Render a nested list as a LaTeX enumerate environment, opening/closing
  # one nesting level whenever consecutive entries differ in depth (the same
  # single-level behaviour as the HTML renderer).
  entries <- .createDepthStructure(l)
  if (is.null(entries))
    return("")
  out <- "\\begin{enumerate}\n"
  level <- 0
  for (entry in entries) {
    if (entry$depth > level)
      out <- paste0(out, "\\begin{enumerate}\n")
    if (entry$depth < level)
      out <- paste0(out, "\n\\end{enumerate}")
    level <- entry$depth
    out <- paste0(out, sprintf("\\item %s\n", entry$str))
  }
  paste0(out, "\n\\end{enumerate}")
}
|
ddeedff56567f01e85227f593c600f8a0ca02b18
|
78ddd2410b2895224654d0159e1443512784ff77
|
/code/analyze/plotting/plot_evaluate.R
|
54bc4b5c7acd09f769e53f37e990a1f8c91437a5
|
[] |
no_license
|
hyperboliccake/introgression
|
935b330e3177154275afec752966517a0d4c37e7
|
afe29d2c8db7366b43261e5a6d78829ba43f1660
|
refs/heads/master
| 2020-05-21T19:18:02.952950
| 2019-03-18T21:57:35
| 2019-03-18T21:57:35
| 63,725,778
| 1
| 1
| null | 2019-04-09T18:21:21
| 2016-07-19T20:27:07
|
Python
|
UTF-8
|
R
| false
| false
| 12,540
|
r
|
plot_evaluate.R
|
# this is going to make a bunch of plots to help evaluate which
# regions look real and which should be filtered out
# in particular, it should be useful to look at regions overlapping
# and not overlapping genes; the ones overlapping genes should on
# average be more accurate (or at least less often due to poor
# alignment)
library(ggplot2)
library(reshape2)
library(RColorBrewer)
library(viridis)
library(hexbin)
source('../../my_color_palette.R')
args = commandArgs(trailingOnly=TRUE)
tag = args[1]
suffix = ''
if (length(args) == 2) {
suffix = args[2]
}
regions = read.table(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/', tag, '/introgressed_blocks', suffix,'_par_', tag, '_summary_plus.txt', sep=''), sep='\t', header=T, stringsAsFactors=F)
regions$overlap_gene = regions$number_genes >= 1
regions$length = regions$end - regions$start + 1
regions$fraction_gap = regions$number_gaps / regions$aligned_length
regions$fraction_gap_masked = (regions$number_gaps + regions$number_masked_non_gap) / regions$aligned_length
regions$cer_id = regions$number_match_ref1 / (regions$aligned_length - regions$number_gaps)
regions$par_id = regions$number_match_ref2 / (regions$aligned_length - regions$number_gaps)
quantile(regions$cer_id)
quantile(regions$par_id, probs=seq(0,1,.01))
x1 = regions[which(regions$par_id < .9),]
x1c = sum(x1$aligned_length - x1$number_gaps)
x2c = sum(regions$aligned_length - regions$number_gaps)
print(x1c)
print(x2c)
print(x1c/x2c)
adsgasklgj
#regions_filtered = read.table(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/', tag, '/introgressed_blocks_filtered_par_', tag, '_summary_plus.txt', sep=''), sep='\t', header=T, stringsAsFactors=F)
##=====
# par id vs cer id
##=====
ggplot(regions, (aes(x=cer_id, y=par_id, label=region_id, colour='x'))) + geom_hex(bins=100) + coord_cartesian(xlim=c(.6, 1),ylim=c(.6,1)) + geom_abline(slope=1,intercept=0, linetype='dashed') +
xlab('Identity with cerevisiae reference') +
ylab('Identity with paradoxus reference') +
scale_colour_manual(values = c(my_color_palette[['introgressed']])) +
theme(panel.background=element_rect(fill="white"),
panel.grid.minor=element_line(colour="gray92"), panel.grid.major=element_line(colour="gray92"),
axis.line=element_line(),
legend.position = "none",
axis.title.x = element_text(size=18),
axis.title.y = element_text(size=18),
axis.text.x = element_text(colour="black"),
axis.text.y = element_text(colour="black"))
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/par_id_vs_cer_id_hex_',tag,'.pdf',sep=''), width = 8, height = 8)
ggplot(regions, (aes(x=cer_id, y=par_id, label=region_id, colour='x', size=end-start))) + geom_point(alpha=.15) +
scale_size_continuous(range = c(1, 5)) +
coord_cartesian(xlim=c(.6, 1),ylim=c(.6,1)) + geom_abline(slope=1,intercept=0, linetype='dashed') +
xlab('Identity with cerevisiae reference') +
ylab('Identity with paradoxus reference') +
scale_colour_manual(values = c(my_color_palette[['introgressed']])) +
theme(panel.background=element_rect(fill="white"),
panel.grid.minor=element_line(colour="gray92"), panel.grid.major=element_line(colour="gray92"),
axis.line=element_line(),
legend.position = "none",
axis.title.x = element_text(size=18),
axis.title.y = element_text(size=18),
axis.text.x = element_text(colour="black"),
axis.text.y = element_text(colour="black"))
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/par_id_vs_cer_id_prez2_',tag,'.pdf',sep=''), width = 8, height = 8)
ggplot(regions, (aes(x=cer_id, y=par_id, label=region_id, colour='x'))) + geom_point(alpha=.15, size=3.5) +
#scale_size_continuous(range = c(1, 5)) +
coord_cartesian(xlim=c(.6, 1),ylim=c(.6,1)) + geom_abline(slope=1,intercept=0, linetype='dashed') +
xlab('Identity with cerevisiae reference') +
ylab('Identity with paradoxus reference') +
scale_colour_manual(values = c(my_color_palette[['introgressed']])) +
theme(panel.background=element_rect(fill="white"),
panel.grid.minor=element_line(colour="gray92"), panel.grid.major=element_line(colour="gray92"),
axis.line=element_line(),
legend.position = "none",
axis.title.x = element_text(size=18),
axis.title.y = element_text(size=18),
axis.text.x = element_text(colour="black"),
axis.text.y = element_text(colour="black"))
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/par_id_vs_cer_id_prez1_',tag,'.pdf',sep=''), width = 8, height = 8)
ggplot(regions, (aes(x=cer_id, y=par_id, label=region_id, colour='x'))) + geom_point(size=.5, alpha=.4) + coord_cartesian(xlim=c(.6, 1),ylim=c(.6,1)) + geom_abline(slope=1,intercept=0, linetype='dashed') +
xlab('Identity with cerevisiae reference') +
ylab('Identity with paradoxus reference') +
scale_colour_manual(values = c(my_color_palette[['introgressed']])) +
theme(panel.background=element_rect(fill="white"),
panel.grid.minor=element_line(colour="gray92"), panel.grid.major=element_line(colour="gray92"),
axis.line=element_line(),
legend.position = "none",
axis.title.x = element_text(size=18),
axis.title.y = element_text(size=18),
axis.text.x = element_text(colour="black"),
axis.text.y = element_text(colour="black"))
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/par_id_vs_cer_id_',tag,'.pdf',sep=''), width = 8, height = 8)
ggplot(regions, (aes(x=cer_id, y=par_id, label=region_id))) + geom_point(size=.2, alpha=.5) + coord_cartesian(xlim=c(.6, 1),ylim=c(.6,1)) + geom_abline(slope=1,intercept=0) + geom_text(aes(label=as.character(region_id)),hjust=0,vjust=0, cex=.2)
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/par_id_vs_cer_id_labeled_',tag,'.pdf',sep=''), width = 8, height = 8)
asdg
##=====
# comparing patterns in regions different distances from telomeres
##=====
# fraction gaps vs distance from telomere
ggplot(regions, (aes(x=distance_from_telomere, y=fraction_gap, label=region_id))) + geom_point(size=.2, alpha=.5) + coord_cartesian(xlim=c(0,10000)) #+ geom_text(aes(label=ifelse(length>1000,as.character(region_id),'')),hjust=0,vjust=0, cex=.2)
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/frac_gaps_vs_dist_from_tel_',tag,'.pdf',sep=''), width = 12, height = 7)
# fraction gaps vs distance from centromere
ggplot(regions, (aes(x=distance_from_centromere, y=fraction_gap, label=region_id))) + geom_point(size=.2, alpha=.5) #+ geom_text(aes(label=ifelse(length>1000,as.character(region_id),'')),hjust=0,vjust=0, cex=.2)
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/frac_gaps_vs_dist_from_cen_',tag,'.pdf',sep=''), width = 12, height = 7)
# longest gap vs distance from telomere
ggplot(regions, (aes(x=distance_from_telomere, y=longest_gap, label=region_id))) + geom_point(size=.2, alpha=.5) #+ geom_text(aes(label=ifelse(length>1000,as.character(region_id),'')),hjust=0,vjust=0, cex=.2)
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/longest_gap_vs_dist_from_tel_',tag,'.pdf',sep=''), width = 12, height = 7)
##=====
# comparing patterns in regions that overlap and don't overlap genes
##=====
# scatter of frac gaps vs length
ggplot(regions, (aes(x=length, y=fraction_gap, colour=overlap_gene, label=region_id))) + geom_point(size=.2, alpha=.5) + geom_text(aes(label=ifelse(length>1000,as.character(region_id),'')),hjust=0,vjust=0, cex=.2)
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/gaps_vs_length_by_overlap_gene_',tag,'.pdf',sep=''), width = 12, height = 7)
## scatter of frac gaps+masked vs length
ggplot(regions, (aes(x=length, y=fraction_gap_masked, colour=overlap_gene, label=region_id))) + geom_point(size=.2, alpha=.5) + geom_text(aes(label=ifelse(length>1000,as.character(region_id),'')),hjust=0,vjust=0, cex=.2)
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/gaps_masked_vs_length_by_overlap_gene_',tag,'.pdf',sep=''), width = 12, height = 7)
# above with (no labels)
ggplot(regions, (aes(x=length, y=fraction_gap_masked, colour=overlap_gene))) + geom_point(size=.2, alpha=.5)
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/gaps_masked_vs_length_by_overlap_gene_nolab_',tag,'.pdf',sep=''), width = 12, height = 7)
## scatter of frac gaps+masked vs cer_id
ggplot(regions, (aes(x=cer_id, y=fraction_gap_masked, colour=overlap_gene, label=region_id))) + geom_point(size=.2, alpha=.5) + geom_text(aes(label=ifelse(length>1000,as.character(region_id),'')),hjust=0,vjust=0, cex=.2)
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/gaps_masked_vs_cer_id_by_overlap_gene_',tag,'.pdf',sep=''), width = 12, height = 7)
## scatter of frac gaps+masked vs cer_id
ggplot(regions, (aes(x=cer_id, y=fraction_gap_masked, colour=overlap_gene))) + geom_point(size=.2, alpha=.5)
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/gaps_masked_vs_cer_id_by_overlap_gene_nolab_',tag,'.pdf',sep=''), width = 12, height = 7)
## lengths
ggplot(regions, aes(x=as.factor(overlap_gene), y=(length))) + geom_violin()
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/violin_region_length_vs_number_genes_',tag,'.pdf',sep=''), width = 12, height = 7)
ggplot(regions, aes(x=as.factor(overlap_gene), y=number_non_gap)) + geom_violin()
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/violin_nongap_region_length_vs_number_genes_',tag,'.pdf',sep=''), width = 12, height = 7)
# number of gaps
ggplot(regions, aes(x=as.factor(overlap_gene), y=(number_gaps))) + geom_violin()
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/violin_num_gaps_vs_number_genes_',tag,'.pdf',sep=''), width = 12, height = 7)
# fraction of gaps
ggplot(regions, aes(x=as.factor(overlap_gene), y=(fraction_gap))) + geom_violin()
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/violin_frac_gaps_vs_number_genes_',tag,'.pdf',sep=''), width = 12, height = 7)
# longest gap stretch
ggplot(regions, aes(x=as.factor(overlap_gene), y=(longest_gap))) + geom_violin()
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/violin_longest_gap_vs_number_genes_',tag,'.pdf',sep=''), width = 12, height = 7)
# fraction gap+masked
ggplot(regions, aes(x=as.factor(overlap_gene), y=(fraction_gap_masked))) + geom_violin()
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/violin_gap_masked_vs_number_genes_',tag,'.pdf',sep=''), width = 12, height = 7)
# fraction gap+masked histogram
ggplot(regions, aes(x=(fraction_gap_masked))) + geom_histogram(binwidth=.01)
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/gap_masked_hist_',tag,'.pdf',sep=''), width = 12, height = 7)
# fraction gap+masked cdf
ggplot(regions, aes(x=(fraction_gap_masked))) + stat_ecdf()
ggsave(paste('/tigress/AKEY/akey_vol2/aclark4/projects/introgression/results/analysis/',tag,'/plots/gap_masked_cdf_',tag,'.pdf',sep=''), width = 12, height = 7)
# region length histogram
# non gap length histogram
# fraction gaps histogram
# fraction gaps vs total length
# fraction gaps vs overlap/not overlap gene
# num sites match only par vs fraction gaps
# num sites match only par vs length
# fraction sites match only par vs length
# num sites match only par vs non gap length
# fraction sites match only par vs non gap length
# num sites match only par histogram
# fraction sites match only par histogram
# num sites match only par vs overlap/not overlap gene
# ... vs no gene/gene with paralog/gene with no paralog
# ... vs distance from telomere
|
9d8ed46d53297419bc3bac2b69701bd6041644b1
|
7b0c2e442e0271e078f56694749bfc4092d13adb
|
/ui.R
|
8d389de1b0022870f9ee974713be1ea075991d1c
|
[] |
no_license
|
smagellan/devdataprod-shinyapp
|
8ee6433f3039e091f77aa9d1747242b743aabb4d
|
043f6d0c581d138e6dd83c52fa646d874b7a1a3a
|
refs/heads/master
| 2016-09-11T01:38:16.481659
| 2015-08-23T20:11:20
| 2015-08-23T20:11:20
| 41,265,115
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,317
|
r
|
ui.R
|
# ui.R -- Shiny UI for the "predict exercise type" demo app.
# Lets the user tick up to 10 test-set sample numbers; the server side
# runs a pre-trained random-forest model on the selected samples.
library(shiny);
# Load the test set once at UI construction time; several sentinel strings
# ("NA", "#DIV/0!", "") are mapped to NA.
testData <- read.csv('pml-testing.csv', na.strings=c("NA","#DIV/0!", ""));
dataLength <- nrow(testData);
#yep, do not allow all available data
choices <- list("1" = 1, "2" = 2, "3" = 3, "4" = 4, "5" = 5, "6" = 6, "7" = 7, "8" = 8, "9" = 9, "10" = 10);
# Debug/status output to the server console at start-up.
# NOTE(review): dataLength is computed and printed but never used to build
# `choices` -- confirm the hard-coded 1..10 list is intentional.
print(dataLength);
shinyUI(pageWithSidebar(
# Application title
headerPanel("Let's predict exercise type"),
# Left column: sample-number checkboxes plus the prediction text output
# rendered by the server as "answersData".
sidebarPanel(
checkboxGroupInput("samplePositions",
label = h3("Select sample numbers to predict"),
choices = choices,
selected = 1),
textOutput("answersData")
),
# Right column: static description of the dataset and the app.
mainPanel(
p("Six young health participants were asked to perform one set of 10 repetitions of the Unilateral Dumbbell Biceps Curl in five different fashions:"),
span("- exactly according to the specification (Class A), "), br(),
span("- throwing the elbows to the front (Class B), "), br(),
span("- lifting the dumbbell only halfway (Class C), "), br(),
span("- lowering the dumbbell only halfway (Class D) "), br(),
span("- and throwing the hips to the front (Class E). "), br(),
span("- Class A corresponds to the specified execution of the exercise, while the other 4 classes correspond to common mistakes."), br(),
br(),
span("Full description on dataset available "),
a("here", href="(http://groupware.les.inf.puc-rio.br/har#weight_lifting_exercises)"), br(), br(),
span("Model was pre-trained, it is randomForest-based one. Model's consciousness was then hibernated using state of the art cryo-chamber aka R's save function."), br(),
span("Some person was tricky enough to restore model, and here it is, right in your browser"), br(),
span("Now you have unique chance to test who is who using model's wisdom: "), br(),
span("there are checkboxes at the left, each corresponds to test data sample."), br(),
span("Just check some of them and model will predict who is who for given dataset."), br(),
span("This project is based on 'Predictive Machine Learning' course, and can be used as crib, So don't tell students of the course about this application")
)
))
|
0c0dea9b33c426545d9423a024d29000a7670326
|
3cf1b8a2b76b76053664644d33174f19a58ab42d
|
/Exercise_1.r
|
931d35668fbcc36e7346615bb5f83cda4163940e
|
[] |
no_license
|
SL222/data_wrangling_exercise_1
|
0841d98ed05ca732af8e8bed8aef1d50765b4c89
|
8d859e91d3218be398e36e1cc5df3fd9665ec374
|
refs/heads/master
| 2021-01-25T09:01:06.327857
| 2017-06-08T17:24:34
| 2017-06-08T17:24:34
| 93,772,712
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,061
|
r
|
Exercise_1.r
|
# Data wrangling exercise: clean the "refine_original" toy dataset
# (company-name typos, combined product codes, address fields) and write
# the tidied result to CSV.
#
# NOTE(review): the original script contained a pasted console prompt
# ("> refine_original <- ...") which is a syntax error when sourced; the
# prompt has been removed so the script runs top to bottom.
library(readr)
library(tidyr)
library(dplyr)

refine_original <- read_csv("~/Desktop/refine_original.csv")

# --- 1. Clean up brand names -------------------------------------------------
# Lowercase everything first (the original piped through tolower() but never
# assigned the result, so the normalisation was silently lost), then collapse
# the remaining misspellings onto the four canonical brands.
refine_original <- refine_original %>%
  mutate(company = tolower(company))
refine_original$company[refine_original$company %in%
  c("phillips", "phllips", "phillps", "fillips", "phlips")] <- "philips"
refine_original$company[refine_original$company %in% c("akz0", "ak zo")] <- "akzo"
refine_original$company[refine_original$company == "unilver"] <- "unilever"

# --- 2. Separate product code and number -------------------------------------
refine_original <- refine_original %>%
  separate("Product code / number", c("product_code", "product_number"), sep = "-")

# --- 3. Add product categories -----------------------------------------------
# Map the single-letter product code to a human-readable category.
refine_original$category[refine_original$product_code == "p"] <- "Smartphone"
refine_original$category[refine_original$product_code == "v"] <- "TV"
refine_original$category[refine_original$product_code == "x"] <- "Laptop"
refine_original$category[refine_original$product_code == "q"] <- "Tablet"

# --- 4. Add full address for geocoding ---------------------------------------
refine_original <- refine_original %>%
  unite(full_address, address, city, country, sep = ",")

# --- 5. Dummy variables ------------------------------------------------------
# One 0/1 indicator per company ...
refine_original$company_philips <- as.numeric(refine_original$company == "philips")
refine_original$company_akzo <- as.numeric(refine_original$company == "akzo")
refine_original$company_van_houten <- as.numeric(refine_original$company == "van houten")
refine_original$company_unilever <- as.numeric(refine_original$company == "unilever")
# ... and per product category. NOTE(review): the original named three of
# these "company_*" (copy-paste drift); they are product indicators and are
# renamed "product_*" accordingly.
refine_original$product_Laptop <- as.numeric(refine_original$category == "Laptop")
refine_original$product_Smartphone <- as.numeric(refine_original$category == "Smartphone")
refine_original$product_Tablet <- as.numeric(refine_original$category == "Tablet")
refine_original$product_TV <- as.numeric(refine_original$category == "TV")

# --- 6. Export ---------------------------------------------------------------
write.csv(refine_original, "refine_original_tidied.csv")
|
45db1ad3ce7bf1b739566fb4a29d41d31d2cc34d
|
296ed80a99aad799e80bfdf43dc47f5da261b454
|
/graph-algorithms/graph_kruskal_time_plot.R
|
498b50a168065d44d6595b8d39e8f33b59622b78
|
[
"MIT"
] |
permissive
|
lucaspetry/algorithms-and-data-structures
|
d7be6510e05d2b84889acc53e5c4b6751094d3a7
|
104d79923f856fde218ad37f285643d42b2379cc
|
refs/heads/master
| 2021-09-17T06:44:42.338698
| 2018-06-28T20:05:59
| 2018-06-28T20:05:59
| 107,608,779
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 608
|
r
|
graph_kruskal_time_plot.R
|
# Plot Kruskal running time (ms) against edge count from a CSV produced by
# the benchmark harness.
# Usage: Rscript graph_kruskal_time_plot.R <relative/path/to/data.csv>
library(ggplot2)
library(here)

args <- commandArgs(trailingOnly = TRUE)
if (length(args) < 1) {
  # stop() prints the message and exits non-zero under Rscript; the original
  # print() + quit() exited with status 0, hiding the failure from callers.
  stop("Please inform the data file!", call. = FALSE)
}

inputFile <- paste0(here(), "/", args[1])
data <- read.csv(inputFile, header = TRUE)

plot <- ggplot(data, aes(x = edges, y = time)) +
  geom_line() +
  geom_point(size = 2) +
  xlab("Número de arestas") +
  ylab("Tempo (ms)") +
  theme_bw()

# device must be the *string* "pdf": the original passed `device = pdf()`,
# which opened a stray PDF graphics device as a side effect and handed
# ggsave the call's NULL return. Also the real argument name is `units`
# (the original `unit =` only worked through partial matching).
ggsave(paste0(inputFile, ".pdf"), plot, device = "pdf", width = 6, height = 4, units = "in")
|
78af27e8ec4ecdb2b2cdb55211a2b3165b30ed29
|
4848ca8518dc0d2b62c27abf5635952e6c7d7d67
|
/R/d_2l.R
|
ddaa35e0307a6bcfd3a7e4d835076b4185840cd8
|
[] |
no_license
|
regenesis90/KHCMinR
|
ede72486081c87f5e18f5038e6126cb033f9bf67
|
895ca40e4f9953e4fb69407461c9758dc6c02cb4
|
refs/heads/master
| 2023-06-28T00:29:04.365990
| 2021-07-22T04:44:03
| 2021-07-22T04:44:03
| 369,752,159
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,041
|
r
|
d_2l.R
|
#' Average control delay per vehicle in 2-lane Road
#'
#' Average control delay per vehicle on a two-lane road (seconds/vehicle).
#' It follows <Formula 7-8> in KHCM(2013) p.175:
#' \code{d = d_1 * PF + d_2 + d_3}.
#'
#' @param d_1 Uniform control delay (sec/vehicle). Must be non-negative.
#' @param d_2 Incremental delay reflecting randomness and oversaturation;
#'   applies when no vehicle remains at the end of the cycle immediately
#'   preceding the analysis period (sec/vehicle). Must be non-negative.
#' @param d_3 Additional delay (sec/vehicle) experienced by vehicles arriving
#'   in the analysis period due to a residual queue from before the analysis
#'   period. Must be non-negative.
#' @param PF Progression adjustment factor for signal coordination. Must be
#'   non-negative.
#' @return Average control delay per vehicle (seconds/vehicle).
#' @export
#' @examples
#' d_2l(d_1 = 3.29, d_2 = 3.42, d_3 = 1.2, PF = 0.8)
d_2l <- function(d_1 = NULL, d_2 = NULL, d_3 = NULL, PF = NULL){
  # Validate up front. The original *returned* an error string on bad input,
  # which silently propagates into downstream arithmetic; stop() signals a
  # real error instead. The message also said "positive" while the check
  # accepts zero -- it now says "non-negative" to match the check.
  if (is.null(d_1) || is.null(d_2) || is.null(d_3) || is.null(PF)) {
    stop("[d_1], [d_2], [d_3] and [PF] must all be supplied.", call. = FALSE)
  }
  if (!(d_1 >= 0 && d_2 >= 0 && d_3 >= 0 && PF >= 0)) {
    stop("[d_1], [d_2], [d_3], [PF] must be non-negative. Please check that.", call. = FALSE)
  }
  d_1 * PF + d_2 + d_3
}
|
9924063010dfad62d5b4b7284190b78634a7c577
|
0c68b6774adcdf6410c14a6d49c0c236fcde0b36
|
/R/Data_PTAGIS_Marsh_Chin.R
|
78213ccdac7d8816fb35d525bb290ea230165f3a
|
[] |
no_license
|
KevinSee/MYTSBE
|
6a3bcb40600548be564f1822a5680ed44bebcbe3
|
3774ba3ee1c01b176cccd4c3c6d031ab8564e761
|
refs/heads/master
| 2021-08-31T19:43:58.628490
| 2017-12-22T16:11:02
| 2017-12-22T16:11:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 371
|
r
|
Data_PTAGIS_Marsh_Chin.R
|
#' @title Example juvenile Chinook PTAGIS data, Marsh Creek, ID, 2005-2015
#'
#' @description Example PTAGIS data for juvenile Chinook at Marsh Creek, ID,
#'   2005-2015, pre-formatted so it can be passed directly to \code{MYTSBE()}.
#' @name Data_PTAGIS_Marsh_Chin
#' @docType data
#' @usage Data_PTAGIS_Marsh_Chin
#' @format Data is formatted and ready for \code{MYTSBE()}.
#' @keywords example PTAGIS data for Marsh Creek Chinook
NULL
|
1583e25a9707d7cad22469a4912cd0045acaccd1
|
346b182c898c002b9118dfb59a07e248d8311827
|
/relationships/scripts/correlation.R
|
7d6f5d5caa62c0fb4648980199311f2326e79954
|
[] |
no_license
|
kkulbir/30daychartchallenge
|
956d4565c3f09a24656b24d3310f9699bb41b0c3
|
4bc0e66f4d7ba5a5614412d8e7a7c392d2ec840f
|
refs/heads/main
| 2023-04-02T21:59:56.788096
| 2021-04-14T03:15:47
| 2021-04-14T03:15:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,704
|
r
|
correlation.R
|
# 30DayChartChallenge, "relationships" category: violin + point plot of the
# number of likes per challenge category for @geokaramanis tweets.
library(dplyr)
library(ggplot2)
# Load the scraped tweet spreadsheet and normalise the column names.
tweets = rio::import(here::here("relationships", "data", "geokaramanis_tweets.xlsx")) %>%
janitor::clean_names()
# Median likes per challenge category; used below to position the
# annotation segments, labels and arrows.
median_comp = tweets %>%
filter(challenge == "Comparisons") %>%
summarise(median_comp = median(likes)) %>%
pull(median_comp)
median_dist = tweets %>%
filter(challenge == "Distributions") %>%
summarise(median_dist = median(likes)) %>%
pull(median_dist)
# Violin (distribution shape) per category with the individual tweets
# overlaid as points, plus a median segment and a curved-arrow label for
# each category.
# NOTE(review): the label strings "56.5" and "51" are hard-coded -- they will
# go stale if the underlying data change; consider building them from
# median_comp / median_dist instead.
ggplot(data = tweets) +
geom_violin(aes(x = challenge, y = likes),
color = NA, fill = "#CBC3E3",
trim = FALSE) +
geom_point(aes(x = challenge, y = likes),
fill = "orange",
shape = 21) +
# comp labels
geom_segment(aes(y = median_comp, yend = median_comp,
x = 0.75, xend = 1.25)) +
geom_text(aes(x = 0.6, y = median_comp - 20, label = "Median: \n56.5"),
family = "Fira Sans Bold",
lineheight = 0.75) +
geom_curve(aes(x = 0.6, xend = 0.73,
y = median_comp - 15, yend = median_comp),
size = .25,
curvature = -.5,
arrow = arrow(length = unit(0.03, "npc"))) +
# dist labels
geom_segment(aes(y = median_dist, yend = median_dist,
x = 1.75, xend = 2.25)) +
geom_text(aes(x = 1.6, y = median_dist - 20, label = "Median: \n51"),
family = "Fira Sans Bold",
lineheight = 0.75) +
geom_curve(aes(x = 1.6, xend = 1.73,
y = median_dist - 15, yend = median_dist),
size = .25,
curvature = -.5,
arrow = arrow(length = unit(0.03, "npc"))) +
scale_y_continuous(breaks = seq(0, 120, by = 10),
sec.axis = dup_axis()) +
labs(title = "Number of likes by #30DayChartChallenge categories \nfrom @geokaramanis tweets",
caption = "data: Twitter (specifically @geokaramanis account) | viz: @ijeamaka_a") +
theme_minimal(base_family = "Fira Sans Bold") +
theme(
legend.position = "none",
axis.title = element_blank(),
axis.text.x = element_text(size = 12),
axis.text.y = element_text(color = "grey56"),
plot.title = element_text(hjust = 0.5, margin = margin(0, 0, 10, 0), lineheight = 1, size = 16),
plot.caption = element_text(hjust = 0.5, margin = margin(20, 0, 0, 0), size = 8, color = "grey70"),
plot.background = element_rect(fill = "grey97", color = NA),
panel.background = element_rect(fill = "grey97", color = NA),
panel.grid.major.x = element_blank(),
panel.grid.minor.y = element_blank()
)
# Save the most recent plot at a fixed 7x7 in / 320 dpi into the outputs
# directory.
ggsave("correlation.png", plot = last_plot(),
device = "png", path = here::here("relationships", "outputs"),
dpi = 320, width = 7, height = 7)
|
ea19567dbc7c9084c77f07df87aea0d92c083f9f
|
0e8904c009ce9d893bb754b1202d4e8cb4855c63
|
/scratch/03_get_paper_metadata.R
|
c68234f62103f42781b4877a07274e5c15c1db5b
|
[] |
no_license
|
richpauloo/cig_nlp
|
0fa0ae38a5c9005027df97e38d2de5c161d9f96e
|
ff0dbd151475a6b16d4994623eeac23d12e1a27a
|
refs/heads/master
| 2022-03-23T18:46:29.835678
| 2019-12-29T23:35:29
| 2019-12-29T23:35:29
| 158,959,331
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,436
|
r
|
03_get_paper_metadata.R
|
# Extract DOI labels from previously-scraped paper text (raw.rds) so that
# journal metadata can later be fetched from Scopus.
library(pdftools)
library(tokenizers)
library(stringr)
library(tidyr)
library(dplyr)
library(readr)
library(ggplot2)
library(viridis)
library(colormap)

fp <- "C:/Users/rpauloo/Desktop/2019 CItation/Papers/" # PC
# fp <- "/Users/richpauloo/Desktop/2019 CItation/Papers/" # Mac

# paper file names
p <- list.files(paste0(fp, "all_papers"))

# DOI label regex: matches a "doi:"/"DOI " style prefix only when it is
# immediately followed by a digit (lookahead), so str_locate()'s `end`
# points just before the DOI number itself.
doi_regex <- "doi:(?=[:digit:])|doi: (?=[:digit:])|DOI:(?=[:digit:])|DOI: (?=[:digit:])|doi (?=[:digit:])|doi(?=[:digit:])|DOI (?=[:digit:])|DOI(?=[:digit:])"

# raw text, one element per paper (produced by an earlier script)
raw <- read_rds("raw.rds")

# Locate the first DOI label in each paper. (The original pre-allocated a
# list and immediately overwrote it with lapply(), and called bind_rows()
# without using the result; both no-ops were removed, along with an
# interactive str_view() sanity check that has no effect in a batch run.)
doi_loc <- lapply(raw, str_locate, doi_regex)

# One row per paper: start/end of the DOI label, plus the file name.
doi_df <- do.call(rbind.data.frame, doi_loc) %>% mutate(paper = p)

# Treat matches deeper than 500 characters into the text as false positives
# (a real DOI label should appear near the top of the first page).
doi_df <- doi_df %>%
  mutate(start = ifelse(start <= 500, start, NA),
         end = ifelse(start <= 500, end, NA))

# Keep only the papers with a plausible DOI label.
raw_sub <- raw[!is.na(doi_df[, 1])]
doi_df_sub <- doi_df %>% filter(!is.na(start))

# Grab the 50 characters following each DOI label; this window should
# contain the DOI itself.
temp <- vector("list", length = length(raw_sub))
for (i in seq_along(raw_sub)) {
  temp[[i]] <- str_sub(string = raw_sub[[i]], start = doi_df_sub[i, "end"], end = doi_df_sub[i, "end"] + 50)
}

# TODO: send the list of paper DOIs to Scopus and return journal names
# TODO: send the list of journal names to Scopus and return impact factors
|
ee69e48d3e29a6116821bb88156826c2b1cae20b
|
3e63a021d9f7ee9cd23da2246eacb68572fe3822
|
/R/random_tibble.R
|
726e1ee47580bae7f05699090f1612e1f7cc7860
|
[] |
no_license
|
vegart/R-pkg-clusterd
|
fc9cdeaff02158bff68469887cbc9aaed9d71058
|
88c1f82554c8de3541887ad28934e478a81de034
|
refs/heads/master
| 2020-07-18T17:09:20.588981
| 2019-09-10T08:16:57
| 2019-09-10T08:16:57
| 206,279,761
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,733
|
r
|
random_tibble.R
|
#' @title random.tibble
#' @description Rows generated from uniform, normal and chi-square
#'   distributions with differing parameters, combined into a single data
#'   frame. 10000 + 5000 + 2500 rows are drawn per distribution family
#'   (52500 rows total). NOTE(review): no seed is set, so the contents
#'   differ on every build.
#' @format A data frame with 52500 rows and 2 variables:
#' \describe{
#'   \item{\code{label}}{character/factor grouping label identifying the
#'     generating call}
#'   \item{\code{rand}}{double value generated from runif, rnorm, or rchisq}
#'}
#' @details NULL
random.tibble <- (
function(){
# NOTE(review): several variable names below do not match the parameters
# actually passed -- e.g. runif.10000_1_100 uses max = 30, runif.5000_4_400
# uses min = 5 / max = 10, runif.2500_3_300 uses min = 3 / max = 20. The
# label column inherits these misleading names; confirm which side
# (names or parameters) is intended before relying on the labels.
runif.10000_1_100 <- runif(n = 10000,min = 1,max=30)
runif.5000_4_400 <- runif(n = 5000,min = 5,max=10)
runif.2500_3_300 <- runif(n = 2500,min = 3,max=20)
rnorm.10000_0_1 <- rnorm(n = 10000,mean = 0,sd=1)
rnorm.5000_3_3 <- rnorm(n = 5000,mean = 3,sd=3)
rnorm.2500_5_5 <- rnorm(n = 2500,mean = 5,sd=5)
rchisq.10000_0_2 <- rchisq(10000, df = 0, ncp = 2.)
rchisq.5000_3_4 <- rchisq(5000, df = 3, ncp = 4.)
rchisq.2500_1_1 <- rchisq(2500, df = 1, ncp = 1.)
# Nested list: one element per distribution family, each holding three
# label/rand data frames of decreasing size.
random.list <- list(
runif.element= list(
data.frame(label="runif.10000_1_100",rand=runif.10000_1_100),
data.frame(label="runif.5000_4_400",rand=runif.5000_4_400),
data.frame(label="runif.2500_3_300",rand=runif.2500_3_300)
),
rnorm.element = list(
data.frame(label="rnorm.10000_0_1",rand=rnorm.10000_0_1),
data.frame(label="rnorm.5000_3_3",rand=rnorm.5000_3_3),
data.frame(label="rnorm.2500_5_5",rand=rnorm.2500_5_5)
),
rchisq.element = list(
data.frame(label="rchisq.10000_0_2",rand=rchisq.10000_0_2),
data.frame(label="rchisq.5000_3_4",rand=rchisq.5000_3_4),
data.frame(label="rchisq.2500_1_1",rand=rchisq.2500_1_1)
)
)
# Flatten one level of nesting, then stack all nine frames row-wise.
random.tibble <- Reduce(rbind,unlist(random.list,recursive = FALSE))
random.tibble
}
)()
|
c9d2357b3f2ebecdf8a94366f0b05fb9109da472
|
2c92ce8af3f0b90f8b243b87abd8dc969ddafe29
|
/Packages/ETJPackage/man/smrzvectors.Rd
|
078529b62b0b5368a74923169c63f8ae5c1eaf61
|
[] |
no_license
|
anhnguyendepocen/AdvancedR-1
|
aaefb98850761ba783edab008de39d5e8d005a5c
|
025456f76eae60d493cbcd27718ecdfbcbd95e71
|
refs/heads/master
| 2020-05-05T11:04:46.251918
| 2018-03-13T04:25:03
| 2018-03-13T04:25:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 330
|
rd
|
smrzvectors.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/practice.R
\name{smrzvectors}
\alias{smrzvectors}
\title{Summarize Numeric vectors}
\usage{
smrzvectors(x)
}
\arguments{
\item{x}{a vector of numbers}
}
\value{
The mean, median, and variance of the vector.
}
\description{
Gives summaries of a set of numbers
}
|
799af504b4e72f32a26caecc81283c6fbd6b82cc
|
a4f2a73b0beec40d3cfd649d6fd96eb6a4d22dd5
|
/R/casti.R
|
f139f7eed9a68e7da45647e5d58967286f4ecb17
|
[] |
no_license
|
gzitzlsb-it4i/RCzechia
|
84a413f48d6d961220eaa971a09e6d79543292ad
|
c5c8fc424c669fadc1063f8a31fbd9c068f5d0dc
|
refs/heads/master
| 2023-04-07T18:49:23.398749
| 2021-04-13T21:29:44
| 2021-04-13T21:29:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 800
|
r
|
casti.R
|
#' City Parts
#'
#' Takes no parameters and returns the districts of Prague and other major
#' Czech cities as a data frame of \code{sf} polygons.
#'
#' Because of package size constraints the data are hosted externally, so a
#' working internet connection is required to use this function.
#'
#' The data is current to April 2021. Downloaded size is 1.5 MB.
#'
#' @format \code{sf} data frame with 142 rows of 4 variables + geometry
#'
#' \describe{
#'   \item{KOD}{Code of the city part / kod mestske casti}
#'   \item{NAZEV}{Name of the city part / nazev mestske casti}
#'   \item{KOD_OBEC}{Code of the city}
#'   \item{NAZ_OBEC}{Name of the city}
#' }
#'
#' @source © ČÚZK, 2021 \url{https://vdp.cuzk.cz/}
#'
#' @export
casti <- function() {
  # Fetch the pre-built sf object from the remote store and return it
  # directly; no intermediate variable is needed.
  downloader("casti-R-2021-03.rds")
}
|
4a1021481b3cf9d52b3cb85f6196ae75d488444e
|
b0773858735860255285bd52fb16b083b920389b
|
/scu_bindStreamCatST.R
|
50e0efb697846a0a4d3612e2dac8d79fd1204219
|
[
"MIT"
] |
permissive
|
daauerbach/streamcatUtils
|
2baa5432d3e16ac4f2974de8e9ce9d0799a2409f
|
125ed7134347d1539f5ec9869a8250b42ac8361f
|
refs/heads/master
| 2021-01-10T13:19:29.525601
| 2016-04-08T18:38:38
| 2016-04-08T18:38:38
| 53,871,588
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 964
|
r
|
scu_bindStreamCatST.R
|
# Convenience function to roll up data from multiple states into a single
# object, removing any duplicate cats/COMIDs.
# Default file location is the "catdata" directory under the working
# directory. Performs a very simple (fixed-string) check that each requested
# state has a matching catdata<ST>.rds object before reading.
bindStreamCatST = function(states
                           ,dirCatdata = file.path(getwd(), "catdata")
                           ){
  # Fail fast if dplyr is unavailable. The original called require() and
  # install.packages() inside the function body; installing packages as a
  # side effect of a data call is surprising and breaks without network
  # access, so we just report the missing dependency instead.
  if (!requireNamespace("dplyr", quietly = TRUE)) {
    stop("Package 'dplyr' is required: install.packages(\"dplyr\")", call. = FALSE)
  }
  # States for which a catdata<ST>.rds file actually exists on disk.
  # (The "." in ".rds" is now escaped so it only matches a literal dot.)
  availst = states %in% gsub("catdata|\\.rds", "", list.files(dirCatdata, pattern = "catdata"))
  if(!all(availst)){
    cat(paste("No data available for", states[!availst], "\nRemoved from returned object\n"))
    states = states[availst]
  }
  # Read each state's table, stack them row-wise, and drop duplicate rows.
  return(dplyr::distinct(
    do.call(rbind
            ,lapply(states, function(i) readRDS(file.path(dirCatdata, paste0("catdata", i, ".rds"))))
            )
  ))
} #end function
|
0445bf912fb10888561d00331b933c57ea6d4324
|
5bbee99774195fcbdf5aabd8c35c958637b91e9c
|
/man/plotStats.Rd
|
0f4fde7314a2f68d8b6945448fb061d992e606fe
|
[] |
no_license
|
kevinsanft/StochKit2R
|
92d5222e69347126d2871458005d77f4366a587f
|
c3ec6e88675672372ccb35920f667e53e37dcb16
|
refs/heads/master
| 2020-12-30T12:56:05.411997
| 2018-05-31T12:48:30
| 2018-05-31T12:48:30
| 91,376,075
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,130
|
rd
|
plotStats.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotStats.R
\name{plotStats}
\alias{plotStats}
\title{Plot StochKit2R simulation statistics output data}
\usage{
plotStats(data, species = NULL)
}
\arguments{
\item{data}{ensemble output from ssa or tauLeaping (stats object must exist, i.e. ensemble must NOT have been run with noStats=TRUE).}
\item{species}{A character vector of species names or a numeric vector of species indexes of the species that will be plotted. For numeric indexes, the first species is index 1. By default =NULL, all species are plotted.}
}
\value{
The ggplot object
}
\description{
\code{plotStats} plots the means and the means +/- one standard deviation of the populations specified in \code{species}.
}
\examples{
\dontrun{
#example using included dimer_decay.xml file
model <- system.file("dimer_decay.xml",package="StochKit2R")
#output written to ex_out directory (created in current working directory)
out <- ssa(model,time=10,realizations=100,intervals=20)
#plot the data for all species
plotStats(out)
#plot the data for species S2 and S3
plotStats(out,species=c("S2","S3"))
}
}
|
dbf437aa161de38801f9e07ab0af82b1e25d342c
|
af2308b13b2ef40aa0b9e7097b88bf45163192c5
|
/code/clean_data_script.R
|
ac3551c3c2eb8690260f5b55664934632cdc117d
|
[] |
no_license
|
Kehui-Zhang/workout3
|
5659b04405af85d2ab7f06be051f1d117db9dc94
|
8a1ca65c9a9c50e5ebed844a0deefdc4596a605b
|
refs/heads/master
| 2020-09-27T19:02:17.043203
| 2019-12-07T23:04:05
| 2019-12-07T23:04:05
| 226,587,055
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 891
|
r
|
clean_data_script.R
|
### Construct a data frame for abhijit_banerjee
# `ab` is a Google Scholar profile page scraped in an earlier script; each
# citation row (class "gsc_a_tr") has 5 <td> cells in order:
# paper name, researcher(s), journal, citation count, year.
ab_info_link <- ab %>% html_nodes(xpath = '//*[@id="gsc_a_b"]') %>%
  html_nodes(xpath = '//*[@class="gsc_a_tr"]') %>% html_nodes(xpath = 'td')
# vapply instead of sapply: html_text() always returns one string per node,
# and vapply keeps the result type stable even for empty input.
ab_result <- vapply(html_children(ab_info_link), html_text, character(1))
# Drop the "*" markers Scholar inserts between cells.
ab_result <- ab_result[ab_result != '*']
# Reshape the flat 5-cells-per-row vector into a data frame.
ab_citation_df <- data.frame(paperName = ab_result[seq(1, length(ab_result), 5)],
                             researcher = ab_result[seq(2, length(ab_result), 5)],
                             journal = ab_result[seq(3, length(ab_result), 5)],
                             citations = ab_result[seq(4, length(ab_result), 5)],
                             year = ab_result[seq(5, length(ab_result), 5)])
ab_citation_df$citations <- as.integer(ab_citation_df$citations)
# Write with an explicit path. The original changed the global working
# directory with setwd() (and called getwd() without using the result);
# an explicit file path avoids both side effects.
out_dir <- "/Users/zhangkehui/Desktop/UCB/Stat133/workouts/workout3/data/cleandata"
write.csv(ab_citation_df, file.path(out_dir, 'ab_citation_df.csv'))
|
6f4e7e77b911fb8fbcaffb1504f87a2ba18ab5bb
|
74a16ca69df3fb7aa6c8cb2c9fb75ac65a85be07
|
/run_analysis.R
|
224b825737afaf15d5e67e57e34bd06f3c989d55
|
[] |
no_license
|
apietrelli/Getting-and-Cleaning-Data-assignment
|
4002f3e1801db736d332f524774e8611f9633cdc
|
7e1654fd43327364eed339971971311d66347e4c
|
refs/heads/master
| 2021-04-26T16:44:34.432687
| 2016-01-30T12:42:10
| 2016-01-30T12:42:10
| 50,719,459
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 242
|
r
|
run_analysis.R
|
# Knit run_analysis.Rmd to markdown, then convert the markdown to HTML.
# library() fails fast when a package is missing; the original require()
# calls only return FALSE, letting the script fail later with a confusing
# "could not find function" error.
library(knitr)
library(markdown)
# Change this directory to your local checkout before running; knit()
# resolves run_analysis.Rmd relative to the working directory.
setwd("~/Documents/Coursera_DataScience/3-Getting_cleaning_Data/Getting-and-Cleaning-Data-Assignment")
#
knit("run_analysis.Rmd")
markdownToHTML("run_analysis.md", "run_analysis.html")
|
6fef5c0f64f3662b777ec820d15a53fc1e79ca75
|
628dd6d7f44d6c90202509f5f7b756ec1d58a677
|
/man/wp.correlation.Rd
|
5c5eb80d25b9f823909c84d3eca7a362f6fbf3a3
|
[] |
no_license
|
johnnyzhz/WebPower
|
b87e1182575161ddae784f39f6e81218af2326fd
|
43f60e110c3099e9ccc6a9fd0a5609b3c3d34d9d
|
refs/heads/master
| 2023-05-25T05:22:58.281976
| 2023-05-18T12:17:56
| 2023-05-18T12:17:56
| 129,413,446
| 5
| 5
| null | 2023-08-15T21:11:57
| 2018-04-13T14:28:11
|
R
|
UTF-8
|
R
| false
| true
| 3,794
|
rd
|
wp.correlation.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/webpower.R
\name{wp.correlation}
\alias{wp.correlation}
\title{Statistical Power Analysis for Correlation}
\usage{
wp.correlation(n = NULL, r = NULL, power = NULL, p = 0, rho0 = 0,
alpha = 0.05, alternative = c("two.sided", "less", "greater"))
}
\arguments{
\item{n}{Sample size.}
\item{r}{Effect size or correlation. According to \cite{Cohen (1988)}, a correlation coefficient of 0.10, 0.30, and 0.50 are considered as an effect size of "small", "medium", and "large", respectively.}
\item{power}{Statistical power.}
\item{p}{Number of variables to partial out.}
\item{rho0}{Null correlation coefficient.}
\item{alpha}{Significance level chosed for the test. It equals 0.05 by default.}
\item{alternative}{Direction of the alternative hypothesis (\code{"two.sided"} or \code{"less"} or \code{"greater"}). The default is "two.sided".}
}
\value{
An object of the power analysis.
}
\description{
This function is for power analysis for correlation. Correlation measures whether and how a pair of variables are related. The Pearson Product Moment correlation coefficient (r) is adopted here.
The power calculation for correlation is conducted based on Fisher's z transformation of Pearson correlation coefficent \cite{(Fisher, 1915, 1921)}.
}
\examples{
wp.correlation(n=50,r=0.3, alternative="two.sided")
# Power for correlation
#
# n r alpha power
# 50 0.3 0.05 0.5728731
#
# URL: http://psychstat.org/correlation
#To calculate the power curve with a sequence of sample sizes:
res <- wp.correlation(n=seq(50,100,10),r=0.3, alternative="two.sided")
res
# Power for correlation
#
# n r alpha power
# 50 0.3 0.05 0.5728731
# 60 0.3 0.05 0.6541956
# 70 0.3 0.05 0.7230482
# 80 0.3 0.05 0.7803111
# 90 0.3 0.05 0.8272250
# 100 0.3 0.05 0.8651692
#
# URL: http://psychstat.org/correlation
#To plot the power curve:
plot(res, type='b')
#To estimate the sample size with a given power:
wp.correlation(n=NULL, r=0.3, power=0.8, alternative="two.sided")
# Power for correlation
#
# n r alpha power
# 83.94932 0.3 0.05 0.8
#
# URL: http://psychstat.org/correlation
#To estimate the minimum detectable effect size with a given power:
wp.correlation(n=NULL,r=0.3, power=0.8, alternative="two.sided")
# Power for correlation
#
# n r alpha power
# 83.94932 0.3 0.05 0.8
#
# URL: http://psychstat.org/correlation
#
#To calculate the power curve with a sequence of effect sizes:
res <- wp.correlation(n=100,r=seq(0.05,0.8,0.05), alternative="two.sided")
res
# Power for correlation
#
# n r alpha power
# 100 0.05 0.05 0.07854715
# 100 0.10 0.05 0.16839833
# 100 0.15 0.05 0.32163978
# 100 0.20 0.05 0.51870091
# 100 0.25 0.05 0.71507374
# 100 0.30 0.05 0.86516918
# 100 0.35 0.05 0.95128316
# 100 0.40 0.05 0.98724538
# 100 0.45 0.05 0.99772995
# 100 0.50 0.05 0.99974699
# 100 0.55 0.05 0.99998418
# 100 0.60 0.05 0.99999952
# 100 0.65 0.05 0.99999999
# 100 0.70 0.05 1.00000000
# 100 0.75 0.05 1.00000000
# 100 0.80 0.05 1.00000000
#
# URL: http://psychstat.org/correlation
}
\references{
Cohen, J. (1988). Statistical power analysis for the behavioral sciences (2nd Ed). Hillsdale, NJ: Lawrence Erlbaum Associates.
Fisher, R. A. (1915). Frequency distribution of the values of the correlation coefficient in samples from an indefinitely large population. Biometrika, 10(4), 507-521.
Fisher, R. A. (1921). On the probable error of a coefficient of correlation deduced from a small sample. Metron, 1, 3-32.
Zhang, Z., & Yuan, K.-H. (2018). Practical Statistical Power Analysis Using Webpower and R (Eds). Granger, IN: ISDSA Press.
}
|
85a11e64c2aee6976015e6435ba54c52b8a6fa3a
|
4ffab2641a17093a10b366ce567dff007175847b
|
/man/Initializer-class.Rd
|
13a7821d960bd3742f35202a6adaa559c0ac1785
|
[] |
no_license
|
cdeterman/lazytensor
|
02f4da5dd581ee3517c38741476e2cfb439dd22c
|
7a4c68fb3e757b82cf4bbef79b6ba1511160ce4b
|
refs/heads/master
| 2021-08-31T20:28:50.954011
| 2017-12-22T19:09:42
| 2017-12-22T19:09:42
| 109,305,353
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 775
|
rd
|
Initializer-class.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/initializers.R
\docType{data}
\name{Initializer}
\alias{Initializer}
\alias{Zeros}
\alias{Ones}
\alias{Constant}
\alias{RandomNormal}
\title{Initializer}
\format{An object of class \code{R6ClassGenerator} of length 24.}
\usage{
Initializer
Zeros
Ones
Constant
RandomNormal
}
\value{
Object of class relevant to defined backend
}
\description{
Initializer for a tensor
}
\section{Usage}{
\preformatted{
z = Zeros$new(shape = c(4,4))
o = Ones$new(shape = c(4,4))
c = Constant$new(shape = c(4,4), constant = 2)
r = RandomNormal$new(shape = c(4,4))
}
}
\section{Public Methods}{
\describe{
\item{\code{compute()}}{Evaluate Initializer}
}
}
\author{
Charles Determan Jr.
}
\keyword{datasets}
|
80bc00392be7bd056f83ae0e5afbc89f86fe296f
|
ee3a68ddb5f24a85a0e1f8590b89532df5af957f
|
/R/plot_specs_shiny.r
|
000f1fe32f9f2bab0a89167cb664614b4aa6c7ae
|
[] |
no_license
|
darcyj/specificity.shiny
|
6691a77b0b025b17cacd90febcf3b2c5499a2146
|
8a04d1c5e58d142a8d18f4ebd45a5b8c46298183
|
refs/heads/master
| 2023-06-30T13:34:57.961999
| 2021-08-06T19:56:17
| 2021-08-06T19:56:17
| 390,855,415
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,020
|
r
|
plot_specs_shiny.r
|
#' plot_specs_shiny
#'
#' Runs an interactive shiny visualization. Data can be re-downloaded from that
#' visualization, in either "wide" or "long" format. See ?aggregate_specs_list
#' for explanation of those formats.
#'
#' @author John L. Darcy
#'
#' @param sl specs_list. A named list of outputs from phy_or_env_spec. See examples.
#' @param fd data.frame. Optional feature data - a data.frame object with one row per
#' feature, including some column with feature IDs that includes feature IDs in sl as
#' rownames. If NULL, no feature data will be used (default:NULL).
#' @param fd_id_col integer or string. Either the column number of fd containing feature
#' IDs (see above), or the column name of that column (default: 1).
#' @return Returns output of shiny::shinyApp(). If run locally, this will manifest
#' as a browser window opening with an interactive visualization.
#'
#' @examples
#' # attach(antarctica)
#' # plot_specs_shiny(antarctica_specs_list, antarctica_taxonomy, 1)
#'
#'
#' @export
plot_specs_shiny <- function(sl, fd=NULL, fd_id_col=1){
# handle fd_id_col, but only if fd is provided
if(!is.null(fd)){
if(is.numeric(fd_id_col) && (fd_id_col %% 1 == 0) && (fd_id_col <= ncol(fd)) && (fd_id_col > 0)){
fd_id_col_name <- colnames(fd)[fd_id_col]
}else if(is.character(fd_id_col) && fd_id_col %in% colnames(fd)){
fd_id_col_name <- fd_id_col
}else{
stop("invalid fd_id_col")
}
}
# aggregate data into long and wide data types
# long - used for plotting with ggplot, does NOT have user metadata
# fat - used for displaying selected data to user
# baselong - only used for download outputs; contains everything
longdata <- aggregate_specs_list(sl, byFeature=FALSE)
widedata <- aggregate_specs_list(sl, byFeature=TRUE, fd=fd,
fd_id=fd_id_col_name)
baselongdata <- aggregate_specs_list(sl, byFeature=FALSE, fd=fd,
fd_id=fd_id_col_name)
# figure out what columns are available for searching
fieldChoices <- colnames(widedata)
searchdefault <- "FeatureID"
# add plotting helper variables to longdata
# jittercenter: the x-axis center of violins
# jitterx : the x-axis position of jittered points
# This manual jittering has to be done so that the base ggplot type can be
# a scatterplot instead of violin, since violin won't work with brushing.
# said another way, scales for x and y must be continuous.
longdata$jitterx <- longdata$jittercenter <- rep(0, nrow(longdata))
jitterwidth <- 0.6 # (1 means jittered columns are touching)
vars <- unique(longdata$Variable)
for(i in 1:length(vars)){
var_i <- vars[i]
longdata$jitterx[longdata$Variable==var_i] <- runif(
n=sum(longdata$Variable==var_i),
min=i-(jitterwidth/2),
max=i+(jitterwidth/2)
)
longdata$jittercenter[longdata$Variable==var_i] <- i
}
rm(var_i)
# hilight: whether or not a point is hilighted. 0=don't hilight, 1=hilight.
# to be interpreted as alpha (opacity) value by ggplot.
longdata$hilight01 <- rep(0, nrow(longdata))
# show01: show this point, true or false? actually interpreted in ggplot as
# alpha, i.e. opacity. 1=show, 0=hide. points will be hidden if not found by
# search function.
longdata$show01 <- rep(1, nrow(longdata))
# define download choices
downloadChoices <- c(
"All data (long)",
"All data (wide)",
"Shown/searched data (long)",
"Shown/searched data (wide)",
"Brushed data (long)",
"Brushed data (wide)"
)
# page setup stuff
ui <- shiny::fluidPage(
shiny::fluidRow(
shiny::column(width = 12,
shiny::plotOutput("plot1", height = 300,
brush = shiny::brushOpts(id = "plot1_brush")
)
)
),
shiny::fluidRow(
shiny::column(width=4,
shiny::textInput("searchQuery", label = shiny::h4("Search string:"), value = ""),
),
shiny::column(width=4,
shiny::selectInput("searchField", label = shiny::h4("Search field:"),
choices=fieldChoices, selected=searchdefault)
),
shiny::column(width=2,
shiny::selectInput("searchIgnoreCase", label = shiny::h4("Ignore Case?"),
choices=c(TRUE,FALSE), selected=TRUE)
),
shiny::tags$style(type='text/css', "#searchButtonClick { width:100%; margin-top: 45px;}")
),
# these are formatted as such because I had difficulty getting the parens right
shiny::fluidRow(
shiny::column(width=2,
colourpicker::colourInput("sigColor",
value="black",
label=shiny::h4(
shiny::HTML(
paste0(
"<em>P</em>",
intToUtf8(8804),
intToUtf8(945),
" color:"
)
)
)
)
),
shiny::column(width=2,
colourpicker::colourInput("nsigColor",
value="gray",
label=shiny::h4(
shiny::HTML(
paste0(
"<em>P</em>",
">",
intToUtf8(945),
" color:"
)
)
)
)
),
shiny::column(width=2,
shiny::numericInput("alpha", label=shiny::h4(paste0(intToUtf8(945), ":")), value=0.05, step=0.01),
),
shiny::column(width=2,
shiny::numericInput("pointSize", label = shiny::h4("Point size:"), value=0.5, step=0.1),
),
shiny::column(width=2,
shiny::selectInput("pointPCH", label = shiny::h4("Point type:"),
choices=as.character(0:20), selected="19")
)
),
shiny::fluidRow(
shiny::column(width=2,
colourpicker::colourInput("hilightColor", label=shiny::h4("Brush hilight color:"),
value="red")
),
shiny::column(width=2,
shiny::numericInput("hilightSize", label=shiny::h4("Brush hilight size:"),
value=2, step=0.1),
),
shiny::column(width=4,
shiny::selectInput("downloadType", label = shiny::h4("Download type:"),
choices=downloadChoices, selected=1)
),
shiny::column(width=2,
shiny::downloadButton("downloadData", "Download")
),
shiny::tags$style(type='text/css', "#downloadData { width:100%; margin-top: 45px;}")
),
shiny::fluidRow(
shiny::column(width=12,
shiny::checkboxGroupInput(inputId="displayFields",
label=shiny::h4("Fields to show in brushed points (below):"),
choices = fieldChoices, selected=fieldChoices, inline=TRUE)
)
),
shiny::fluidRow(
shiny::column(width = 12,
shiny::h4("Brushed points:"),
# shiny::verbatimTextOutput("brush_info") # OLD
DT::dataTableOutput("brush_info") # NEW
)
)
)
# function to build points_df, the data.frame used for plotting geom_point via ggplot
make_points_df <- function(
longdf, # complete data object, long format (longdata)
widedf, # complete data object, wide format (widedata)
searchField, # which field to search
searchQuery, # text, what to search for
searchIgnoreCase, # if true, ignore case
sigColor, # color for significant features
nsigColor, # color for non-significant features
alpha){ # alpha value to determine significance (e.g. 0.05)
# longdf <- longdata
# widedf <- widedata
# searchField <- "tax"
# searchQuery <- "cyano"
# searchIgnoreCase <- TRUE
# sigColor <- "red"
# nsigColor <- "black"
# alpha <- 0.05
# determine significance, assign colors
longdf$pointColor <- rep(nsigColor, nrow(longdf))
longdf$pointColor[longdf$Pval <= alpha] <- sigColor
# do search, unless blank.
searchQuery <- gsub(" ", "", searchQuery)
if(searchQuery != ""){
j <- which(colnames(widedf) == searchField)[1]
hits <- grepl(pattern=searchQuery, x=as.character(widedf[,j]),
ignore.case=searchIgnoreCase)
hitIDs <- widedf$FeatureID[hits]
longdf$show01 <- rep(0, nrow(longdf))
longdf$show01[longdf$FeatureID %in% hitIDs] <- 1
}else{
longdf$show01 <- rep(1, nrow(longdf))
}
# significance
longdf$sigTF <- longdata$Pval <= alpha
# subset to exclude unshown points
return( longdf[longdf$show01 == 1, ] )
}
# function to make hilight_df, which is plotted UNDER points_df for hilight effect
make_hilight_df <- function(
points_df, # from make_points_df
brush_df){ # from brushedPoints()
return( points_df[points_df$FeatureID %in% brush_df$FeatureID, ] )
}
# function to build display_df (rows shown for selected points)
make_display_df <- function(
hilight_df, # see above
widedf, # widedata full version
displayFields){ # character vector of which fields to show
# subset widedf to only include visible+hilighted features in plotdf
features2keep <- unique(hilight_df$FeatureID)
output <- widedf[widedf$FeatureID %in% features2keep, ]
# subset widedf to only include columns selected by user
output <- output[, colnames(output) %in% displayFields]
return(output)
}
# function to extract colors from points_df since ggplot is STUPID and can't take
# colors as a column inside aes()
get_colors_from_points_df <- function(points_df){points_df$pointColor}
get_FeatureIDs <- function(df){unique(df$FeatureID)}
# server function
server <- function(input, output, session) {
# build points_df
points_df <- shiny::reactive({make_points_df(
longdf=longdata,
widedf=widedata,
searchField=input$searchField,
searchQuery=input$searchQuery,
searchIgnoreCase=input$searchIgnoreCase,
sigColor=input$sigColor,
nsigColor=input$nsigColor,
alpha=input$alpha
)})
# build hilight_df
hilight_df <- shiny::reactive({make_hilight_df(
points_df = points_df(),
brush_df = shiny::brushedPoints(longdata, input$plot1_brush)
)})
# build display_df
display_df <- shiny::reactive({make_display_df(
hilight_df=hilight_df(),
widedf=widedata,
displayFields=input$displayFields
)})
# output brushdata for rendering
# output$brush_info <- shiny::renderPrint({ display_df() }) # OLD
output$brush_info <- DT::renderDataTable(display_df(),
options = list(scrollX=TRUE, sDom='<"top">lrt<"bottom">ip')) # NEW
# draw plot
# using aes_string() instead of aes() because R CMD check thinks that the unquoted
# variable names used by aes() are undeclared variables. Easy enough to use aes_string()
# instead, which allows the variable names to be quoted.
output$plot1 <- shiny::renderPlot({
ggplot2::ggplot(data=longdata, mapping=ggplot2::aes_string(x="jitterx", y="Spec")) +
ggplot2::scale_x_continuous(limits=c(0.5, length(vars)+0.5), breaks=1:length(vars),
labels=vars, expand = c(0, 0)) + ggplot2::theme(axis.title.x = ggplot2::element_blank()) +
ggplot2::geom_violin(ggplot2::aes_string(group="jittercenter", y="Spec"), scale="count" ) +
ggplot2::geom_point(data=hilight_df(), ggplot2::aes_string(x="jitterx", y="Spec"),
color=input$hilightColor, size=input$hilightSize, shape=as.integer(input$pointPCH)) +
ggplot2::geom_point(data=points_df(), ggplot2::aes_string(x="jitterx", y="Spec"),
color=get_colors_from_points_df(points_df()),
size=input$pointSize, shape=as.integer(input$pointPCH))
})
# make data to download
data2download <- shiny::reactive({
if(input$downloadType == downloadChoices[1]){ #"All data (long)"
baselongdata
}else if(input$downloadType == downloadChoices[2]){ #"All data (wide)"
widedata
}else if(input$downloadType == downloadChoices[3]){ #"Shown/searched data (long)"
baselongdata[baselongdata$FeatureID %in% get_FeatureIDs(points_df()),]
}else if(input$downloadType == downloadChoices[4]){ #"Shown/searched data (wide)"
widedata[widedata$FeatureID %in% get_FeatureIDs(points_df()),]
}else if(input$downloadType == downloadChoices[5]){ #"Brushed data (long)"
baselongdata[baselongdata$FeatureID %in% get_FeatureIDs(display_df()),]
}else if(input$downloadType == downloadChoices[6]){ #"Brushed data (wide)"
widedata[widedata$FeatureID %in% get_FeatureIDs(display_df()),]
}else{
# just do full long data
baselongdata
}
})
output$downloadData <- shiny::downloadHandler(
filename = function() {
filename <- gsub(pattern="[ \\(\\)\\/]+", replacement="_", x=input$downloadType)
filename <- sub(pattern="_+$", replacement="", x=filename)
paste(filename, "_", Sys.Date(), ".tsv", sep="")
},
content = function(file) {
write.table(x=data2download(), file=file, sep="\t", row.names=FALSE, quote=FALSE)
}
)
}
# run program
shiny::shinyApp(ui, server)
}
|
9b0747eba5a89928b7eef3927bcefd0c838fe512
|
3306705a8177848618753a1ee485e723bc5cf2d7
|
/R major ML/graddescend.R
|
d246cbe5bdfadac209d85aff91e700d5a98e3e1c
|
[
"MIT"
] |
permissive
|
lfthwjx/MachineLearningR
|
01c510bba0f0232ed70d249e38c4929202ae370f
|
3e4dd5e723ee4b72202d398870feb0ff568f8a3d
|
refs/heads/master
| 2021-01-13T12:35:18.047677
| 2016-11-01T21:46:00
| 2016-11-01T21:46:00
| 72,579,399
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,516
|
r
|
graddescend.R
|
x = seq(-10,10,by=1)
yorg = x^2-25*x-10
y = yorg + runif(length(x),-20,20)
plot(x,y)
lines(x,yorg,col = "blue")
X = matrix(c(x^2,x,rep(1,length(x))),ncol = 3)
Xt = t(X)
solve(Xt%*%X)%*%Xt%*%t(t(y))
a=runif(1,0,1)
b=runif(1,0,1)
c=runif(1,0,1)
#iterative solution
alpha = 1e-5
N=length(x)
for (i in 1:100000) {
sumv=0
td_a=0
td_b=0
td_c=0
for (j in 1:N) {
yfit = a*x[j]^2+b*x[j]+c
td_a = td_a+2*(yfit-y[j])*x[j]^2
td_b = td_b+2*(yfit-y[j])*x[j]
td_c = td_c+2*(yfit-y[j])
sumv = sumv+(yfit-y[j])^2
}
print("------------------")
print(td_a)
print(td_b)
print(td_c)
print(paste("sumv:",sumv,seq=""))
a=a-alpha*td_a
b=b-alpha*td_b
c=c-alpha*td_c
}
###############################################
vnew = runif(3,-20,20)
N = length(x)
for (i in 1:2) {
sumv = 0
td_vc = c(0,0,0)
td_mt = matrix(rep(0,9),ncol = 3)
for (j in 1:N) {
yfit = vnew[1]*x[j]^2+vnew[2]*x[j]+vnew[3]
td_vc = td_vc + c(2*(yfit - y[j])*x[j]^2,2*(yfit-y[j])*x[j],2*(yfit - y[j]))
td_mt = td_mt + 2*matrix(c(x[j]^4,x[j]^3,x[j]^2,x[j]^3,x[j]^2,x[j],x[j]^2,x[j],1),ncol = 3)
sumv=sumv+(yfit-y[j])
}
vnew = vnew-solve(td_mt)%*%td_vc
}
out = split(iris,iris$Species)
out
out[1]
out[[1]]
aggregate(iris[,1:4],by=list(iris$Species),mean)
aggregate(cbind(Sepal.Length,Sepal.Width)~Species,iris,mean)
a0 = array(1:10,c(2,5))
a0
apply(a0,1,sum)
devtools::install_github("datacamp/RDocumentation")
library(RDocumentation)
|
15b7cbea8a6b72f592ab32f40caaed5a13e6926c
|
93fef68695ec291350e728b928c608f6cb9e09eb
|
/NewTCGA_2017scripts/Thesis_final_scripts/Prognostic_lncs_more_detail_paper/final_scripts_2019/revisions_2020/Figure4_run_TCGAbiolinks_individual_plots.R
|
3a61f518f8bb96ec9327ba9a31dfdb7a4a8c17f4
|
[] |
no_license
|
HongyuanWu/lncRNAs_TCGA
|
ae4fa9202704545fc59a9dae19dabeeda2b7cb34
|
cbfe2356f8d65b20672dcc378fe7de309eec3dba
|
refs/heads/master
| 2023-07-28T00:11:29.152750
| 2021-09-09T13:33:01
| 2021-09-09T13:33:01
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 14,091
|
r
|
Figure4_run_TCGAbiolinks_individual_plots.R
|
source("/u/kisaev/lncRNAs_TCGA/NewTCGA_2017scripts/Thesis_final_scripts/Prognostic_lncs_more_detail_paper/final_scripts_2019/revisions_2020/load_data.R")
#------FEATURES-----------------------------------------------------
setwd("/.mounts/labs/reimandlab/private/users/kisaev/Thesis/TCGA_FALL2017_PROCESSED_RNASEQ/lncRNAs_2019_manuscript")
allCands = readRDS("final_candidates_TCGA_PCAWG_results_100CVsofElasticNet_June15.rds")
allCands = subset(allCands, data == "TCGA") #173 unique lncRNA-cancer combos, #166 unique lncRNAs
allCands$combo = unique(paste(allCands$gene, allCands$cancer, sep="_"))
library(corrplot)
get_median_risk_group = function(PI_centered, PI_lower_thres, epsilon = 1e-16) {
if (!any(PI_centered > PI_lower_thres)) {
PI_lower_thres = PI_lower_thres - epsilon
} else if (all(PI_centered > PI_lower_thres)) {
PI_lower_thres = PI_lower_thres + epsilon
}
risk_group = 1 + (PI_centered > PI_lower_thres)
risk_group = c("low_risk", "high_risk")[risk_group]
risk_group = factor(risk_group, levels = c("low_risk", "high_risk"))
risk_group
}
#--------------------------------------------------------------------
#Clinical files - use TCGAbiolinks via previous script
#--------------------------------------------------------------------
clin = readRDS("clin_data_lncs_new_variables_July19_tcgabiolinks_data.rds")
for(i in 1:length(clin)){
print(i)
d = clin[[i]]
z=which(str_detect(colnames(d), "ENSG"))
d=d[,-z]
lncs_keep = filter(allCands, cancer %in% d$Cancer[1])$gene
gene_exp=as.data.table(filter(rna, Cancer == d$Cancer[1]))
z=which(colnames(gene_exp) %in% c(lncs_keep, "patient"))
gene_exp = gene_exp[,..z]
d = merge(d, gene_exp, by="patient")
clin[[i]] = d
}
#results from cleaned up analysis
res = readRDS("process_tcga_biolinks_results_for_plotting.rds")
colnames(res)[2] = "lncRNA"
res$pairs = paste(res$lncRNA, res$type, res$colname, sep="_")
pairs = unique(res$pairs)
require(vcd)
get_name=function(g){
z=which(allCands$gene == g)
name=allCands$gene_symbol[z]
name=name[1]
return(name)
}
#--------LOOK AT ASSOCIATIONS BETWEEN EXPRESSION-------------------------------
#For each clinical variable -> xaxis is the clinical variable
#y-axis it each lncRNAs expression
#x-axis is continous if variable is continous such as age...
get_clin_lnc_plots = function(dtt){
canc = dtt$Cancer[1]
print(canc)
print(dim(dtt))
cancer_type = canc_conv$type[which(canc_conv$Cancer %in% dtt$Cancer)]
#get lncs
z = which(str_detect(colnames(dtt), "ENSG"))
lncs = colnames(dtt)[z]
z = which(lncs %in% res$lnc)
if(!(length(z)==0)){
lncs=lncs[z]
#look at individual lncRNAs
get_cor = function(lnc){
z = which((str_detect(colnames(dtt), "ENSG") & !(colnames(dtt) %in% lnc)))
new_dat = dtt
if(length(z) > 0){
new_dat = dtt[,-z]}
#add 0/1 labels
new_dat$lncRNA_tag = ""
med = median(new_dat[,which(colnames(new_dat) %in% lnc)])
k = which(colnames(new_dat) %in% lnc)
if(med ==0){
#if median = 0 then anyone greater than zero is 1
l1 = which(new_dat[,k] > 0)
l2 = which(new_dat[,k] ==0)
new_dat$lncRNA_tag[l1] = 1
new_dat$lncRNA_tag[l2] = 0
}
if(!(med ==0)){
l1 = which(new_dat[,k] >= med)
l2 = which(new_dat[,k] < med)
new_dat$lncRNA_tag[l1] = 1
new_dat$lncRNA_tag[l2] = 0
}
lnc_id=lnc
res_dat = as.data.table(filter(res, type==cancer_type, lnc==lnc_id))
cols_dat = which(colnames(new_dat) %in% res_dat$colname)
for(i in cols_dat){
print(i)
col = colnames(new_dat)[i]
new_dat_plot = new_dat[,c("patient", col, lnc, "lncRNA_tag", "OS", "OS.time")]
#palette
colourCount = length(unique(new_dat_plot$Clinical))
getPalette = colorRampPalette(brewer.pal(9, "Set1"))
new_dat_plot[,3] = log1p(new_dat_plot[,3])
med = median(new_dat_plot[,3])
colnames(new_dat_plot)[3] = "lncRNA_exp"
new_dat_plot[,2] = as.character(new_dat_plot[,2])
z1 = which(is.na(new_dat_plot[,which(colnames(new_dat_plot) %in% col)]))
if(!(length(z1)==0)){
new_dat_plot = new_dat_plot[-z1,]}
z2 = which(new_dat_plot[,which(colnames(new_dat_plot) %in% col)] %in% c("[Unknown]", "[Not Available]",
"#N/A", "[Not Evaluated]", "[Discrepancy]", "[Not Applicable]", "Unknown", "N/A", "NA", "Not Available", "Not performed",
"Indeterminate", "[Not Available]", "[Unknown]", "Not Performed Clinical",
"Performed but Not Available", "[Not submitted]"))
if(!(length(z2)==0)){
new_dat_plot = new_dat_plot[-z2,]}
unq = length(unique(new_dat_plot[,2]))
colnames(new_dat_plot)[2] = "Clinical"
print(unique(new_dat_plot[,2]))
new_dat_plot = as.data.table(new_dat_plot)
new_dat_plot = new_dat_plot[order(lncRNA_exp)]
new_dat_plot$patient = factor(new_dat_plot$patient, levels=new_dat_plot$patient)
new_dat_plot$lncRNA_tag = factor(new_dat_plot$lncRNA_tag, levels=c(1,0))
#if numerical variable make correlation plot
if(length(unique(new_dat_plot$Clinical)) > 20){
new_dat_plot$Clinical = as.numeric(new_dat_plot$Clinical)
g = ggscatter(new_dat_plot, x = "Clinical", y = "lncRNA_exp",
color = "black", shape = 21, size = 3, # Points color, shape and size
add = "reg.line", # Add regressin line
add.params = list(color = "blue", fill = "lightgray"), # Customize reg. line
conf.int = TRUE, # Add confidence interval
cor.coef = TRUE, # Add correlation coefficient. see ?stat_cor
cor.coeff.args = list(method = "spearman", label.sep = "\n"))+ggtitle(paste(get_name(lnc), cancer_type, colnames(new_dat)[i]))
print(g)
}
#if categorial variable make facetted barplot
if(!(length(unique(new_dat_plot$Clinical)) > 20)){
check_comp1 = ((col == "X1p.19q.codeletion") & (lnc == "ENSG00000239552"))
check_comp2 = ((col == "clinical_stage") & (lnc == "ENSG00000259641"))
if((check_comp1 | check_comp2)){
#get risk based models clinical only
new_dat_plot$OS.time = new_dat_plot$OS.time/365
cox_lnc = coxph(Surv(OS.time, OS) ~ Clinical, data = new_dat_plot)
relRisk <- predict(cox_lnc, new_dat_plot, type="risk") # relative risk
new_dat_plot$rel_risk_clin_only = relRisk
# split into two risk groups based on median
PI_lower_thres = median(new_dat_plot$rel_risk_clin_only)
PI_max_threshold = summary(new_dat_plot$rel_risk_clin_only)[5]
new_dat_plot$risk_group_clin_only = get_median_risk_group(
new_dat_plot$rel_risk_clin_only,
PI_lower_thres)
new_dat_plot$risk_group_clin_only = factor(new_dat_plot$risk_group_clin_only, levels=c("low_risk", "high_risk"))
#get risk based models clinical plus lncRNA only
cox_lnc = coxph(Surv(OS.time, OS) ~ Clinical + lncRNA_tag, data = new_dat_plot)
relRisk <- predict(cox_lnc, new_dat_plot, type="risk") # relative risk
new_dat_plot$rel_risk_clin_plus_lncRNA = relRisk
# split into two risk groups based on median
PI_lower_thres = median(new_dat_plot$rel_risk_clin_plus_lncRNA)
PI_max_threshold = summary(new_dat_plot$rel_risk_clin_plus_lncRNA)[5]
new_dat_plot$risk_group_clin_plus_lncRNA = get_median_risk_group(
new_dat_plot$rel_risk_clin_plus_lncRNA,
PI_lower_thres)
new_dat_plot$risk_group_clin_plus_lncRNA = factor(new_dat_plot$risk_group_clin_plus_lncRNA, levels=c("low_risk", "high_risk"))
file_name = paste("/u/kisaev/", lnc, col, ".pdf", sep="_")
pdf(file_name, width=6, height=5)
fit <- survfit(Surv(OS.time, OS) ~ risk_group_clin_only, data = new_dat_plot)
cox_lnc = coxph(Surv(OS.time, OS) ~ risk_group_clin_only, data = new_dat_plot)
hr=round(summary(cox_lnc)$coefficients[2], digits=3)
pval=round(summary(cox_lnc)$coefficients[5], digits=15)
lowrisksamps = table(new_dat_plot$risk_group_clin_only)[1]
highrisksamps = table(new_dat_plot$risk_group_clin_only)[2]
s1 <- ggsurvplot(fit ,
title = paste("HR=", hr, "waldpval=", pval, "riskhigh=", highrisksamps, "risklow=", lowrisksamps),
xlab = "Time (Years)",
font.main = c(7, "bold", "black"),
data = new_dat_plot, # data used to fit survival curves.
pval = TRUE, # show p-value of log-rank test.
conf.int = FALSE, # show confidence intervals for
#xlim = c(0,8),
risk.table = FALSE, # present narrower X axis, but not affect
break.time.by = 1, # break X axis in time intervals by 500.
palette =c("#4DBBD5FF", "#E64B35FF"))
print(s1)
fit <- survfit(Surv(OS.time, OS) ~ risk_group_clin_plus_lncRNA, data = new_dat_plot)
cox_lnc = coxph(Surv(OS.time, OS) ~ risk_group_clin_plus_lncRNA, data = new_dat_plot)
hr=round(summary(cox_lnc)$coefficients[2], digits=3)
pval=round(summary(cox_lnc)$coefficients[5], digits=15)
lowrisksamps = table(new_dat_plot$risk_group_clin_plus_lncRNA)[1]
highrisksamps = table(new_dat_plot$risk_group_clin_plus_lncRNA)[2]
s2 <- ggsurvplot(fit ,
xlab = "Time (Years)",
font.main = c(7, "bold", "black"),
title = paste("HR=", hr, "waldpval=", pval, "riskhigh=", highrisksamps, "risklow=", lowrisksamps),
data = new_dat_plot, # data used to fit survival curves.
pval = TRUE, # show p-value of log-rank test.
conf.int = FALSE, # show confidence intervals for
#xlim = c(0,8),
palette =c("#4DBBD5FF", "#E64B35FF"),
break.time.by = 1)#, # break X axis in time intervals by 500.
print(s2)
dev.off()
}
g <- ggplot(new_dat_plot, aes(patient, lncRNA_exp)) + geom_col(aes(fill = Clinical))+
facet_grid(~lncRNA_tag+Clinical, space="free", scales="free") + theme_bw()+
theme(axis.text.x = element_blank(), axis.ticks.x = element_blank(),
strip.text.x = element_text(size = 3, colour = "black"),
legend.position = "none")+ggtitle(paste(get_name(lnc), cancer_type, colnames(new_dat)[i]))
print(g)
tab=table(new_dat_plot$Clinical, new_dat_plot$lncRNA_tag)
mo = mosaic(~ Clinical + lncRNA_tag, data = new_dat_plot,
main = paste(get_name(lnc), cancer_type, colnames(new_dat)[i]), shade = TRUE, legend = TRUE)
print(mo)
chi=t(table(new_dat_plot$Clinical, new_dat_plot$lncRNA_tag))
chisq=chisq.test(chi)
#residuals = corrplot(chisq$residuals, is.cor = FALSE, col=c("blue", "red"))
#print(residuals)
#also print boxplot
box = ggboxplot(new_dat_plot, "Clinical", "lncRNA_exp",
color = "lncRNA_tag", palette =c("#FC4E07", "#00AFBB"),
add = "jitter") +ggtitle(paste(get_name(lnc), cancer_type, colnames(new_dat)[i]))
box = box + stat_compare_means(aes(group = lncRNA_tag), label = "p.format")+stat_n_text()
print(box)
#also print KM plot
new_dat_plot$OS.time = new_dat_plot$OS.time/365
fit <- survfit(Surv(OS.time, OS) ~ lncRNA_tag + Clinical, data = new_dat_plot)
s <- ggsurvplot(
title = paste(get_name(lnc), cancer_type, colnames(new_dat)[i]),
fit,
xlab = "Time (Years)",
font.main = c(14, "bold", "black"),
font.x = c(12, "plain", "black"),
font.y = c(12, "plain", "black"),
font.tickslab = c(11, "plain", "black"),
font.legend = 4,
risk.table.fontsize = 2,
data = new_dat_plot, # data used to fit survival curves.
risk.table = TRUE, # show risk table.
legend = "right",
pval = TRUE, # show p-value of log-rank test.
conf.int = FALSE, # show confidence intervals for
# point estimaes of survival curves.
xlim = c(0,10), # present narrower X axis, but not affect
# survival estimates.
break.time.by = 1, # break X axis in time intervals by 500.
palette = "npg",
risk.table.y.text.col = T, # colour risk table text annotations.
risk.table.y.text = FALSE # show bars instead of names in text annotations
# in legend of risk table
)
print(s)
fit <- survfit(Surv(OS.time, OS) ~ lncRNA_tag, data = new_dat_plot)
s2 <- ggsurvplot(fit ,
xlab = "Time (Years)",
data = new_dat_plot, # data used to fit survival curves.
facet.by = "Clinical",
pval = TRUE, # show p-value of log-rank test.
conf.int = FALSE, # show confidence intervals for
xlim = c(0,10),
palette ="npg",
break.time.by = 1)#, # break X axis in time intervals by 500.
print(s2)
fit <- survfit(Surv(OS.time, OS) ~ Clinical, data = new_dat_plot)
s3 <- ggsurvplot(fit ,
xlab = "Time (Years)",
data = new_dat_plot, # data used to fit survival curves.
pval = TRUE, # show p-value of log-rank test.
conf.int = FALSE, # show confidence intervals for
xlim = c(0,10),
palette ="npg",
break.time.by = 1)#,
print(s3)
}
}#all cols dat
}#get_cor
llply(lncs, get_cor)
}#only run if lncs in final clinical res dataset
}#end get_clin_lnc_plots
pdf("/u/kisaev/Jan2021/Figure4_individual_plots_clinical_plots.pdf", width=6, height=5)
llply(clin, get_clin_lnc_plots, .progress="text")
dev.off()
|
74b310b20e25af023a7a7d3564729f405cf2ff97
|
10e7160c116b33c1c6ef4b919176089cb5177d10
|
/R/labelLayer.R
|
a1ae1bd49be477868a7b2c442873695b5d761036
|
[] |
no_license
|
riatelab/cartography
|
556008a18a06b024791f7d1b1c7e0e2711b4c6f4
|
188a92e7cdc6e51d82ca8cacd970c7bb426f5d84
|
refs/heads/master
| 2023-08-06T02:16:00.746142
| 2023-01-18T09:30:27
| 2023-01-18T09:30:27
| 37,248,864
| 353
| 46
| null | 2021-02-16T13:03:24
| 2015-06-11T08:27:11
|
R
|
UTF-8
|
R
| false
| false
| 3,140
|
r
|
labelLayer.R
|
#' @title Label Layer
#' @description Put labels on a map.
#' @name labelLayer
#' @param x an sf object, a simple feature collection.
#' spdf, df, dfid and spdfid are not used.
#' @param spdf a SpatialPointsDataFrame or a SpatialPolygonsDataFrame; if spdf
#' is a SpatialPolygonsDataFrame texts are plotted on centroids.
#' @param df a data frame that contains the labels to plot. If df is missing
#' spdf@data is used instead.
#' @param spdfid name of the identifier variable in spdf, default to the first
#' column of the spdf data frame. (optional)
#' @param dfid name of the identifier variable in df, default to the first
#' column of df. (optional)
#' @param txt labels variable.
#' @param col labels color.
#' @param cex labels cex.
#' @param ... further \link{text} arguments.
#' @param bg halo color if halo is TRUE
#' @param r width of the halo
#' @param overlap if FALSE, labels are moved so they do not overlap.
#' @param halo If TRUE, then a 'halo' is printed around the text and additional
#' arguments bg and r can be modified to set the color and width of the halo.
#' @param show.lines if TRUE, then lines are plotted between x,y and the word,
#' for those words not covering their x,y coordinate
#' @seealso \link{layoutLayer}
#' @export
#' @examples
#' library(sf)
#' opar <- par(mar = c(0,0,0,0))
#' mtq <- st_read(system.file("gpkg/mtq.gpkg", package="cartography"))
#' plot(st_geometry(mtq), col = "darkseagreen3", border = "darkseagreen4",
#' bg = "#A6CAE0")
#' labelLayer(x = mtq, txt = "LIBGEO", col= "black", cex = 0.7, font = 4,
#' halo = TRUE, bg = "white", r = 0.1,
#' overlap = FALSE, show.lines = FALSE)
#' par(opar)
labelLayer <- function(x, spdf, df, spdfid = NULL, dfid = NULL, txt,
col = "black",
cex = 0.7, overlap = TRUE, show.lines = TRUE,
halo = FALSE, bg = "white", r = 0.1, ...){
if (missing(x)){
x <- convertToSf(spdf = spdf, df = df, spdfid = spdfid, dfid = dfid)
}
if (methods::is(x, 'Spatial')){
x <- sf::st_as_sf(x)
}
words <- x[[txt]]
cc <- sf::st_coordinates(sf::st_centroid(
x = sf::st_geometry(x),
of_largest_polygon = max(sf::st_is(sf::st_as_sf(x), "MULTIPOLYGON"))
))
if(nrow(x) == 1){
overlap <- TRUE
}
if (!overlap){
x <- unlist(cc[,1])
y <- unlist(cc[,2])
lay <- wordlayout(x,y,words,cex)
if(show.lines){
for(i in 1:length(x)){
xl <- lay[i,1]
yl <- lay[i,2]
w <- lay[i,3]
h <- lay[i,4]
if(x[i]<xl || x[i]>xl+w ||
y[i]<yl || y[i]>yl+h){
points(x[i],y[i],pch=16,col=col,cex=.5)
nx <- xl+.5*w
ny <- yl+.5*h
lines(c(x[i],nx),c(y[i],ny), col=col, lwd = 1)
}
}
}
cc <- matrix(data = c(lay[,1]+.5*lay[,3], lay[,2]+.5*lay[,4]),
ncol = 2, byrow = FALSE)
}
if (halo){
shadowtext(x = cc[,1], y = cc[,2], labels = words,
cex = cex, col = col, bg = bg, r = r, ...)
}else{
text(x = cc[,1], y = cc[,2], labels = words, cex = cex, col = col, ...)
}
}
|
fc887d584d9f281c9d113927a7f3adfcd560008b
|
2b4d529da8ab3ebcc7c10651a346b99e0d715d69
|
/summary_sc2.R
|
7a5f71ab494182dfb01fbda6afb4025a8edcf677
|
[] |
no_license
|
SHIccc/Regularization_Energy
|
37bffa89a78e18a5ff0b537a6197b5c69ac64b33
|
70f3feeaf514213338ae5c226ad5c8ebcf32cf31
|
refs/heads/master
| 2020-03-15T00:19:21.454755
| 2018-05-11T15:25:46
| 2018-05-11T15:25:46
| 131,865,344
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,934
|
r
|
summary_sc2.R
|
# Summarise and evaluate out-of-sample forecasts from the lag-2 (I)SIS-SCAD
# fit over a 90-variable system.
# NOTE(review): this script references objects (z1, y1) that are never
# created or loaded in the visible code; they are presumably contained in
# one of the loaded .RData files or left in the workspace by an earlier
# script -- confirm before running standalone (rm() below wipes everything).
#lag=2
rm(list = ls(all = TRUE))
load(file = "fit2forep2.RData")
SISoutput = fit2.fore.fore
# Which 90x90 lag block each selected coefficient index falls into.
SISoutput$ix%/%(90^2)+1
par(mgp=c(2, 0.5, 0))
plot(SISoutput$fit, main="(I)SIS-SCAD, lag=2")
print(SISoutput$fit)
load(file = "cvfitscad2.RData")
plot(cvfit.scad2, ylim = c(0,1))
#model
ix.SIS = SISoutput$ix
coef.SIS = SISoutput$coef.est
# coefficient matrix
# For A1: print the (row, column) positions of the first 25 selected flat
# (column-major) indices; the A1 entries below were filled in by hand from
# this printout.
# NOTE(review): ix - (ix %/% 90)*90 equals ix %% 90, which yields 0 rather
# than 90 when ix is an exact multiple of 90 -- double-check those cases.
SISoutput$ix[1:25]
SISoutput$ix[1:25] - (SISoutput$ix[1:25]%/%90)*90 # the row
SISoutput$ix[1:25]%/%90 + 1 # the column
# sample(0, 90*90, replace = TRUE) produces 8100 zeros, i.e. a 90x90 zero
# matrix (matrix(0, 90, 90) would be equivalent).
A1 = matrix(ncol=90, nrow=90, sample(0, 90*90, replace=TRUE))
# NOTE(review): the four lines above are repeated verbatim below --
# apparently an accidental copy/paste; harmless but redundant.
# For A1
SISoutput$ix[1:25]
SISoutput$ix[1:25] - (SISoutput$ix[1:25]%/%90)*90 # the row
SISoutput$ix[1:25]%/%90 + 1 # the column
A1 = matrix(ncol=90, nrow=90, sample(0, 90*90, replace=TRUE))
# Place the estimated coefficients by hand (coef.est entries start at [2];
# [1] is presumably the intercept -- confirm against SIS output format).
A1[67,23] = SISoutput$coef.est[2]
A1[67,25] = SISoutput$coef.est[3]
A1[68,26] = SISoutput$coef.est[4]
A1[69,26] = SISoutput$coef.est[5]
A1[67,27] = SISoutput$coef.est[6]
A1[68,32] = SISoutput$coef.est[7]
A1[69,32] = SISoutput$coef.est[8]
A1[75,42] = SISoutput$coef.est[9]
A1[67,60] = SISoutput$coef.est[10]
A1[68,60] = SISoutput$coef.est[11]
A1[69,60] = SISoutput$coef.est[12]
A1[76,60] = SISoutput$coef.est[13]
A1[64,61] = SISoutput$coef.est[14]
A1[71,61] = SISoutput$coef.est[15]
A1[64,65] = SISoutput$coef.est[16]
A1[68,66] = SISoutput$coef.est[17]
A1[69,68] = SISoutput$coef.est[18]
A1[69,69] = SISoutput$coef.est[19]
A1[68,70] = SISoutput$coef.est[20]
A1[74,74] = SISoutput$coef.est[21]
A1[75,74] = SISoutput$coef.est[22]
A1[76,76] = SISoutput$coef.est[23]
A1[74,84] = SISoutput$coef.est[24]
A1[75,84] = SISoutput$coef.est[25]
A1[67,86] = SISoutput$coef.est[26]
A1
# A2: same construction for the second (lag-2) 90x90 block, whose flat
# indices start after the first 8100 entries.
A2 = matrix(ncol=90, nrow=90, sample(0, 90*90, replace=TRUE))
(SISoutput$ix[26:36] - 8100) - ((SISoutput$ix[26:36] - 8100)%/%90)*90 # the row
(SISoutput$ix[26:36] - 8100)%/%90 + 1 # the column
A2[76,24] = SISoutput$coef.est[27]
A2[67,44] = SISoutput$coef.est[28]
A2[67,60] = SISoutput$coef.est[29]
A2[76,60] = SISoutput$coef.est[30]
A2[75,63] = SISoutput$coef.est[31]
A2[74,65] = SISoutput$coef.est[32]
A2[75,65] = SISoutput$coef.est[33]
A2[76,68] = SISoutput$coef.est[34]
A2[74,77] = SISoutput$coef.est[35]
A2[76,79] = SISoutput$coef.est[36]
A2[75,80] = SISoutput$coef.est[37]
# compare with y1
#y1mod = cbind(A1, A2, A3, A4, A5) %*% z1
# Fitted values: [A1 A2] applied to the stacked lagged regressors z1.
y1mod = cbind(A1, A2) %*% z1
#y1mod.ts = ts(t(y1mod), frequency = 12, start = c(2010, 9)) # mts [1:45, 1:90]
#y1.ts = ts(t(y1), frequency = 12, start = c(2010, 9)) # mts [1:45, 1:90]
#y1fore = y1mod[,45]
#for ( i in 1:9)
#{
#  B = (cbind(A1, A2))^(i) %*% y1mod
#  y1fore = cbind(y1fore, B[,45])
#}
#rm(B)
#or ust this
# Iterated multi-step forecasts for a VAR(2)-style model: each Bk stacks
# the previous two forecast states; column 45 is the forecast horizon kept.
y1mod = cbind(A1, A2) %*% z1
y1fore = y1mod[,45]
B1 = cbind(A1, A2) %*% rbind(y1mod, z1[1:90,])
y1fore = cbind(y1fore, B1[,45]) # 12.14
B2 = cbind(A1, A2) %*% rbind(B1, y1mod)
y1fore = cbind(y1fore, B2[,45]) # 1.15
B3 = cbind(A1, A2) %*% rbind(B2, B1)
y1fore = cbind(y1fore, B3[,45]) # 2.15
B4 = cbind(A1, A2) %*% rbind(B3, B2)
y1fore = cbind(y1fore, B4[,45]) # 3.15
B5 = cbind(A1, A2) %*% rbind(B4, B3)
y1fore = cbind(y1fore, B5[,45]) # 4.15
B6 = cbind(A1, A2) %*% rbind(B5, B4)
y1fore = cbind(y1fore, B6[,45]) # 5.15
B7 = cbind(A1, A2) %*% rbind(B6, B5)
y1fore = cbind(y1fore, B7[,45]) # 6.15
B8 = cbind(A1, A2) %*% rbind(B7, B6)
y1fore = cbind(y1fore, B8[,45]) # 7.15
B9 = cbind(A1, A2) %*% rbind(B8, B7)
y1fore = cbind(y1fore, B9[,45]) # 8.15
#calculate and plot mse
# Held-out observations (columns 36..45 of y1) versus the 10 forecasts.
y1org = y1[, 36:45]
y1fore.ts = ts(t(y1fore), start = c(2014,11), frequency = 12)
y1org.ts = ts(t(y1org), start = c(2014,11), frequency = 12)
#plot(y1org.ts[70:77], lwd = 2, col = "red ", ylim =c(-2,2))
#lines(y1fore.ts, lwd = 2, col = "blue")
# Overlay all 90 series: observed in grey, forecast in blue.
plot(y1org.ts[,1], lwd = 1.2, col = "grey", ylim = c(-2,2))
lines(y1fore.ts[,1], lwd = 2, col = "blue")
for (i in 2:90)
{
  lines(y1org.ts[,i], lwd = 1.2, col = "grey")
  lines(y1fore.ts[,i], lwd = 2, col = "blue")
}
# Same comparison on the raw matrices (first 10 series).
plot(y1org[,1], col="grey", lwd =1.2, type="l", ylim = c(-2, 2))
lines(y1fore[,1], col = "blue", lwd = 2)
for (i in 2:10)
{
  lines(y1org[,i], col = "grey", lwd = 2)
  lines(y1fore[,i], col = "blue", lwd = 2)
}
#y1mse = 0
#y1diff = 0
#for (i in 1:90)
#{
#  for (j in 1:10)
#  {
#    y1diff[i*j]= y1org[i,j] - y1fore[i,j]
#    y1mse = y1mse + (y1diff[i*j])^2
#  }
#}
#y1mse = y1mse/900
# Forecast-error MSE over all 90 series x 10 horizons.
y1diff = y1org - y1fore
y1sum=0
for (i in 1:90)
{
  for (j in 1:10)
  {
    y1sum= y1sum+y1diff[i,j]^2
  }
}
y1mse = y1sum/900
y1diff.ts = ts(y1diff, start = c(2014,11), frequency = 12)
plot(y1diff[,1], type = "l", col="black", ylim =c(-2,2))
for (i in 2:10){
  lines(y1diff[,i], col=rainbow(11)[i])
}
#information criteria
# Residual cross-products over the 54 fitted + forecast periods, then
# AIC / Hannan-Quinn / BIC with 36 parameters and 54 observations.
y1est = cbind(y1mod, y1fore[,-1])
load(file = "y1inf.RData")
y1res = y1est - t(y1inf[5:58,])
y1sig=0
for (i in 1:54){
  y1sig = y1sig + t(y1res[,i]) %*% y1res[,i]
}
y1sig.avg = y1sig/54
sc2.aic = log(abs(y1sig.avg)) + 2*36/54
sc2.hq = log(abs(y1sig.avg)) + 2*36*log(log(54))/54
sc2.bic = log(abs(y1sig.avg)) + 2*36*log(54)/54
|
43c1c936f00506ec238ba2742952abfdb6e513de
|
3c0b2e2d45711dc1af20cf58b2b5aaceeaba1369
|
/ui.R
|
766db9b009a3212a07169bfc383fdc37ae4f3045
|
[] |
no_license
|
Sambit78/attrition-master
|
53dc419fc8ad1abe5fda190379ea4069f6f8c7f2
|
3e4eac7fab76a08c5ff10ebeef7110bb2091b71a
|
refs/heads/main
| 2023-04-20T20:16:16.971208
| 2021-05-01T04:22:43
| 2021-05-01T04:22:43
| 363,321,020
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,005
|
r
|
ui.R
|
############################################
# Data Professor #
# http://youtube.com/dataprofessor #
# http://github.com/dataprofessor #
# http://facebook.com/dataprofessor #
# https://www.instagram.com/data.professor #
############################################
# Shiny UI for an employee-attrition predictor.  The data/preprocessing
# work below runs once at app start-up so that test_tbl (and its
# EmployeeNumber column) is available to populate the selector; server.R
# presumably reuses the same objects -- keep names in sync with it.

# Import libraries
library(shiny)
library(h2o)
library(recipes)
library(readxl)
library(tidyverse)
library(tidyquant)
library(lime)

# Load Data
# Paths are relative to the app directory; the .xlsx files must exist
# under 00_Data/ for the app to start.
path_train <- "00_Data/telco_train.xlsx"
path_test <- "00_Data/telco_test.xlsx"
path_data_definitions <- "00_Data/telco_data_definitions.xlsx"
train_raw_tbl <- read_excel(path_train, sheet = 1)
test_raw_tbl <- read_excel(path_test, sheet = 1)
definitions_raw_tbl <- read_excel(path_data_definitions, sheet = 1, col_names = FALSE)

# Processing Pipeline: process_hr_data_readable() comes from this sourced
# script and maps coded columns to readable values using the definitions.
source("00_Scripts/data_processing_pipeline.R")
train_readable_tbl <- process_hr_data_readable(train_raw_tbl, definitions_raw_tbl)
test_readable_tbl <- process_hr_data_readable(test_raw_tbl, definitions_raw_tbl)

# ML Preprocessing Recipe: drop zero-variance predictors and convert the
# two numeric level columns to factors (StockOptionLevel is 0-based, hence
# the +1 shift before mapping to factor levels).
recipe_obj <- recipe(Attrition ~ ., data = train_readable_tbl) %>%
  step_zv(all_predictors()) %>%
  step_num2factor(JobLevel, levels = c('1', '2', '3', '4', '5')) %>%
  step_num2factor(StockOptionLevel, levels = c('0', '1', '2', '3'), transform = function(x) {x + 1}) %>%
  prep()
recipe_obj
train_tbl <- bake(recipe_obj, new_data = train_readable_tbl)
test_tbl <- bake(recipe_obj, new_data = test_readable_tbl)

####################################
# User interface #
####################################
# Sidebar: pick an employee from the processed test set; main panel shows
# the LIME explanation plot rendered by the server as output$limeplot.
ui <- pageWithSidebar(
  # Page header
  headerPanel('Attrition Predictor'),
  # Input values
  sidebarPanel(
    selectInput("Employee_Number","Select Employee Number", choices = test_tbl %>%
                  select(EmployeeNumber) %>%
                  distinct())
  ),
  mainPanel(
    tabsetPanel(
      tabPanel("Attrition Risk",plotOutput("limeplot"))
    )
  )
)
|
b98f255a5cf6dd8dcb49dd0d1248506ef805eedb
|
375bc6c6ec97d3f7d973aa967c55eec7cbb2b15c
|
/test/temporary/9.3.R
|
aa2a2cc5bf30b73c2a61b0872a071ead3054c7cf
|
[] |
no_license
|
peterwu19881230/Microbial_phenotype_data_mining
|
de9ce76ad6717d0d95df4d9972b399ddc5ae37dc
|
0877af44fd66244f4ff927480acc5f524cb132fb
|
refs/heads/master
| 2020-08-19T11:07:16.888840
| 2019-10-18T01:39:48
| 2019-10-18T01:39:48
| 215,914,207
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,478
|
r
|
9.3.R
|
###(Updated 9/3/2019) manually correct the fake ECKs made by Nichols (search for gene names in EcoCyc and put back the right ECK)
###====================================================================================================================================================
###The list of 22 fake ECKs (I pulled them out by manually looking at the sorted ECK of the original data file that Nichols' provides) -> correct ECK
###ECK4466-MOKC ECK0018
###ECK4472-YOAI ECK1786
###ECK5000-SROH ECK4505
###ECK5001-SGRT ECK4477
###ECK5002-ISTR-1 G0-10202 This doesn't have ECK number. Use this as the EcoCyc ID
###ECK5003-RYBD ECK4621
###ECK5004-RYEF ECK4574
###ECK5005-TP2 G0-8894 This doesn't have ECK number. Use this as the EcoCyc ID
###ECK5006-TPKE70 G0-8906 This doesn't have ECK number. Use this as the EcoCyc ID
###ECK5007-YKGR ECK4486
###ECK5008-YMIB ECK4487
###ECK5009-YMJD ECK4488
###ECK5010-YNBG ECK4489
###ECK5011-YOAJ ECK4490
###ECK5012-YOAK ECK4491
###ECK5013-YOBI ECK4492
###ECK5014-YOEI ECK4493
###ECK5015-YOHP ECK4494
###ECK5016-YPDK ECK4495
###ECK5017-YQCG ECK4497
###ECK5018-YQEL ECK4498
###ECK5019-YQFG ECK4499
###correct the rows of ECK_1st_table for the above strains
##correct the 2nd, 3rd columns of "id_ECKs_CorrectedECKs_AssociatedGeneNames" generated at the end of clean_names.R (associated_gene_names shouldn't have to change)
# Fake names and their corrected ECK ids are aligned positionally; ""
# marks the three entries that have no ECK number (see G0-* notes above).
fake_ECK_genes=c("ECK4466-MOKC","ECK4472-YOAI","ECK5000-SROH","ECK5001-SGRT","ECK5002-ISTR-1","ECK5003-RYBD","ECK5004-RYEF","ECK5005-TP2","ECK5006-TPKE70",
                 "ECK5007-YKGR","ECK5008-YMIB","ECK5009-YMJD","ECK5010-YNBG","ECK5011-YOAJ","ECK5012-YOAK","ECK5013-YOBI","ECK5014-YOEI","ECK5015-YOHP",
                 "ECK5016-YPDK","ECK5017-YQCG","ECK5018-YQEL","ECK5019-YQFG")
corrected_ECK=c("ECK0018","ECK1786","ECK4505","ECK4477","","ECK4621","ECK4574","","","ECK4486","ECK4487","ECK4488","ECK4489","ECK4490","ECK4491","ECK4492","ECK4493","ECK4494","ECK4495",
                "ECK4497","ECK4498","ECK4499")
# Work on a copy so a failure mid-loop leaves the original table intact.
# Requires id_ECKs_CorrectedECKs_AssociatedGeneNames (from clean_names.R)
# and stringr's str_replace to be available in the workspace.
temp=id_ECKs_CorrectedECKs_AssociatedGeneNames
for(i in 1:length(fake_ECK_genes)){
  # Rows whose original ECK matches this fake name.
  # NOTE(review): grep() treats the name as a regex; the current names
  # contain no metacharacters, but fixed = TRUE would be more robust.
  index_=grep(fake_ECK_genes[[i]],id_ECKs_CorrectedECKs_AssociatedGeneNames$originalECKs)
  # Swap the leading fake ECKnnnn prefix for the corrected id.
  # NOTE(review): for the "" entries this yields names like "-ISTR-1",
  # not the G0-* EcoCyc ids mentioned above -- confirm that is intended.
  temp[index_,2]=str_replace(fake_ECK_genes[[i]],"^ECK[0-9]{4}",corrected_ECK[[i]])
  temp[index_,3]=temp[index_,2]
}
id_ECKs_CorrectedECKs_AssociatedGeneNames=temp
###====================================================================================================================================================
|
1a01e1648dc1a8953f4f3780fa263084939037e9
|
b63c9bb0d7bdeb75aa8151055920af7b34ced83f
|
/R/dplyrOracle.R
|
2e9cd190cfd0fe812bb907436793cb2510e982d6
|
[] |
no_license
|
bernardocaldas/dplyrOracle
|
62c3a831cf6b292eb06b8b1fc2961f3f2b26ad62
|
2841cd966810740b07a204d73ffe2ece34f8be3e
|
refs/heads/master
| 2021-01-15T08:08:46.629130
| 2016-03-23T09:24:49
| 2016-03-23T09:24:49
| 54,482,145
| 0
| 1
| null | 2016-03-22T14:33:54
| 2016-03-22T14:33:54
| null |
UTF-8
|
R
| false
| false
| 146
|
r
|
dplyrOracle.R
|
#' dplyrOracle: plyr for Oracle
#'
#' Package-level documentation stub: the NULL below gives roxygen2 an
#' object to attach the package help page and namespace directives to.
#'
#' @docType package
#' @name dplyrOracle
#' @import dplyr ROracle DBI assertthat
#' @importFrom utils head
NULL
|
4f04659112e0035331c212247d6f00c6ad36f30c
|
7917fc0a7108a994bf39359385fb5728d189c182
|
/cran/paws.analytics/man/quicksight_describe_theme_alias.Rd
|
3c8ebb2f8dd3ada945d5452de53ff0761574d75a
|
[
"Apache-2.0"
] |
permissive
|
TWarczak/paws
|
b59300a5c41e374542a80aba223f84e1e2538bec
|
e70532e3e245286452e97e3286b5decce5c4eb90
|
refs/heads/main
| 2023-07-06T21:51:31.572720
| 2021-08-06T02:08:53
| 2021-08-06T02:08:53
| 396,131,582
| 1
| 0
|
NOASSERTION
| 2021-08-14T21:11:04
| 2021-08-14T21:11:04
| null |
UTF-8
|
R
| false
| true
| 978
|
rd
|
quicksight_describe_theme_alias.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/quicksight_operations.R
\name{quicksight_describe_theme_alias}
\alias{quicksight_describe_theme_alias}
\title{Describes the alias for a theme}
\usage{
quicksight_describe_theme_alias(AwsAccountId, ThemeId, AliasName)
}
\arguments{
\item{AwsAccountId}{[required] The ID of the AWS account that contains the theme alias that you're
describing.}
\item{ThemeId}{[required] The ID for the theme.}
\item{AliasName}{[required] The name of the theme alias that you want to describe.}
}
\value{
A list with the following syntax:\preformatted{list(
ThemeAlias = list(
Arn = "string",
AliasName = "string",
ThemeVersionNumber = 123
),
Status = 123,
RequestId = "string"
)
}
}
\description{
Describes the alias for a theme.
}
\section{Request syntax}{
\preformatted{svc$describe_theme_alias(
AwsAccountId = "string",
ThemeId = "string",
AliasName = "string"
)
}
}
\keyword{internal}
|
af8a5e785d140ff4582cf50050503680cf444fd8
|
5bb12ef90f32a9713ca1ea3a9b615908393e0a5e
|
/man/compareSingleCellsAgainstBulk.Rd
|
c2bcfeca0e4a94b577c457d4a3c51bf3838f1949
|
[] |
no_license
|
nukappa/dropbead
|
f4a0b8ef1b73e30262e938b588c9875a8267d426
|
a378177f35b26a9b8e60bfb04364b5389a67ebd7
|
refs/heads/master
| 2020-12-26T11:15:51.822483
| 2018-02-21T21:00:17
| 2018-02-21T21:00:17
| 67,596,707
| 0
| 0
| null | 2016-09-07T10:25:46
| 2016-09-07T10:25:46
| null |
UTF-8
|
R
| false
| true
| 1,009
|
rd
|
compareSingleCellsAgainstBulk.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/DGEMatrix.R
\name{compareSingleCellsAgainstBulk}
\alias{compareSingleCellsAgainstBulk}
\title{Compare gene expression between single cell sample and bulk}
\usage{
compareSingleCellsAgainstBulk(single.cells, bulk.data, method = "pearson",
ylab = "single cell sample", col = "steelblue", ...)
}
\arguments{
\item{single.cells}{A \code{data.frame} containing genes as rownames and columns as cells.}
\item{bulk.data}{A \code{data.frame} containing genes as rownames and a single column
with transcript counts.}
\item{method}{Correlation method used for the comparison (defaults to
\code{"pearson"}).}
\item{ylab}{Label of the single cell sample.}
\item{col}{The color of the scatterplot points.}
\item{log.space}{If True (default value) then the average of single cells data is
performed in log space.}
}
\value{
A scatterplot comparing the gene expression levels and with the correlation
among the samples computed.
}
\description{
Compares the gene expression measurements from single cell data against bulk data.
}
|
c5577fb8accfaa2d5543728a6a94ecc14c6dc8dc
|
718c983e521a231c99185223eb48a797b984c1cf
|
/server.R
|
336237f2a2761dda074d1fc76a040eb846844f57
|
[] |
no_license
|
markie1mb/DDP
|
55a1fcd7a9e58b08d38197980a7f442257260677
|
c1a68525412c6b40456884c9e94773604cbd030a
|
refs/heads/master
| 2021-01-19T12:33:17.897577
| 2017-02-19T13:56:14
| 2017-02-19T13:56:14
| 82,321,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,607
|
r
|
server.R
|
library(shiny)
## Defining global parameters for a simple roulette game.
## NOTE(review): these objects live in the app's global environment and are
## mutated with `<<-` from the reactives below, so every concurrent session
## of the app shares one bankroll / spin history -- confirm that is intended.
moneybag<-510
n<-0
NumberHist<-as.vector(rep(0,37))
names(NumberHist)<-c(0:36)
StatsHist<-data.frame(red=c(0,0),black=c(0,0),odd=c(0,0),even=c(0,0))
# Colour of each roulette number 1..36 (0 is handled separately as a loss
# for colour/parity bets).
RedBlack<-c('red','black','red','black','red','black','red','black','red','black',
            'black','red','black','red','black','red','black','red',
            'red','black','red','black','red','black','red','black','red','black',
            'black','red','black','red','black','red','black','red')
BetNr<-""
Bet<-""
shinyServer(function(input, output) {
  ## Reactive functions.  Reading input$Action into the unused local XX
  ## makes each reactive re-run on every press of the action button.

  # Human-readable description of the current bet ("Red", "Number 17", ...).
  BetText<- reactive( {
    Bet<<-input$Choice
    if(Bet!="Number"){
      Bet
    } else {
      paste(Bet,input$Number, sep=" ")
    }
  })

  # Amount wagered on the current spin (also cached globally in BetAm).
  BetAmount<- reactive( {
    XX<-input$Action
    BetAm<<-input$Amount
    BetAm
  })

  # Number the player bet on (only meaningful for "Number" bets).
  BetNumber<- reactive( {
    BetNr<<-input$Number
    BetNr
  })

  # Spin the wheel: uniform draw from 0..36.
  RouletteNumber <- reactive( {
    XX<-input$Action
    nn<<-floor(runif(1,min=0,max=37))
    nn
  })

  # Payout multiplier for the spin: 2x for winning even-money bets
  # (red/black, odd/even), 35x for a hit on a single number, 0 otherwise.
  # NOTE(review): the local is named F, shadowing the F/FALSE shortcut;
  # its initial value 33 is always overwritten before use.
  Factor <- reactive( {
    XX<-input$Action
    F<-33
    if(Bet!="Number")
    {
      if(nn==0) {F<-0}
      else if ((nn %% 2)==0 & (Bet=='Even')){F<-2}
      else if ((nn %% 2)==1 & (Bet=='Odd')){F<-2}
      else if (RedBlack[nn]=='black' & (Bet=='Black')){F<-2}
      else if (RedBlack[nn]=='red' & (Bet=='Red')){F<-2}
      else {F<-0}
    }
    else if (Bet=="Number" & BetNumber()==nn){F<-35}
    else {F<-0}
    F
  })

  # Update the bankroll: subtract the stake, add stake times the multiplier.
  BetMoneybag <- reactive( {
    XX<-input$Action
    moneybag<<-moneybag-BetAm+Factor()*BetAm
    moneybag
  })

  # Histogram of drawn numbers (index shifted by one so number 0 -> slot 1).
  Bars <- reactive( {
    XX<-input$Action
    NumberHist[nn+1]<<-NumberHist[nn+1]+1
    NumberHist
  })

  # Total number of spins played so far.
  N <- reactive( {
    XX<-input$Action
    n<<-n+1
    n
  })

  # Row 1: running counts of red/black/odd/even outcomes (0 counts as none).
  # Row 2: those counts as a fraction of all spins.
  # NOTE(review): row 2 is written with `<-`, so only the returned copy is
  # updated; the global StatsHist keeps zeros in row 2.  The rendered table
  # still shows the correct values because the local copy is returned.
  Statistics <- reactive( {
    XX<-input$Action
    if (nn!=0){
      if ((nn %% 2)==0){StatsHist[1,"even"]<<-StatsHist[1,"even"]+1}
      if ((nn %% 2)==1){StatsHist[1,"odd"]<<-StatsHist[1,"odd"]+1}
      if (RedBlack[nn]=='black'){StatsHist[1,"black"]<<-StatsHist[1,"black"]+1}
      if (RedBlack[nn]=='red'){StatsHist[1,"red"]<<-StatsHist[1,"red"]+1}
    }
    StatsHist[2,]<-StatsHist[1,]/N()
    StatsHist
  })

  # Outputs: spin result table, outcome statistics, and number histogram.
  output$outTable<-renderTable(data.frame(Last_number=RouletteNumber(),Bet=BetText(),Amount=BetAmount(),Cash=BetMoneybag()),digits=0)
  output$StatHist <- renderTable(Statistics())
  output$plot<- renderPlot(barplot(Bars()))
})
|
6c0df0a1f993084802a779c4ee59a2b09a2fd43a
|
2a8c6cf47ec942751a571b011f83ff7cf6a456e5
|
/cachematrix.R
|
ddeed5972ba49637c48925b0862a1e1f6a9bbe2c
|
[] |
no_license
|
mbalbas/ProgrammingAssignment2
|
c73f30b11de5ef75195279c227698ad27cbe64cd
|
f00fab59dc423b0ff1aabab50ff00c33e9a8f0d0
|
refs/heads/master
| 2021-01-18T18:49:05.807872
| 2015-02-22T22:14:31
| 2015-02-22T22:14:31
| 31,181,922
| 0
| 0
| null | 2015-02-22T21:40:07
| 2015-02-22T21:40:06
| null |
UTF-8
|
R
| false
| false
| 1,832
|
r
|
cachematrix.R
|
## Two functions are used to compute the inverse of a matrix, checking first
## whether it was already computed and if so taking its cached value.

# makeCacheMatrix builds a list of closures around a matrix `x` and its
# lazily computed inverse `i`:
#   set(y)        -- replace the stored matrix and drop any cached inverse
#   get()         -- return the stored matrix
#   setinverse(v) -- store a computed inverse
#   getinverse()  -- return the cached inverse, or NULL if not yet computed
# Fix: the default argument is an empty matrix() rather than numeric(),
# since the object is meant to wrap a matrix.
makeCacheMatrix <- function(x = matrix()) {
  i <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y
    i <<- NULL  # invalidate the cache whenever the matrix changes
  }
  get <- function() x
  setinverse <- function(inverse) i <<- inverse
  getinverse <- function() i
  list(set = set, get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}

# cacheSolve returns the inverse of the matrix wrapped by a makeCacheMatrix
# object.  On a cache hit the stored inverse is returned directly (after a
# message); otherwise it is computed with solve(), stored for next time,
# and returned.  Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  i <- x$getinverse()
  # Cache hit: reuse the previously computed inverse.
  if(!is.null(i)) {
    message("getting cached data")
    return(i)
  }
  # Cache miss: compute, store, and return the inverse.
  data <- x$get()
  i <- solve(data, ...)
  x$setinverse(i)
  i
}
|
d132cce1b9919c22176e10c8c49f3f075dd22b49
|
294028e16f3ba67b628d6ff56ef9cf1c0907fbf1
|
/R/softstop.R
|
3d6d41c0edbdf5d8f0e6ea355e50c4f412a66683
|
[] |
no_license
|
spenceredwin/boxit
|
f05c05717f3929ecbdc435493ba57faabae581db
|
ecbbba877d27819acd2f2748344438f06a09273f
|
refs/heads/master
| 2020-07-28T22:27:54.842006
| 2019-09-24T19:17:03
| 2019-09-24T19:17:03
| 209,560,757
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 661
|
r
|
softstop.R
|
#' Stop a Script
#'
#' Halts execution with an error while temporarily clearing any custom
#' error handler set via \code{options(error = ...)}, so the script stops
#' without invoking error-handling hooks.  The previous handler is restored
#' automatically when the function exits.
#'
#' @param message An error message to print when the script is stopped. Default is NULL.
#' @keywords internal
#' @export
#' @examples
#' softstop(message = "User stopped script")
softstop <- function(message = NULL) {
  # Remember the current handler and arrange for it to be restored no
  # matter how this function exits.
  previous_handler <- getOption("error")
  on.exit(options(error = previous_handler), add = TRUE)
  # Disable error handling for the duration of the stop() call.
  options(error = NULL)
  # Raise the error without echoing the call in the message.
  stop(message, call. = FALSE)
}
|
e561dfed652bc669f4d2926391a58236affa9919
|
74319646bc48cefbba33d77c933dc4e0695b4bdd
|
/run_analysis.R
|
1188967ab16e911e13f504c1a1869189eca988bf
|
[] |
no_license
|
gyozonagy/getting-and-cleaning-wearable-computing-data-
|
0b8620b1a89b53a81770ec82821b009d55df9e82
|
b30c45fbce7c518b2a901b5c5cabf12ea8215d94
|
refs/heads/master
| 2021-01-01T18:22:31.322196
| 2014-10-26T21:24:42
| 2014-10-26T21:24:42
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,775
|
r
|
run_analysis.R
|
# Tidy-data script for the UCI HAR (Samsung wearable) dataset: merges the
# test and train partitions, computes per-row means and standard deviations
# of the nine raw inertial-signal files, and writes per-subject,
# per-activity averages to outfile.txt.
# NOTE(review): setwd() hard-codes a machine-specific path; prefer running
# the script from the project directory instead.
setwd("C:/courseproject")
# Subject ids and activity codes for both partitions (the full feature
# matrices X_*.txt are left commented out and unused).
test_subj <- read.table("test/subject_test.txt")
#test_X <- read.table("test/X_test.txt")
test_y <- read.table("test/y_test.txt")
train_subj <- read.table("train/subject_train.txt")
#train_X <- read.table("train/X_train.txt")
train_y <- read.table("train/y_train.txt")
acti_labels <- read.table("activity_labels.txt")
colnames(acti_labels) <- c("activity","act_names")
# Stack test on top of train; the same order is used for every signal file
# below so rows stay aligned across data frames.
df_subj <- rbind(test_subj,train_subj)
df_acti <- rbind(test_y,train_y)
colnames(df_acti) <- c("activity")
# NOTE(review): merge() sorts by the key and does not preserve row order,
# so after this line df_acti may no longer line up with df_subj -- verify.
df_acti <- merge(df_acti,acti_labels)
# Read the nine raw inertial signals (body acceleration, gyro, and total
# acceleration on x/y/z), each stacked test-then-train.
test1 <- read.table("test/Inertial Signals/body_acc_x_test.txt")
train1 <- read.table("train/Inertial Signals/body_acc_x_train.txt")
df1 <- rbind(test1,train1)
test2 <- read.table("test/Inertial Signals/body_acc_y_test.txt")
train2 <- read.table("train/Inertial Signals/body_acc_y_train.txt")
df2 <- rbind(test2,train2)
test3 <- read.table("test/Inertial Signals/body_acc_z_test.txt")
train3 <- read.table("train/Inertial Signals/body_acc_z_train.txt")
df3 <- rbind(test3,train3)
test4 <- read.table("test/Inertial Signals/body_gyro_x_test.txt")
train4 <- read.table("train/Inertial Signals/body_gyro_x_train.txt")
df4 <- rbind(test4,train4)
test5 <- read.table("test/Inertial Signals/body_gyro_y_test.txt")
train5 <- read.table("train/Inertial Signals/body_gyro_y_train.txt")
df5 <- rbind(test5,train5)
test6 <- read.table("test/Inertial Signals/body_gyro_z_test.txt")
train6 <- read.table("train/Inertial Signals/body_gyro_z_train.txt")
df6 <- rbind(test6,train6)
test7 <- read.table("test/Inertial Signals/total_acc_x_test.txt")
train7 <- read.table("train/Inertial Signals/total_acc_x_train.txt")
df7 <- rbind(test7,train7)
test8 <- read.table("test/Inertial Signals/total_acc_y_test.txt")
train8 <- read.table("train/Inertial Signals/total_acc_y_train.txt")
df8 <- rbind(test8,train8)
test9 <- read.table("test/Inertial Signals/total_acc_z_test.txt")
train9 <- read.table("train/Inertial Signals/total_acc_z_train.txt")
df9 <- rbind(test9,train9)
# Per-observation (row-wise) mean of each signal window.
m1 <- rowMeans(df1)
m2 <- rowMeans(df2)
m3 <- rowMeans(df3)
m4 <- rowMeans(df4)
m5 <- rowMeans(df5)
m6 <- rowMeans(df6)
m7 <- rowMeans(df7)
m8 <- rowMeans(df8)
m9 <- rowMeans(df9)
# Per-observation (row-wise) standard deviation of the same signals.
sd1 <- apply(df1,1,sd)
sd2 <- apply(df2,1,sd)
sd3 <- apply(df3,1,sd)
sd4 <- apply(df4,1,sd)
sd5 <- apply(df5,1,sd)
sd6 <- apply(df6,1,sd)
sd7 <- apply(df7,1,sd)
sd8 <- apply(df8,1,sd)
sd9 <- apply(df9,1,sd)
# Assemble the tidy data set: subject, activity, then mean/sd per signal.
df <- data.frame(df_subj,df_acti,m1,sd1,m2,sd2,m3,sd3,m4,sd4,m5,sd5,m6,sd6,m7,sd7,m8,sd8,m9,sd9)
colnames(df) <- c("subject","activity","act_names","mean_body_acc_x","stdev_body_acc_x","mean_body_acc_y","stdev_body_acc_y","mean_body_acc_z","stdev_body_acc_z","mean_body_gyro_x","stdev_body_gyro_x","mean_body_gyro_y","stdev_body_gyro_y","mean_body_gyro_z","stdev_body_gyro_z","mean_tot_acc_x","stdev_tot_acc_x","mean_tot_acc_y","stdev_tot_acc_y","mean_tot_acc_z","stdev_tot_acc_z")
#ordf <- df[order(df$subject, df$activity),]
# Drop the text labels; they are merged back onto both outputs at the end.
df$act_names <- NULL
# Second data set: the average of every variable per subject/activity
# pair, seeded with a dummy all-zero row that is removed after the loop.
# NOTE(review): growing outdf with rbind() inside the loop is quadratic;
# at 180 rows this is harmless, but preallocation would be tidier.
outdf <- data.frame(0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0)
colnames(outdf) <- c("subject","activity","mean_body_acc_x","stdev_body_acc_x","mean_body_acc_y","stdev_body_acc_y","mean_body_acc_z","stdev_body_acc_z","mean_body_gyro_x","stdev_body_gyro_x","mean_body_gyro_y","stdev_body_gyro_y","mean_body_gyro_z","stdev_body_gyro_z","mean_tot_acc_x","stdev_tot_acc_x","mean_tot_acc_y","stdev_tot_acc_y","mean_tot_acc_z","stdev_tot_acc_z")
for (sub in 1:30) {
  for (act in 1:6) {
    sel_act_sub <- df[df$subject == sub & df$activity == act,]
    mean_act_sub <- colMeans(sel_act_sub)
    # Skip subject/activity pairs with no observations (colMeans -> NaN).
    if (!is.na(mean_act_sub[1])) {
      outdf <- rbind(outdf,mean_act_sub)
    }
  }}
outdf = outdf[-1,]
outdf <- merge(outdf,acti_labels)
# NOTE(review): the argument is row.names; "row.name" only works through
# partial matching -- spell it out to be safe.
write.table(outdf, file = "outfile.txt",row.name=FALSE)
df <- merge(df,acti_labels)
|
71dd5e9ef5e1fed90fb8738588a0f3812f2a311f
|
c9cbbed37a77aaaeb36a8107265ee500bbfc2640
|
/pollutantmean.R
|
5bfe637ffebc758d14a985e2172ba927379125ea
|
[] |
no_license
|
smandalapu/datasciencecoursera
|
d6ac95bfa52059b0af35b48ae8ecaa8e33a5658b
|
80c128f2bf7fc25cf465d637bf2b7c898db1c032
|
refs/heads/master
| 2016-09-06T01:07:14.752567
| 2014-10-21T03:04:32
| 2014-10-21T03:04:32
| 25,125,695
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 384
|
r
|
pollutantmean.R
|
pollutantmean<-function(directory,pollutant,id=1:332) {
dir<-character(1)
pol<-character(1)
dir<-paste(directory)
pol<-paste(pollutant)
files=paste(directory,"/",formatC(id,width=3,flag="0"),".csv",sep="")
# print(files)
data<-lapply(files,function(x) read.csv(x))
data_rbind <- do.call("rbind", data)
# print(nrow(data_rbind))
mean(data_rbind[[pol]],na.rm=TRUE)
}
|
28b3f258046743a09ea114d03c279ec9191d5f70
|
fd91fd81027df91f03e29138b26e2a1b6e31e054
|
/man/inapplicable.phyData.Rd
|
64ad12df673a0145be3568e1409f368115b9c4da
|
[] |
no_license
|
gitter-badger/TreeSearch
|
77fa06b36d691f942c8ef578f35f3e005cc2f13e
|
5a95195211d980baa6db29260bf929a12c5bf707
|
refs/heads/master
| 2022-04-20T07:40:33.050434
| 2020-04-16T13:47:57
| 2020-04-16T13:47:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 8,321
|
rd
|
inapplicable.phyData.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{inapplicable.phyData}
\alias{inapplicable.phyData}
\title{Thirty Datasets with Inapplicable data}
\format{An object of class \code{list} of length 30.}
\source{
{
\describe{
\item{Agnarsson2004}{AGNARSSON, I. 2004. Morphological phylogeny of cobweb spiders and their
relatives (Araneae, Araneoidea, Theridiidae). Zoological Journal of the
Linnean Society, 141, 447--626.}
\item{Capa2011}{CAPA, M., HUTCHINGS, P., AGUADO, M. T. and BOTT, N. J. 2011. Phylogeny of
Sabellidae (Annelida) and relationships with other taxa inferred from
morphology and multiple genes. Cladistics, 27, 449--469.}
\item{DeAssis2011}{ DE ASSIS, J. E. and CHRISTOFFERSEN, M. L. 2011. Phylogenetic relationships
within Maldanidae (Capitellida, Annelida), based on morphological characters.
Systematics and Biodiversity, 9, 233--245.}
\item{OLeary1999}{ O'LEARY, M. A. and GEISLER, J. H. 1999. The position of Cetacea within
Mammalia: phylogenetic analysis of morphological data from extinct and
extant taxa. Systematic Biology, 48, 455--490.}
\item{Rousset2004}{ ROUSSET, V., ROUSE, G. W., SIDDALL, M. E., TILLIER, A. and PLEIJEL, F.
2004. The phylogenetic position of Siboglinidae (Annelida) inferred from
18S rRNA, 28S rRNA and morphological data. Cladistics, 20, 518--533.}
\item{Sano2011}{ SANO, M. and AKIMOTO, S.-I. 2011. Morphological phylogeny of
gall-forming aphids of the tribe Eriosomatini (Aphididae: Eriosomatinae).
Systematic Entomology, 36, 607--627.}
\item{Sansom2010}{ SANSOM, R. S., FREEDMAN, K., GABBOTT, S. E., ALDRIDGE, R. J. and
PURNELL, M. A. 2010. Taphonomy and affinity of an enigmatic Silurian vertebrate,
\emph{Jamoytius kerwoodi} White. Palaeontology, 53, 1393--1409.}
\item{Schulze2007}{ SCHULZE, A., CUTLER, E. B. and GIRIBET, G. 2007. Phylogeny of sipunculan
worms: A combined analysis of four gene regions and morphology. Molecular Phylogenetics and Evolution, 42, 171--92.}
\item{Shultz2007}{ SHULTZ, J. W. 2007. A phylogenetic analysis of the arachnid orders based
on morphological characters. Zoological Journal of the Linnean Society, 150, 221--265.}
\item{Wetterer2000}{ WETTERER, A. L., ROCKKMAN, M. V. and SIMMONS, N. B. 2000. Phylogeny of
phyllostomid bats (Mammalia: Chiroptera): data from diverse morphological
systems, sex chromosomes, and restriction sites. Bulletin of the American Museum of Natural History, 248, 1--200.}
\item{Wills2012}{ WILLS, M. A., GERBER, S., RUTA, M. and HUGHES, M. 2012. The disparity of
priapulid, archaeopriapulid and palaeoscolecid worms in the light of new data.
Journal of Evolutionary Biology, 25, 2056--2076.}
\item{Aguado2009}{ AGUADO, M. T. and SAN MARTIN, G. 2009. Phylogeny of Syllidae (Polychaeta)
based on morphological data. Zoologica Scripta, 38, 379--402.}
\item{Aria2015}{ ARIA, C., CARON, J. B. and GAINES, R. 2015. A large new leanchoiliid from
the Burgess Shale and the influence of inapplicable states on stem arthropod
phylogeny. Palaeontology, 58, 629--660.}
\item{Asher2005}{ASHER, R. J. and HOFREITER, M. 2006. Tenrec phylogeny and the noninvasive
extraction of nuclear DNA. Systematic biology, 55, 181--94.}
\item{Baker2009}{ BAKER, W. J., SAVOLAINEN, V., ASMUSSEN-LANGE, C. B., CHASE, M. W.,
DRANSFIELD, J., FOREST, F., HARLEY, M. M., UHL, N. W. and WILKINSON, M. 2009.
Complete generic-level phylogenetic analyses of palms (Arecaceae) with
comparisons of supertree and supermatrix approaches.
Systematic Biology, 58, 240--256.}
\item{Bouchenak2010}{ BOUCHENAK-KHELLADI, Y., VERBOOM, G. A., SAVOLAINEN, V. and HODKINSON, T. R.
2010. Biogeography of the grasses (Poaceae): a phylogenetic approach to reveal
evolutionary history in geographical space and geological time. Botanical
Journal of the Linnean Society, 162, 543--557.}
\item{Conrad2008}{ CONRAD, J. L. 2008. Phylogeny And Systematics Of Squamata (Reptilia) Based
On Morphology. Bulletin of the American Museum of Natural History, 310, 1--182.}
\item{Dikow2009}{ DIKOW, T. 2009. A phylogenetic hypothesis for Asilidae based on a total
evidence analysis of morphological and DNA sequence data (Insecta: Diptera:
Brachycera: Asiloidea). Organisms Diversity and Evolution, 9, 165--188.}
\item{Eklund2004}{ EKLUND, H., DOYLE, J. A. and HERENDEEN, P. S. 2004. Morphological
phylogenetic analysis of living and fossil Chloranthaceae. International
Journal of Plant Sciences, 165, 107--151.}
\item{Geisler2001}{ GEISLER, J. H. 2001. New morphological evidence for the phylogeny of
Artiodactyla, Cetacea, and Mesonychidae. American Museum Novitates, 3344, 53.}
\item{Giles2015}{ GILES, S., FRIEDMAN, M. and BRAZEAU, M. D. 2015. Osteichthyan-like cranial
conditions in an Early Devonian stem gnathostome. Nature, 520, 82--85.}
\item{Griswold1999}{ GRISWOLD, C. E., CODDINGTON, J. A., PLATNICK, N. I. and FORSTER, R. R. 1999.
Towards a phylogeny of entelegyne spiders (Araneae, Araneomorphae, Entelegynae).
Journal of Arachnology, 27, 53--63.}
\item{Liljeblad2008}{ LILJEBLAD, J., RONQUIST, F., NIEVES-ALDREY, J. L., FONTAL-CAZALLA, F.,
ROS-FARRE, P., GAITROS, D. and PUJADE-VILLAR, J. 2008. A fully web-illustrated
morphological phylogenetic study of relationships among oak gall wasps and
their closest relatives (Hymenoptera: Cynipidae).}
\item{Loconte1991}{ LOCONTE, H. and STEVENSON, D. W. 1991. Cladistics of the Magnoliidae.
Cladistics, 7, 267--296.}
\item{Longrich2010}{ LONGRICH, N. R., SANKEY, J. and TANKE, D. 2010. \emph{Texacephale langstoni},
a new genus of pachycephalosaurid (Dinosauria: Ornithischia) from the upper
Campanian Aguja Formation, southern Texas, USA. Cretaceous Research, 31, 274--284.}
\item{OMeara2014}{ O'MEARA, R. N. and THOMPSON, R. S. 2014. Were There Miocene Meridiolestidans?
Assessing the phylogenetic placement of \emph{Necrolestes patagonensis} and the
presence of a 40 million year Meridiolestidan ghost lineage. Journal
of Mammalian Evolution, 21, 271--284.}
\item{Rougier2012}{ ROUGIER, G. W., WIBLE, J. R., BECK, R. M. D. and APESTEGUIA, S. 2012.
The Miocene mammal \emph{Necrolestes} demonstrates the survival of a
Mesozoic nontherian lineage into the late Cenozoic of South America.
Proceedings of the National Academy of Sciences, 109, 20053--8.}
\item{Sharkey2011}{ SHARKEY, M. J., CARPENTER, J. M., VILHELMSEN, L., HERATY, J., LILJEBLAD, J.,
DOWLING, A. P. G., SCHULMEISTER, S., MURRAY, D., DEANS, A. R., RONQUIST, F.,
KROGMANN, L. and WHEELER, W. C. 2012. Phylogenetic relationships among
superfamilies of Hymenoptera. Cladistics, 28, 80--112.}
\item{Sundue2010}{ SUNDUE, M. A., ISLAM, M. B. and RANKER, T. A. 2010. Systematics of
Grammitid Ferns (Polypodiaceae): Using Morphology and Plastid Sequence Data
to Resolve the Circumscriptions of Melpomene and the Polyphyletic Genera
\emph{Lellingeria} and \emph{Terpsichore}. Systematic Botany, 35, 701--715.}
\item{Vinther2008}{ VINTHER, J., VAN ROY, P. and BRIGGS, D. E. G. 2008. Machaeridians are
Palaeozoic armoured annelids. Nature, 451, 185--188.}
\item{Wilson2003}{ WILSON, G. D. F. and EDGECOMBE, G. D. 2003. The Triassic isopod
\emph{Protamphisopus wianamattensis} (Chilton) and comparison by extant
taxa (Crustacea, Phreatoicidea). Journal of Paleontology, 77, 454--470.}
\item{Wortley2006}{ WORTLEY, A. H. and SCOTLAND, R. W. 2006. The effect of combining molecular
and morphological data in published phylogenetic analyses.
Systematic Biology, 55, 677--685.}
\item{Zanol2014}{ ZANOL, J., HALANYCH, K. M. and FAUCHALD, K. 2014. Reconciling taxonomy and
phylogeny in the bristleworm family Eunicidae (Polychaete, Annelida).
Zoologica Scripta, 43, 79--100.}
\item{Zhu2013}{ ZHU, M., YU, X., AHLBERG, P. E., CHOO, B., LU, J., QIAO, T., QU, Q., ZHAO, W.,
JIA, L., BLOM, H. and ZHU, Y. 2013. A Silurian placoderm with osteichthyan-like
marginal jaw bones. Nature, 502, 188--193.}
}
}
}
\usage{
inapplicable.phyData
}
\description{
These are the datasets used to evaluate the behaviour of the inapplicable algorithm
in Brazeau, Guillerme and Smith (2017).
}
\details{
The name of each item corresponds to the datasets listed below.
Datasets are sorted into two subsets, each sorted alphabetically;
the first subset comprise simpler datasets with faster processing times.
The value is the dataset in \code{phyDat} format.
}
\references{
\insertRef{Brazeau2018}{TreeSearch}
}
\keyword{datasets}
|
405f8db96b4c0f822edfd7f0b4491536db7f5368
|
26f1cb213312ad204072dadd6b1163bcc0fa1bba
|
/exemples/chap6/6.10.R
|
01d4173d2bd89fe4334678e8ed566b91de587495
|
[] |
no_license
|
fmigone/livreR
|
a9b6f61a0aab902fb3b07fc49ea7dd642b65bdc3
|
998df678da1559ee03438c439335db796a416f2f
|
refs/heads/master
| 2020-04-22T05:47:14.083087
| 2018-05-01T14:39:10
| 2018-05-01T14:39:10
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,843
|
r
|
6.10.R
|
library(reshape2) # transposition
library(tidyr) # autres fonctions de transposition
library(dplyr) # gestion simplifiée des données
library(tibble) # affichage des ddnnées
# éclatement de la colonne amenities en plusieurs
# séparateur : pipe |, à noter \\| ici (expression régulière)
# création de variables am1 à am50 (plus que nécessaire)
houses2 <- separate(houses,
col=amenities,
into=paste0("am",1:50),
sep="\\|")
# transposition : une ligne par équipement
houses_eq <- melt(houses2,
id="id",
measure=paste0("am",1:50),
value.name = "equipement")
# affichage
as_tibble(houses_eq)
# A tibble: 31,950 × 3
# id variable equipement
# <int> <fctr> <chr>
# 1 5589471 am1 TV
# 2 3018762 am1 TV
# 3 12862174 am1 TV
# 4 6198459 am1 TV
# 5 2605719 am1 Cable TV
# 6 917659 am1 Internet
# 7 2375969 am1 TV
# 8 13146717 am1 TV
# 9 2680392 am1 TV
# 10 4838079 am1 TV
# ... with 31,940 more rows
# élimination des lignes vides + tri
houses_eq <- filter(houses_eq,
equipement != "") %>%
arrange(id, equipement) %>%
select(-variable)
# affichage
as_tibble(houses_eq)
# A tibble: 8,213 × 2
# id equipement
# <int> <chr>
# 1 57766 Cable TV
# 2 57766 Essentials
# 3 57766 Family/Kid Friendly
# 4 57766 Heating
# 5 57766 Internet
# 6 57766 Kitchen
# 7 57766 Safety Card
# 8 57766 TV
# 9 57766 Washer
# 10 57766 Wireless Internet
# ... with 8,203 more rows
|
57df69bee2451c2364a1e87a8d2b517e6aaf74ed
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/pointdexter/examples/cps_sy1819.Rd.R
|
19d479ae9866aa3a87cbfa00c301fa739bab51c1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 266
|
r
|
cps_sy1819.Rd.R
|
library(pointdexter)
### Name: cps_sy1819
### Title: Chicago Public Schools (CPS) - School Profile Information,
### School Year (SY) 2018-2019
### Aliases: cps_sy1819
### Keywords: datasets
### ** Examples
# load the necessary data -----
data("cps_sy1819")
|
eaf4ffa0284fa0c03598c013e7bb495a42941710
|
97d42d0116a38692851fbc42deac5475c761229d
|
/Code/Table Code/SF - OS/Item 166.R
|
8e11958ea714f6056fca456d810446cbbf9a0c31
|
[] |
no_license
|
casey-stevens/Cadmus-6000-2017
|
f4632518088de34541b0c6c130b3dd021f0809d1
|
ab4450e77885a9723dba6bc3890112dec8c7328f
|
refs/heads/master
| 2021-01-25T06:17:59.728730
| 2018-09-27T20:35:42
| 2018-09-27T20:35:42
| 93,548,804
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,881
|
r
|
Item 166.R
|
#############################################################################################
## Title: RBSA Analysis
## Author: Casey Stevens, Cadmus Group
## Created: 06/13/2017
## Updated:
## Billing Code(s):
#############################################################################################
## Clear variables
# rm(list = ls())
rundate <- format(Sys.time(), "%d%b%y")
options(scipen = 999)
## Create "Not In" operator
"%notin%" <- Negate("%in%")
# Source codes
source("Code/Table Code/SourceCode.R")
source("Code/Table Code/Weighting Implementation Functions.R")
source("Code/Sample Weighting/Weights.R")
source("Code/Table Code/Export Function.R")
# Read in clean RBSA data
rbsa.dat <- read.xlsx(xlsxFile = file.path(filepathCleanData, paste("clean.rbsa.data", rundate, ".xlsx", sep = "")))
length(unique(rbsa.dat$CK_Cadmus_ID))
#Read in data for analysis
# mechanical.dat <- read.xlsx(xlsxFile = file.path(filepathRawData, mechanical.export))
#clean cadmus IDs
mechanical.dat$CK_Cadmus_ID <- trimws(toupper(mechanical.dat$CK_Cadmus_ID))
#Read in data for analysis
# windows.doors.dat <- read.xlsx(xlsxFile = file.path(filepathRawData, windows.export))
windows.doors.dat$CK_Cadmus_ID <- trimws(toupper(windows.doors.dat$CK_Cadmus_ID))
#############################################################################################
#
#For Mechanical information
#
#############################################################################################
#subset to columns needed for analysis
item166.dat.1 <- mechanical.dat[which(colnames(mechanical.dat) %in% c("CK_Cadmus_ID"
,"Generic"
,"Primary.Heating.System"
,"Heating.Fuel"
,""
,""))]
#remove any repeat header rows from exporting
item166.dat.11 <- item166.dat.1[which(item166.dat.1$CK_Cadmus_ID != "CK_CADMUS_ID"),]
#Keep only Yes and No in primary heating system indicator
item166.dat.12 <- item166.dat.11[which(item166.dat.11$Primary.Heating.System == "Yes"),]
length(unique(item166.dat.12$CK_Cadmus_ID)) #576 out of 601
#check uniques
unique(item166.dat.12$Primary.Heating.System)
item166.dat.12$count <- 1
item166.dat.13 <- unique(item166.dat.12[which(item166.dat.12$Heating.Fuel == "Electric"),])
item166.sum <- summarise(group_by(item166.dat.13, CK_Cadmus_ID, Heating.Fuel)
,Count = sum(count))
item166.sum$Count <- 1
which(duplicated(item166.sum$CK_Cadmus_ID)) #none are duplicated!
unique(item166.sum$Heating.Fuel)
item166.merge <- left_join(rbsa.dat, item166.sum)
item166.merge <- item166.merge[which(!is.na(item166.merge$Heating.Fuel)),]
item166.mechanical <- item166.merge
#############################################################################################
#
#Windows Data
#
#############################################################################################
item166.dat <- windows.doors.dat[which(colnames(windows.doors.dat) %in% c("CK_Cadmus_ID"
,"Type"
,"Sub-Type"
,"Area"
,"Quantity"
,"Frame./.Body.Type"
,"Glazing.Type"))]
item166.dat0 <- left_join(item166.dat, item166.mechanical, by = "CK_Cadmus_ID")
item166.dat0.1 <- item166.dat0[which(item166.dat0$Heating.Fuel == "Electric"),]
item166.dat1 <- left_join(item166.dat0.1, rbsa.dat, by = "CK_Cadmus_ID")
length(unique(item166.dat1$CK_Cadmus_ID)) #316
#subset to only windows
item166.dat2 <- item166.dat1[which(item166.dat1$Type == "Window"),]
#clean up frame/body type
unique(item166.dat2$`Frame./.Body.Type`)
item166.dat2$Frame.Type <- trimws(item166.dat2$`Frame./.Body.Type`)
item166.dat2$Frame.Type[grep("Wood|Vinyl|Fiberglass|wood|vinyl|fiberglass|tile|Garage", item166.dat2$Frame.Type)] <- "Wood/Vinyl/Fiberglass/Tile"
item166.dat2$Frame.Type[grep("Metal|Aluminum|metal|aluminum", item166.dat2$Frame.Type)] <- "Metal"
item166.dat2$Frame.Type[grep("N/A", item166.dat2$Frame.Type)] <- "Unknown"
item166.dat2$Frame.Type[which(is.na(item166.dat2$Frame.Type))] <- "Unknown"
unique(item166.dat2$Frame.Type)
item166.dat2 <- item166.dat2[which(item166.dat2$Frame.Type != "Unknown"),]
#clean up glazing types
item166.dat2$Glazing <- trimws(item166.dat2$Glazing.Type)
item166.dat2$Glazing[grep("Single", item166.dat2$Glazing)] <- "Single Glazed"
item166.dat2$Glazing[grep("Double", item166.dat2$Glazing)] <- "Double Glazed"
item166.dat2$Glazing[grep("Triple", item166.dat2$Glazing)] <- "Triple Glazed"
item166.dat2$Glazing[which(!(item166.dat2$Glazing %in% c("Single Glazed", "Double Glazed", "Triple Glazed")))] <- "Unknown"
unique(item166.dat2$Glazing)
item166.dat2 <- item166.dat2[which(item166.dat2$Glazing != "Unknown"),]
item166.dat2$Framing.Categories <- paste(item166.dat2$Frame.Type, item166.dat2$Glazing, sep = " ")
item166.dat2$count <- 1
unique(item166.dat2$Quantity)
item166.dat2$Quantity <- as.numeric(as.character(item166.dat2$Quantity))
item166.dat3 <- item166.dat2[which(!is.na(as.numeric(as.character(item166.dat2$Quantity)))),]
item166.sum <- summarise(group_by(item166.dat3, CK_Cadmus_ID, Framing.Categories)
,Quantity = sum(Quantity))
#############################################################################################
#
# Merge mechanical and window data together
#
#############################################################################################
item166.merge <- left_join(item166.sum, item166.mechanical)
item166.merge1 <- item166.merge[which(!(is.na(item166.merge$Heating.Fuel))),]
item166.merge2 <- left_join(rbsa.dat, item166.merge1)
item166.merge <- item166.merge2[which(!is.na(item166.merge2$Quantity)),]
item166.merge <- item166.merge[which(!is.na(item166.merge$Framing.Categories)),]
################################################
# Adding pop and sample sizes for weights
################################################
item166.data <- weightedData(item166.merge[-which(colnames(item166.merge) %in% c("Heating.Fuel"
,"Count"
,"Framing.Categories"
,"Quantity"))])
item166.data <- left_join(item166.data, item166.merge[which(colnames(item166.merge) %in% c("CK_Cadmus_ID"
,"Heating.Fuel"
,"Count"
,"Framing.Categories"
,"Quantity"))])
#######################
# Weighted Analysis
#######################
item166.final <- proportionRowsAndColumns1(CustomerLevelData = item166.data
,valueVariable = 'Quantity'
,columnVariable = 'State'
,rowVariable = 'Framing.Categories'
,aggregateColumnName = "Region")
item166.final$Framing.Categories[which(item166.final$Framing.Categories == "Total")] <- "All Framing Types"
item166.cast <- dcast(setDT(item166.final)
, formula = BuildingType + Framing.Categories ~ State
, value.var = c("w.percent", "w.SE", "count", "n", "N","EB"))
item166.table <- data.frame("BuildingType" = item166.cast$BuildingType
,"Framing.Categories"= item166.cast$Framing.Categories
,"Percent_ID" = item166.cast$w.percent_ID
,"SE_ID" = item166.cast$w.SE_ID
# ,"Count_ID" = item166.cast$count_ID
,"n_ID" = item166.cast$n_ID
,"Percent_MT" = item166.cast$w.percent_MT
,"SE_MT" = item166.cast$w.SE_MT
# ,"Count_MT" = item166.cast$count_MT
,"n_MT" = item166.cast$n_MT
,"Percent_OR" = item166.cast$w.percent_OR
,"SE_OR" = item166.cast$w.SE_OR
# ,"Count_OR" = item166.cast$count_OR
,"n_OR" = item166.cast$n_OR
,"Percent_WA" = item166.cast$w.percent_WA
,"SE_WA" = item166.cast$w.SE_WA
# ,"Count_WA" = item166.cast$count_WA
,"n_WA" = item166.cast$n_WA
,"Percent_Region" = item166.cast$w.percent_Region
,"SE_Region" = item166.cast$w.SE_Region
# ,"Count_Region" = item166.cast$count_Region
,"n_Region" = item166.cast$n_Region
,"EB_ID" = item166.cast$EB_ID
,"EB_MT" = item166.cast$EB_MT
,"EB_OR" = item166.cast$EB_OR
,"EB_WA" = item166.cast$EB_WA
,"EB_Region" = item166.cast$EB_Region
)
levels(item166.table$Framing.Categories)
rowOrder <- c("Metal Single Glazed"
,"Metal Double Glazed"
,"Metal Triple Glazed"
,"Wood/Vinyl/Fiberglass/Tile Single Glazed"
,"Wood/Vinyl/Fiberglass/Tile Double Glazed"
,"Wood/Vinyl/Fiberglass/Tile Triple Glazed"
,"Other Double Glazed"
,"All Framing Types"
)
item166.table <- item166.table %>% mutate(Framing.Categories = factor(Framing.Categories, levels = rowOrder)) %>% arrange(Framing.Categories)
item166.table <- data.frame(item166.table)
item166.final.SF <- item166.table[which(item166.table$BuildingType == "Single Family")
,-which(colnames(item166.table) %in% c("BuildingType"))]
exportTable(item166.final.SF, "SF", "Table B-11", weighted = TRUE)
#######################
# Unweighted Analysis
#######################
item166.final <- proportions_two_groups_unweighted(CustomerLevelData = item166.data
,valueVariable = 'Quantity'
,columnVariable = 'State'
,rowVariable = 'Framing.Categories'
,aggregateColumnName = "Region")
item166.final$Framing.Categories[which(item166.final$Framing.Categories == "Total")] <- "All Framing Types"
item166.cast <- dcast(setDT(item166.final)
, formula = BuildingType + Framing.Categories ~ State
, value.var = c("Percent", "SE", "Count", "n"))
item166.table <- data.frame("BuildingType" = item166.cast$BuildingType
,"Framing.Categories"= item166.cast$Framing.Categories
,"Percent_ID" = item166.cast$Percent_ID
,"SE_ID" = item166.cast$SE_ID
,"n_ID" = item166.cast$n_ID
,"Percent_MT" = item166.cast$Percent_MT
,"SE_MT" = item166.cast$SE_MT
,"n_MT" = item166.cast$n_MT
,"Percent_OR" = item166.cast$Percent_OR
,"SE_OR" = item166.cast$SE_OR
,"n_OR" = item166.cast$n_OR
,"Percent_WA" = item166.cast$Percent_WA
,"SE_WA" = item166.cast$SE_WA
,"n_WA" = item166.cast$n_WA
,"Percent_Region" = item166.cast$Percent_Region
,"SE_Region" = item166.cast$SE_Region
,"n_Region" = item166.cast$n_Region
)
levels(item166.table$Framing.Categories)
rowOrder <- c("Metal Single Glazed"
,"Metal Double Glazed"
,"Metal Triple Glazed"
,"Wood/Vinyl/Fiberglass/Tile Single Glazed"
,"Wood/Vinyl/Fiberglass/Tile Double Glazed"
,"Wood/Vinyl/Fiberglass/Tile Triple Glazed"
,"Other Double Glazed"
,"All Framing Types"
)
item166.table <- item166.table %>% mutate(Framing.Categories = factor(Framing.Categories, levels = rowOrder)) %>% arrange(Framing.Categories)
item166.table <- data.frame(item166.table)
item166.final.SF <- item166.table[which(item166.table$BuildingType == "Single Family")
,-which(colnames(item166.table) %in% c("BuildingType"))]
exportTable(item166.final.SF, "SF", "Table B-11", weighted = FALSE)
|
e6f7d09f904291288a939c665641159c1bb8847d
|
cb04524789baf79c09efa67f6874290a30df7a1a
|
/man/toGregorian.Rd
|
f1b1e15d89906f06acc51b3370af118606bf3b77
|
[
"Apache-2.0"
] |
permissive
|
d-callan/ethiopianDate
|
7b117dced41f989ea2a41b5057a7a0ce799c0118
|
22c997debf3ed52ac1871c2ab620f15e8c8ee814
|
refs/heads/master
| 2023-07-15T03:45:36.795608
| 2021-08-23T19:26:25
| 2021-08-23T19:26:25
| 398,299,123
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 451
|
rd
|
toGregorian.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/convert.R
\name{toGregorian}
\alias{toGregorian}
\title{Convert Ethiopian Date to Gregorian Calendar}
\usage{
toGregorian(ethiopianDate)
}
\arguments{
\item{ethiopianDate}{Date or character representing a date in the Ethiopian calendar}
}
\value{
Date from the Gregorian calendar
}
\description{
This function takes an Ethiopian date and converts
it to a Gregorian date.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.