blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7678eec535b04395db2a68530df30ddf2c204af7
|
c53e367a5a155cfb1ee3a41e8b0351aeaa8d331d
|
/gridGraphics/test-scripts/test-par.R
|
2e05bd8b89df5b856db5600dea0817820e0c805b
|
[
"MIT"
] |
permissive
|
solgenomics/R_libs
|
bcf34e00bf2edef54894f6295c4f38f1e480b3fc
|
e8cdf30fd5f32babf39c76a01df5f5544062224e
|
refs/heads/master
| 2023-07-08T10:06:04.304775
| 2022-05-09T15:41:26
| 2022-05-09T15:41:26
| 186,859,606
| 0
| 2
|
MIT
| 2023-03-07T08:59:16
| 2019-05-15T15:57:13
|
C++
|
UTF-8
|
R
| false
| false
| 1,890
|
r
|
test-par.R
|
require(grDevices) # for gray
library(gridGraphics)
# Check that par("ylog") tracks the log-axis state: FALSE before any
# log-scale plot, TRUE once plot(..., log = "y") has been drawn.
par1 <- function() {
    par("ylog") # FALSE before a log-scale plot exists
    plot(1 : 12, log = "y")
    par("ylog") # TRUE after plotting with a log y-axis
}
# Plot with xaxs = "i" (internal axis style, no extra padding) and then
# query the resulting user coordinates and x-axis tick parameters.
par2 <- function() {
    plot(1:2, xaxs = "i") # 'inner axis' w/o extra space
    par(c("usr", "xaxp")) # user coordinate limits and x tick layout
}
# Draw a barplot with las = 3 (all axis labels vertical), then restore
# las to its default.
par3 <- function() {
    # profession frequencies; the outer parentheses also print the vector
    ( nr.prof <-
        c(prof.pilots = 16, lawyers = 11, farmers = 10, salesmen = 9,
          physicians = 9, mechanics = 6, policemen = 6, managers = 6,
          engineers = 5, teachers = 4, housewives = 3, students = 3,
          armed.forces = 1))
    par(las = 3)
    barplot(rbind(nr.prof)) # R 0.63.2: shows alignment problem
    par(las = 0) # reset to default
}
# Exercise the 'fg' graphics parameter: axes, tick marks and the box are
# drawn in gray, with a "7"-style box (top + right sides only).
par4 <- function() {
    ## 'fg' use:
    plot(1:12, type = "b", main = "'fg' : axes, ticks and box in gray",
         fg = gray(0.7), bty = "7" , sub = R.version.string)
}
## Line types
# Draw each line type in `ltys` as a horizontal line labelled with its
# name: one set at the default width, and a second set (shifted down)
# at lwd = 2.
#
# Arguments:
#   ltys - character or integer vector of line types (length >= 1).
#   xoff - horizontal offset applied to the labels.
#   ...  - further arguments passed on to abline() and text().
showLty <- function(ltys, xoff = 0, ...) {
    stopifnot((n <- length(ltys)) >= 1)
    # shrink margins for the demo; restore them when the function exits
    op <- par(mar = rep(.5,4)); on.exit(par(op))
    plot(0:1, 0:1, type = "n", axes = FALSE, ann = FALSE)
    y <- (n:1)/(n+1) # one evenly spaced row per line type
    clty <- as.character(ltys)
    mytext <- function(x, y, txt)
        text(x, y, txt, adj = c(0, -.3), cex = 0.8, ...)
    abline(h = y, lty = ltys, ...); mytext(xoff, y, clty)
    y <- y - 1/(3*(n+1)) # shift down for the lwd = 2 copies
    abline(h = y, lty = ltys, lwd = 2, ...)
    mytext(1/8+xoff, y, paste(clty," lwd = 2"))
}
# Named line types and their numeric dash-pattern equivalents should
# render identically; par(new = TRUE) overlays the second set on the
# same device.
par5 <- function() {
    showLty(c("solid", "dashed", "dotted", "dotdash", "longdash", "twodash"))
    par(new = TRUE) # the same:
    showLty(c("solid", "44", "13", "1343", "73", "2262"), xoff = .2, col = 2)
}
# Custom two-digit dash patterns: first digit is the "on" length, the
# second the "off" length.
par6 <- function() {
    showLty(c("11", "22", "33", "44", "12", "13", "14", "21", "31"))
}
# Run every test case through the comparison harness. plotdiff() and
# plotdiffResult() are not defined in this file — presumably provided by
# the gridGraphics test infrastructure (render with base graphics and
# grid, then report differences); confirm against the test runner.
plotdiff(expression(par1()), "par-1")
plotdiff(expression(par2()), "par-2")
plotdiff(expression(par3()), "par-3")
plotdiff(expression(par4()), "par-4")
plotdiff(expression(par5()), "par-5")
plotdiff(expression(par6()), "par-6")
plotdiffResult()
|
7b572b7a9ca5474681dfcdf07254f522229585af
|
85abc2513a155bf54eea49580acb1f09b742bc88
|
/R/util.r
|
296dcadee1d2b617881fe7080d1571ad39765d36
|
[] |
no_license
|
mkoohafkan/cimir
|
13ce7bb87ae3ffa3ed238d6529d485c44a066e53
|
e54c8d91e4dae3bad48bebe7606952a11b58d417
|
refs/heads/master
| 2023-02-19T23:56:29.933152
| 2023-02-14T05:23:49
| 2023-02-14T05:23:49
| 160,749,999
| 6
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,780
|
r
|
util.r
|
# CIMIS reports times in Pacific Standard Time; "Etc/GMT+8" is UTC-8
# year-round (the POSIX zone sign convention is inverted), i.e. PST with
# no daylight-saving switch.
cimis.tz = "Etc/GMT+8"
# Zero-row template carrying the full set of record columns, returned
# for queries that yield no data so callers always get a stable schema.
empty.record = tibble(
    Date = as.Date(character(0)),
    Hour = character(0),
    Julian = integer(0),
    Station = character(0),
    Standard = character(0),
    ZipCodes = character(0),
    Scope = character(0),
    Item = character(0),
    Value = character(0),
    Qc = character(0)
)
#' Quick Fix to as_tibble
#'
#' Handle empty lists when coercing to tibble. See
#' [tibble issue 851](https://github.com/tidyverse/tibble/issues/851).
#'
#' @param d An object to coerce to a tibble.
#' @return a tibble.
#' @importFrom purrr modify_if
#' @keywords internal
as_tibble_fix = function(d) {
    # Replace any element that is an empty list() with list(NULL) so that
    # as_tibble() accepts it as a length-one list column (tibble #851).
    as_tibble(modify_if(d, ~ identical(.x, list()),
        ~ list(NULL)))
}
#' To Datetime
#'
#' Collapse The Date and Hour columns to a single DateTime Column.
#'
#' @param d A data frame of CIMIS data results.
#' @return The data frame, with a new `"Datetime"` column replacing
#' the `"Date"` and `"Hour"` columns.
#'
#' @details According to the
#' [CIMIS Report FAQs](https://cimis.water.ca.gov/Default.aspx),
#' all CIMIS data is based on Pacific Standard Time (PST).
#'
#' @examples
#' if(is_key_set()) {
#' d = cimis_data(targets = 170, start.date = Sys.Date() - 4,
#' end.date = Sys.Date() - 1, items = "hly-air-tmp")
#' cimis_to_datetime(d)
#' }
#' @importFrom dplyr select mutate if_else rename
#' @importFrom stringr str_c
#' @export
cimis_to_datetime = function(d) {
    # Daily records carry no "Hour" column; treat them as midnight.
    if (!("Hour" %in% names(d)))
        d = mutate(d, Hour = "0000")
    # Fill missing hours with "0000", fuse Date + Hour into a POSIXct in
    # the CIMIS (PST) timezone, drop the "Hour" column, and rename the
    # combined "Date" column to "Datetime".
    rename(select(mutate(d,
        Hour = if_else(is.na(.data$Hour), "0000", .data$Hour),
        Date = as.POSIXct(str_c(.data$Date, " ", .data$Hour),
            format = "%Y-%m-%d %H%M", tz = cimis.tz)),
        -.data$Hour
    ), Datetime = .data$Date)
}
#' Record to Data Frame
#'
#' Convert a single record, containing one or more data items, to a to
#' a single data frame.
#'
#' @param record A single CIMIS record, in list format.
#' @return A data frame. The column `"Item"` identifies the data item.
#'
#' @importFrom tidyr unnest
#' @importFrom dplyr mutate bind_rows setdiff as_tibble
#' @importFrom purrr map
#' @importFrom rlang .data
#' @keywords internal
record_to_df = function(record) {
    # An empty record means no data: return the zero-row template so the
    # result still has the expected columns.
    if (identical(record, list())) {
        return(empty.record)
    }
    # Columns common to every record; any other element is a data item.
    fixed = c("Date", "Hour", "Julian", "Station", "Standard",
        "ZipCodes", "Scope")
    data.names = setdiff(names(record), fixed)
    other.names = setdiff(names(record), data.names)
    # Bind all data items into one nested frame (tagged via the "Item"
    # column), then unnest it against the fixed columns.
    unnest(mutate(as_tibble(record[other.names]),
        Date = as.Date(.data$Date),
        Julian = as.integer(.data$Julian),
        Data = list(bind_rows(map(record[data.names], as_tibble),
            .id = "Item"))
    ), cols = c(.data$Data))
}
#' Bind Records
#'
#' Bind CIMIS records into a single data frame. This function
#' is used internally.
#'
#' @param result CIMIS query results.
#' @return A data frame.
#'
#' @importFrom tidyr unnest
#' @importFrom purrr map_dfr
#' @importFrom dplyr mutate bind_rows as_tibble case_when
#' across matches
#' @importFrom rlang .data
#' @keywords internal
bind_records = function(result) {
    # Flatten result$Data$Providers into one frame, expand each provider's
    # Records via record_to_df(), then coerce any "Value" column from
    # character to numeric.
    mutate(unnest(mutate(
        map_dfr(result[[c("Data", "Providers")]], as_tibble),
        Records = map(.data$Records, record_to_df)),
        cols = c(.data$Records)), across(matches("Value"), as.numeric))
}
#' Split CIMIS Query
#'
#' Split a large CIMIS query into multiple smaller queries based on a
#' time interval.
#'
#' @inheritParams cimis_data
#' @param max.records The maximum number of records returned by a
#' query. The default value is the the maximum data limit allowed by
#' the CIMIS Web API (1,750 records).
#' @return A data frame with columns "targets", "start.date",
#' "end.date", and "items".
#'
#' @details Queries are not split by `targets` or `items`, i.e. each
#' resulting query will include all targets and items.
#'
#' @examples
#' cimis_split_query(170, "2000-01-01", "2010-12-31", "day-air-tmp-avg")
#' cimis_split_query(c(149, 170), "2018-01-01", "2018-12-31",
#' c("day-air-tmp-avg", "hly-air-tmp", "hly-rel-hum"))
#'
#' @importFrom dplyr tibble n mutate bind_rows
#' @export
cimis_split_query = function(targets, start.date, end.date, items,
    max.records = 1750L) {
    # Hourly and daily items produce records at different per-day rates,
    # so each group gets its own set of date ranges.
    hourly.items = intersect(items, cimis_items("Hourly")[["Data Item"]])
    daily.items = intersect(items, cimis_items("Daily")[["Data Item"]])
    if (length(hourly.items) > 0L) {
        # 24 records per day, per target, per hourly item
        hourly.ranges = mutate(date_seq(start.date, end.date, max.records,
            24 * length(targets) * length(hourly.items)),
            items = rep(list(hourly.items), n()))
    } else {
        hourly.ranges = NULL
    }
    if (length(daily.items) > 0L) {
        # one record per day, per target, per daily item
        daily.ranges = mutate(date_seq(start.date, end.date, max.records,
            length(targets) * length(daily.items)),
            items = rep(list(daily.items), n()))
    } else {
        daily.ranges = NULL
    }
    # every resulting sub-query keeps the full target list
    mutate(bind_rows(daily.ranges, hourly.ranges),
        targets = rep(list(targets), n()))
}
#' @importFrom dplyr tibble
#' @importFrom utils head tail
#' @keywords internal
# Split [start.date, end.date] into consecutive sub-ranges that are each
# expected to yield fewer than `max.length` records, where `multiplier`
# is the number of records generated per day.
date_seq = function(start.date, end.date, max.length, multiplier) {
    start.date = as.Date(start.date)
    end.date = as.Date(end.date)
    num.records = as.numeric(end.date - start.date) * multiplier
    if (num.records < max.length) {
        # small enough: a single range covers the whole span
        tibble(start.date = start.date, end.date = end.date)
    } else {
        num.queries = as.integer(ceiling(num.records / max.length))
        seq.start = seq(start.date, end.date, length.out = num.queries + 1)
        # each range ends one day before the next begins, except the last,
        # which ends exactly on end.date
        starts = head(seq.start, -1)
        ends = c(head(tail(seq.start, -1), -1) - 1, tail(seq.start, 1))
        tibble(start.date = starts, end.date = ends)
    }
}
#' Compass Direction To Degrees
#'
#' Convert the Compass direction labels to degrees.
#'
#' @param x A vector of compass directions, i.e. the data item labels
#' "DayWindNnw", "DayWindSse", etc. Recognized directions are
#' North-northeast (NNE), East-northeast (ENE), East-southeast (ESE),
#' South-southeast (SSE), South-southwest (SSW), West-southwest (WSW),
#' West-northwest (WNW), and North-northwest (NNW).
#'
#' @return A numeric vector of degrees corresponding to the middle
#' azimuth of the corresponding compass direction.
#'
#' @examples
#' cimis_compass_to_degrees("day-wind-nne")
#' cimis_compass_to_degrees(c("SSE", "SSW", "wsw", "Wnw", "nnw"))
#'
#' @seealso [cimis_degrees_to_compass()]
#'
#' @importFrom dplyr case_when
#' @importFrom stringr str_to_upper str_detect
#' @export
# Convert compass direction labels to the middle azimuth (degrees) of
# each sector. Matching is case-insensitive and anchored at the end of
# the string, so data item labels such as "day-wind-nne" also work.
#
# Fix: the error message misspelled "argument" as "arugment".
cimis_compass_to_degrees = function(x) {
    x = str_to_upper(x)
    res = case_when(
        str_detect(x, "NNE$") ~ 22.5,
        str_detect(x, "ENE$") ~ 67.5,
        str_detect(x, "ESE$") ~ 112.5,
        str_detect(x, "SSE$") ~ 157.5,
        str_detect(x, "SSW$") ~ 202.5,
        str_detect(x, "WSW$") ~ 247.5,
        str_detect(x, "WNW$") ~ 292.5,
        str_detect(x, "NNW$") ~ 337.5,
        TRUE ~ NA_real_
    )
    # fail loudly on anything that matched no recognized direction
    if (any(is.na(res)))
        stop("Unrecognized values in argument \"x\".")
    res
}
#' Degrees to Compass Direction
#'
#' Convert decimal degrees to Compass direction.
#'
#' @param x A vector of directions in decimal degrees.
#' @return A factor vector of compass directions.
#'
#' @details Degrees are labeled with their corresponding
#' Primary InterCardinal compass direction, following the
#' convention of the CIMIS daily wind data items.
#'
#' @examples
#' cimis_degrees_to_compass(c(30, 83, 120, 140, 190, 240, 300, 330))
#' cimis_degrees_to_compass(cimis_compass_to_degrees(c("NNE", "ENE",
#' "ESE", "SSE", "SSW", "WSW", "WNW", "NNW")))
#'
#' @seealso [cimis_compass_to_degrees()]
#' @export
cimis_degrees_to_compass = function(x) {
    # Bin azimuths into the eight Primary InterCardinal sectors. Each
    # sector is the 45-degree interval (lower, upper]; include.lowest
    # pulls 0 into the first ("NNE") bin, matching cut()'s defaults.
    sector.bounds = seq(0, 360, by = 45)
    sector.labels = c("NNE", "ENE", "ESE", "SSE", "SSW", "WSW", "WNW", "NNW")
    cut(x, breaks = sector.bounds, labels = sector.labels,
        include.lowest = TRUE)
}
#' Format CIMIS Station Location
#'
#' Format the latitude and longitude of station in
#' Decimal Degrees (DD) or Hour Minutes Seconds (HMS).
#'
#' @inheritParams cimis_to_datetime
#' @param format The format to use, either Decimal Degrees (`"DD"`)
#' or Hour Minutes Seconds (`"HMS"`).
#'
#' @return The data frame, with a new `"Latitude"` and `"Longitude"`
#' columns replacing the `"HmsLatitude"` and `"HmsLongitude"`
#' columns.
#'
#' @examples
#' if(is_key_set()) {
#' d = cimis_station(170)
#' cimis_format_location(d, "DD")
#' cimis_format_location(d, "HMS")
#' }
#'
#' @importFrom dplyr mutate_at rename
#' @importFrom stringr str_split str_replace
#' @export
cimis_format_location = function(d, format = c("DD", "HMS")) {
    # case-insensitive match; the default vector resolves to "DD"
    format = match.arg(str_to_upper(format), c("DD", "HMS"))
    # NOTE(review): the split below implies HmsLatitude/HmsLongitude hold
    # two fields separated by " / " — apparently HMS first, decimal
    # degrees second — confirm against actual station records.
    if (format == "HMS") {
        # keep the first (HMS) field, stripping a leading minus sign
        fun = function(x)
            str_replace(str_split(x, " / ", simplify = TRUE)[, 1], "^-", "")
    } else {
        # keep the second (decimal degrees) field as numeric
        fun = function(x)
            as.numeric(str_split(x, " / ", simplify = TRUE)[, 2])
    }
    rename(
        mutate_at(d, c("HmsLatitude", "HmsLongitude"), fun),
        Latitude = .data$HmsLatitude, Longitude = .data$HmsLongitude
    )
}
|
664210a0358e3d429f71383c0bbc5fa66283da39
|
3050849fdeb7b54d22f5b72ec004fefeb0af86a6
|
/tests/testthat/test.DIscBIO.IMP.R
|
f6cbb8d2eeebc4b36cca8739ae8abbfe22f04da9
|
[
"MIT"
] |
permissive
|
dami82/DIscBIO
|
4e8fd3591b2c1d92c9deb83596713f076a12ac1c
|
8de0522099697a9364ee01befdb13f5b36b16970
|
refs/heads/master
| 2021-04-22T02:19:49.309902
| 2020-04-08T11:37:15
| 2020-04-08T11:37:15
| 259,098,496
| 0
| 1
|
MIT
| 2020-04-26T18:01:52
| 2020-04-26T18:01:51
| null |
UTF-8
|
R
| false
| false
| 9,340
|
r
|
test.DIscBIO.IMP.R
|
# ---------------------------------------------------------------------------- #
# Data pre-processing                                                          #
# ---------------------------------------------------------------------------- #
# NOTE(review): all expected values in this file are pinned to the bundled
# valuesG1msReduced dataset and the fixed seeds used below.
context("Data loading and pre-processing")
sc <- DISCBIO(valuesG1msReduced) # Reduced dataset used for testing
test_that("Loading datasets generate the expected output", {
    expect_equal(dim(valuesG1msReduced), c(1092, 30))
})
test_that("Data signature changes", {
    expect_equal(class(sc)[1], "DISCBIO")
    expect_equal(attr(class(sc), "package"), "DIscBIO")
})
# This function will be used only if the dataset has ERCC
sc <- NoiseFiltering(sc, plot=FALSE, export=FALSE, quiet=TRUE)
test_that("Noise filtering is added", {
    expect_equal(length(sc@noiseF), 341)
})
# In this case this function is used to normalize the reads
sc <- Normalizedata(
    sc, mintotal=1000, minexpr=0, minnumber=0, maxexpr=Inf, downsample=FALSE,
    dsn=1, rseed=17000
)
test_that("Data is normalized", {
    expect_equal(class(sc@fdata), "data.frame")
    expect_output(str(sc@fdata), "1000 obs. of 30 variables")
})
# This function can be used for: 1- filtering and normalizing the dataset
# that has no ERCC. 2- to normalize and filter genes and cells after the
# noise filtering.
sc <- FinalPreprocessing(sc, GeneFlitering="NoiseF", export=FALSE, quiet=TRUE)
test_that("Data is normalized", {
    expect_equal(dim(sc@fdata), c(341, 30))
})
# ---------------------------------------------------------------------------- #
# K-means clustering                                                           #
# ---------------------------------------------------------------------------- #
context("K-means clustering")
sc <- Clustexp(sc, cln=2, quiet=TRUE) # K-means clustering
sc <- comptSNE(sc, rseed=15555, quiet=TRUE) # fixed seed keeps tSNE reproducible
test_that("tSNE is computed", {
    expect_equal(class(sc@tsne), "data.frame")
    expect_output(str(sc@tsne), "30 obs. of 2 variables")
})
test_that("Cluster plots output is as expexted", {
    expect_equivalent(
        object = Jaccard(sc, Clustering="K-means", K=2, plot = FALSE),
        expected = c(.790, .653),
        tolerance = .01
    )
    expect_equal(
        object = KMclustheatmap(sc, hmethod = "single", plot = FALSE),
        expected = c(1, 2)
    )
})
# --------------------------------- Outliers --------------------------------- #
context("Outliers")
Outliers <- FindOutliersKM(
    sc, K=2, outminc=5, outlg=2, probthr=.5*1e-3, thr=2**-(1:40),
    outdistquant=.75, plot = FALSE, quiet = TRUE
)
# Adjusting outliers: a cell is considered an outlier if at least 0.5% of
# the filtered genes are flagged as outlier genes in it.
outlg <- round(length(sc@fdata[, 1]) / 200)
Outliers2 <- FindOutliersKM(
    sc, K=2, outminc=5, outlg=outlg, probthr=.5*1e-3, thr=2**-(1:40),
    outdistquant=.75, plot = FALSE, quiet = TRUE
)
Order <- KmeanOrder(sc, quiet = TRUE, export = FALSE)
test_that("Outliers are the expected", {
    expect_equivalent(Outliers, c(3, 7, 19))
    expect_equivalent(Outliers2, c(3, 7, 19))
    expect_equivalent(
        object = Order@kordering,
        expected = c(
            23, 20, 6, 21, 27, 26, 24, 28, 10, 19, 15, 25, 16, 8, 14, 13, 22, 4,
            17, 2, 3, 18, 11, 29, 9, 5, 12, 1, 30, 7
        )
    )
})
# --------------------- Differential Expression Analysis --------------------- #
context("Differential Expression Analysis")
# Binomial differential expression analysis
cdiff1 <- KMClustDiffGenes(sc, K=1, fdr=.2, export=FALSE, quiet=TRUE)
# differential expression analysis between all clusters
cdiff2 <- DEGanalysis(
    sc, Clustering="K-means", K=2, fdr=.2, name="Name", export=FALSE,
    quiet=TRUE, plot=FALSE, nperms=5, nresamp=2
)
# differential expression analysis between two particular clusters.
cdiff3 <- DEGanalysis2clust(
    sc, Clustering="K-means", K=2, fdr=.15, name="Name", First="CL1",
    Second="CL2", export=FALSE, quiet=TRUE, plot=FALSE
)
test_that("DEGs are calculated", {
    # each result pairs a matrix of DEGs with a data.frame summary
    expect_identical(
        object = sapply(cdiff1, function(x) class(x)[1]),
        expected = c("matrix", "data.frame")
    )
    expect_identical(
        object = sapply(cdiff2, function(x) class(x)[1]),
        expected = c("matrix", "data.frame")
    )
    expect_identical(
        object = sapply(cdiff3, function(x) class(x)[1]),
        expected = c("matrix", "data.frame")
    )
})
# Decision tree
sigDEG <- cdiff3[[1]] # significant DEGs feed the decision-tree classifiers
DATAforDT <- ClassVectoringDT(
    sc, Clustering="K-means", K=2, First="CL1", Second="CL2", sigDEG,
    quiet = TRUE
)
j48dt <- J48DT(DATAforDT, quiet = TRUE, plot = FALSE)
j48dt_eval <- J48DTeval(
    DATAforDT, num.folds=10, First="CL1", Second="CL2", quiet=TRUE
)
rpartDT <- RpartDT(DATAforDT, quiet = TRUE, plot = FALSE)
rpartEVAL <- RpartEVAL(
    DATAforDT, num.folds=10, First="CL1", Second="CL2", quiet = TRUE
)
test_that("Decision tree elements are defined", {
    expect_output(str(DATAforDT), "3 obs. of 30 variables")
    expect_s3_class(j48dt, "J48")
    expect_s3_class(summary(j48dt), "Weka_classifier_evaluation")
    expect_identical(j48dt_eval, c(TP = 14, FN = 4, FP = 5, TN = 7))
    expect_s3_class(rpartDT, "rpart")
    expect_identical(rpartEVAL, c(TP = 15, FN = 3, FP = 3, TN = 9))
})
# ---------------------------------------------------------------------------- #
# Model-based clustering                                                       #
# ---------------------------------------------------------------------------- #
context("Model-based clustering")
# Technically, this should be done before Clustexp, but it's ok in practice to
# apply it after K-means because it uses different slots.
sc <- Exprmclust(sc, K=2, quiet=TRUE)
test_that("Model-based clustering elements are OK", {
    expect_identical(
        object = names(sc@MBclusters),
        expected = c("pcareduceres", "MSTtree", "clusterid", "clucenter")
    )
})
sc <- comptsneMB(sc, rseed=15555, quiet = TRUE)
test_that("tSNE clustering works fine", {
    expect_equal(dim(sc@MBtsne), c(30, 2))
})
# --------------------------------- Outliers --------------------------------- #
context("MB outliers")
sc <- Clustexp(sc, cln=2, quiet=TRUE)
Outliers <- FindOutliersMB(
    sc, K=2, outminc=5, outlg=2, probthr=.5*1e-3, thr=2**-(1:40),
    outdistquant=.75, plot = FALSE, quiet = TRUE
)
# A cell is considered an outlier if at least 0.5% of the filtered genes
# are flagged as outlier genes in it.
outlg <- round(length(sc@fdata[, 1]) / 200)
Outliers2 <- FindOutliersMB(
    sc, K=2, outminc=5, outlg=outlg, probthr=.5*1e-3, thr=2**-(1:40),
    outdistquant=.75, plot = FALSE, quiet = TRUE
)
test_that("MB clustering and outliers work as expected", {
    expect_equivalent(
        object = Jaccard(sc, Clustering="MB", K=2, plot = FALSE),
        expected = c(.819, .499),
        tolerance = 0.01
    )
    expect_equivalent(Outliers, c(3, 4, 7))
    expect_equal(outlg, 2)
    expect_equal(Outliers2, c("G1_12" = 3, "G1_18" = 4, "G1_21" = 7))
})
sc <- MB_Order(sc, quiet = TRUE, export = FALSE)
mb_heat <- MBclustheatmap(sc, hmethod="single", plot = FALSE, quiet = TRUE)
test_that("More MB things are OK", {
    expect_equal(
        object = sc@MBordering,
        expected = c(
            8, 28, 27, 18, 21, 10, 29, 26, 17, 25, 5, 13, 12, 19, 16, 15, 23,
            20, 30, 14, 7, 9, 24, 22, 3, 2, 4, 6, 1, 11
        )
    )
    expect_equal(mb_heat, c(1, 2))
})
# ----------------------------------- DEGs ----------------------------------- #
context("MB DEGs")
# Binomial DE analysis
cdiff1 <- MBClustDiffGenes(sc, K=2, fdr=.2, export=FALSE, quiet=TRUE)
# DE analysis between all clusters
cdiff2 <- DEGanalysis(
    sc, Clustering="MB", K=2, fdr=.2, name="Name", export=FALSE, quiet=TRUE,
    plot = FALSE
)
# differential expression analysis between particular clusters.
cdiff3 <- DEGanalysis2clust(
    sc, Clustering="MB", K=2, fdr=.15, name="Name", First="CL1", Second="CL2",
    export = FALSE, plot = FALSE, quiet = TRUE
)
test_that("DEGs are calculated", {
    expect_identical(
        object = sapply(cdiff1, function(x) class(x)[1]),
        expected = c("matrix", "data.frame")
    )
    expect_identical(
        object = sapply(cdiff2, function(x) class(x)[1]),
        expected = c("matrix", "data.frame")
    )
    expect_identical(
        object = sapply(cdiff3, function(x) class(x)[1]),
        expected = c("matrix", "data.frame")
    )
})
# Decision tree
sigDEG <- cdiff3[[1]]
DATAforDT <- ClassVectoringDT(
    sc, Clustering="MB", K=2, First="CL1", Second="CL2", sigDEG, quiet = TRUE
)
j48dt <- J48DT(DATAforDT, quiet = TRUE, plot = FALSE)
j48dt_eval <- J48DTeval(
    DATAforDT, num.folds=10, First="CL1", Second="CL2", quiet = TRUE
)
rpartDT <- RpartDT(DATAforDT, quiet = TRUE, plot = FALSE)
rpartEVAL <- RpartEVAL(
    DATAforDT, num.folds=10, First="CL1", Second="CL2", quiet = TRUE
)
test_that("Decision tree elements are defined", {
    expect_output(str(DATAforDT), "23 obs. of 30 variables") # used to be 31
    expect_s3_class(j48dt, "J48")
    expect_s3_class(summary(j48dt), "Weka_classifier_evaluation")
    expect_identical(j48dt_eval, c(TP = 21, FN = 2, FP = 4, TN = 3))
    expect_s3_class(rpartDT, "rpart")
    expect_identical(rpartEVAL, c(TP = 19, FN = 4, FP = 4, TN = 3))
})
|
e8c3a9eb4307b447e08d986ba079952cbca4d232
|
fcb9bd2e5be13ef292927a1674af9fa2b9cdbbf3
|
/R/print.pcoasig.R
|
68ce294b8730d0bf89937d159c2e535944aba91a
|
[] |
no_license
|
vanderleidebastiani/PCPS
|
9a68e9a169a422d8239c708ecb79a4d86f9b8330
|
71329266a31d87faac6977c624a176edda5f85f5
|
refs/heads/master
| 2021-08-06T18:05:58.611153
| 2021-02-02T19:41:08
| 2021-02-02T19:41:08
| 22,879,537
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 237
|
r
|
print.pcoasig.R
|
#' @rdname pcoa.sig
#' @encoding UTF-8
#' @export
print.pcoasig <- function(x, ...){
    # S3 print method for "pcoasig" objects: show the originating call,
    # the PCoA values table and the axis significance probabilities.
    cat("Call:\n")
    cat(deparse(x$call), "\n\n")
    cat("PCoA values:\n")
    print(x$values)
    cat("\nProbabilities:\n")
    print(x$probabilities)
    # return the object invisibly, as print methods conventionally do
    invisible(x)
}
|
226d05c0e1cd9451bff38df3b512123bfcb22fb7
|
2c88dd784cc718ca44aa04a5d42adbb512a2164e
|
/interpolation.R
|
23e1534e5da0ca2f4b4f041bd6218f545c8113cc
|
[] |
no_license
|
erikagonzalezs/mortar-interpolation
|
a36cdc1460ad7901dd4b5ae5594e44176224f13a
|
2175f4ceb14638f6bde37c3051dfec72344a180e
|
refs/heads/master
| 2022-09-06T03:26:05.830535
| 2020-05-26T16:37:12
| 2020-05-26T16:37:12
| 267,095,325
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,359
|
r
|
interpolation.R
|
#--------------------------------
# RETO 2
#
#--------------------------------
# Integrantes
# - Andres Camilo Giraldo Gil
# - Erika Alejandra Gonzalez
# - Leonel Steven Londono
#--------------------------------
# Analisis Numerico
#--------------------------------
#Instalacion Librerias Requeridas
#--------------------------------
#install.packages("bezier")
#install.packages("gridBezier")
#install.packages("PolynomF")
#install.packages("rgl")
#install.packages("plot3D")
#Incluir las librerias requeridas
#--------------------------------
require(bezier)
require(gridBezier)
require(PolynomF)
require(rgl)
require(plot3D)
# FUNCTIONS
# --------------------------------
# Compute the full point grid of one mortar quadrant across its depth.
# Starting from the Bezier boundary points (rows of 100 points), each
# iteration shrinks the boundary toward the origin (by growing dX/dY
# offsets) and lowers it (by a z decrement), until 40000 points exist.
#
# Arguments:
#   puntos   - n x 3 matrix of boundary points (columns x, y, z).
#   zMax     - z coordinate of the mortar rim.
#   dX, dY   - per-row inward shrink steps in x and y.
#   dZ       - basic depth step between rows.
#   longitud - point index from which the finest z step is used.
# Returns a 3 x 40000-ish matrix (rows auxX, auxY, auxZ).
#
# NOTE(review): the thresholds 15000..19000 and the row length 100 are
# hard-coded; they assume 100-point rows and a ~25000-point target.
# Because the `>= longitud` test comes first, with longitud = 25000 the
# 19000+ branch only fires for indices in [19000, longitud).
calcularProfundidadMortero <- function(puntos,zMax, dX, dY, dZ, longitud){
    finalDelCiclo <- FALSE
    auxX= puntos[,1]
    auxY= puntos[,2]
    auxZ= puntos[,3]
    nuevasCoordenadasX <- length(auxX) + 1 # write index for new points
    nuevasCoordenadasY <- 1                # read index into the current row
    decrementoX <- dX
    decrementoY <- dY
    decrementoZ <- zMax - dZ
    while(!finalDelCiclo){
        # shrink x toward 0, never crossing it
        if((auxX[nuevasCoordenadasY] - decrementoX) >= 0){
            auxX[nuevasCoordenadasX] <- auxX[nuevasCoordenadasY] - decrementoX
        }
        # shrink y toward 0; once it would go negative, clamp y to 0 and
        # reuse the previous x value
        if((auxY[nuevasCoordenadasY] - decrementoY) >= 0){
            auxY[nuevasCoordenadasX] <- auxY[nuevasCoordenadasY] - decrementoY
        }else
        {
            auxY[nuevasCoordenadasX] = 0
            auxX[nuevasCoordenadasX] = auxX[nuevasCoordenadasX - 1]
        }
        auxZ[nuevasCoordenadasX] = decrementoZ
        nuevasCoordenadasX = nuevasCoordenadasX + 1
        nuevasCoordenadasY = nuevasCoordenadasY + 1
        # a full row of 100 points is done: reset the read index, pick the
        # z decrement for the next depth level and widen the shrink steps
        if(nuevasCoordenadasY == 101)
        {
            nuevasCoordenadasY = 1
            if(nuevasCoordenadasX >= longitud){
                decrementoZ = decrementoZ - 0.0000007
            }
            else if(nuevasCoordenadasX >= 19000){
                decrementoZ = decrementoZ - 0.006
            }
            else if(nuevasCoordenadasX >= 18000){
                decrementoZ = decrementoZ - 0.005
            }
            else if(nuevasCoordenadasX >= 17000){
                decrementoZ = decrementoZ - 0.004
            }
            else if(nuevasCoordenadasX >= 16000){
                decrementoZ = decrementoZ - 0.003
            }
            else if(nuevasCoordenadasX >= 15000){
                decrementoZ = decrementoZ - 0.002
            }
            else if(nuevasCoordenadasX < 15000){
                decrementoZ = decrementoZ - dZ
            }
            decrementoX = decrementoX + dX
            decrementoY = decrementoY + dY
        }
        if(nuevasCoordenadasX == 40000){
            finalDelCiclo <- TRUE
        }
    }
    nuevosPuntos <- rbind(auxX, auxY, auxZ)
    return (nuevosPuntos)
}
# Compute the points of a new mortar from an existing one: each 100-point
# row of the source quadrant is subsampled and re-interpolated with a
# cubic spline.
# --------------------------------
# Arguments:
#   cuadrantePX, cuadrantePY - x and y coordinates of the source quadrant.
#   cuadrantepZ - z coordinates; accepted for symmetry but not read (the
#                 z values are rebuilt from zMax and dZ).
#   dZ       - depth step between consecutive rows.
#   zMax     - depth of the first (top) row.
#   longitud - number of source points to process.
# Returns a 3-row matrix (morteroNuevoX / morteroNuevoY / morteroNuevoZ).
#
# NOTE(review): sample() makes the output non-deterministic; set a seed
# upstream if reproducible results are needed.
calcularPuntosNuevoMortero <- function(cuadrantePX, cuadrantePY, cuadrantepZ, dZ, zMax, longitud){
    finalDelCiclo <- FALSE
    nuevasCoordenadas <- 1
    morteroNuevoX <- c()
    morteroNuevoY <- c()
    morteroNuevoZ <- c()
    while(!finalDelCiclo){
        # take the next row of 100 points
        auxX <- cuadrantePX[nuevasCoordenadas:(nuevasCoordenadas+99)]
        auxY <- cuadrantePY[nuevasCoordenadas:(nuevasCoordenadas+99)]
        auxXX <- c(auxX[1])
        auxYY <- c(auxY[1])
        # keep every k-th point, with k drawn at random from 4..10
        auxSegundoCiclo <- sample(4:10,1)
        contadorSegundoCiclo <- auxSegundoCiclo
        while(TRUE){
            if(contadorSegundoCiclo >=100){
                break;
            }
            auxXX <- c(auxXX, auxX[contadorSegundoCiclo])
            auxYY <- c(auxYY, auxY[contadorSegundoCiclo])
            contadorSegundoCiclo <- contadorSegundoCiclo + auxSegundoCiclo
        }
        # always keep the last point of the row
        auxXX <- c(auxXX, auxX[100])
        auxYY <- c(auxYY, auxY[100])
        # re-interpolate the subsampled row with a spline
        splineMortero <- spline(auxXX, auxYY)
        morteroNuevoX <- c(morteroNuevoX, splineMortero$x)
        morteroNuevoY <- c(morteroNuevoY, splineMortero$y)
        auxZ <- 1
        while (auxZ <= length(splineMortero$x)){
            morteroNuevoZ <- c(morteroNuevoZ, zMax)
            # original author's note (translated): "I think the depth-limit
            # ifs for the bottom curvature go here, but I am not sure"
            auxZ <- auxZ + 1
        }
        zMax <- zMax - dZ
        nuevasCoordenadas <- nuevasCoordenadas + 100
        auxXX <- c()
        auxYY <- c()
        if((nuevasCoordenadas+99) > longitud){
            finalDelCiclo <- TRUE
        }
    }
    nuevoMortero <- rbind(morteroNuevoX, morteroNuevoY, morteroNuevoZ)
    return (nuevoMortero)
}
# Compute, for every point, the Euclidean distance to the origin for the
# original and the interpolated quadrant, plus the absolute and relative
# errors between them, printing one table row per point.
# --------------------------------
# Fix: the original error loop was `for (i in distanciasNuevas)`, which
# iterates over the distance VALUES and uses them as indices, so most
# entries of errorAbsoluto/errorRelativo were never set (NA) and the
# vectors grew to the wrong length. It now iterates over positions.
#
# Arguments:
#   zMax, zMin        - unused; kept so existing calls stay valid.
#   cuadranteOriginal - 3 x n matrix (rows x, y, z) of original points.
#   cuadranteNuevo    - 3 x m matrix (rows x, y, z) of new points.
# Returns a matrix with rows distanciasOriginales, distanciasNuevas,
# errorAbsoluto and errorRelativo.
calcularError <- function(zMax,zMin, cuadranteOriginal, cuadranteNuevo){
    cat("\tDistancia del punto Original\t Distancia del punto Nuevo \t Error absoluto\t Error Relativo\n")
    # distance of every column (point) to the origin, vectorized
    distanciasOriginales <- sqrt(colSums(cuadranteOriginal^2))
    distanciasNuevas <- sqrt(colSums(cuadranteNuevo^2))
    # preallocate instead of growing inside the loop
    errorAbsoluto <- numeric(length(distanciasNuevas))
    errorRelativo <- numeric(length(distanciasNuevas))
    for (i in seq_along(distanciasNuevas)) {
        errorAbsoluto[i] <- abs(distanciasOriginales[i] - distanciasNuevas[i])
        errorRelativo[i] <- errorAbsoluto[i] / distanciasOriginales[i]
        cat("\t", distanciasOriginales[i],
            "\t", distanciasNuevas[i],
            "\t", errorAbsoluto[i],
            "\t", errorRelativo[i],
            "\n")
    }
    retorno <- rbind(distanciasOriginales, distanciasNuevas, errorAbsoluto, errorRelativo)
    return(retorno)
}
# Plot the four quadrants of a figure with rgl's plot3d, one call per
# quadrant: (+x,+y), (-x,+y), (-x,-y) and (+x,-y), all with the same
# styling and axis limits.
# --------------------------------
# NOTE(review): each plot3d call normally starts a fresh scene — confirm
# whether the intent was to overlay the quadrants (points3d/lines3d).
graficarFigura <- function(cuadrantePX, cuadrantePY, cuadrantePZ, cuadranteNX, cuadranteNY, color){
    plot3d(cuadrantePX,cuadrantePY, cuadrantePZ, type = "l", lwd = 10, col = color,
        xlab = "x", ylab="y", zlab="z", xlim = c(-10,10), ylim = c(-10,10) , zlim = c(0,5))
    plot3d(cuadranteNX,cuadrantePY, cuadrantePZ, type = "l", lwd = 10, col = color,
        xlab = "x", ylab="y", zlab="z", xlim = c(-10,10), ylim = c(-10,10) , zlim = c(0,5))
    plot3d(cuadranteNX, cuadranteNY, cuadrantePZ, type = "l", lwd = 10, col = color,
        xlab = "x", ylab="y", zlab="z", xlim = c(-10,10), ylim = c(-10,10) , zlim = c(0,5))
    plot3d(cuadrantePX,cuadranteNY, cuadrantePZ, type = "l", lwd = 10, col = color,
        xlab = "x", ylab="y", zlab="z", xlim = c(-10,10), ylim = c(-10,10) , zlim = c(0,5))
}
# Declare the control points of one quadrant
# --------------------------------
t <- seq(0,4, length=100)
coordenadasX <- c(0,0.99,4.78,8.91,9)
coordenadasY <- c(9,8.95,7.63,1.24,0)
coordenadasZ <- c(rep(4,5)) # the rim of the quadrant sits at z = 4
# Control-point matrix, one row per point (x, y, z)
p <- matrix(rbind(coordenadasX,coordenadasY,coordenadasZ),
    nrow=5, ncol=3, byrow=TRUE)
puntosDeBezier <- bezier(t=t, p=p, deg =1) # deg = 1: piecewise-linear curve
# Print the quadrant as seen from above
# --------------------------------
xAux <- p[,1]
yAux <- p[,2]
p # print the control-point matrix
plot(xAux, yAux, type = "l")
# Compute and plot the four quadrants of the mortar
# --------------------------------
cuadrante <- calcularProfundidadMortero(puntosDeBezier, 4,0.02,0.02,0.01, 25000)
cuadrantePositivoX <- cuadrante[1,]
cuadrantePositivoY <- cuadrante[2,]
cuadrantePositivoZ <- cuadrante[3,]
# mirror across the axes to obtain the negative quadrants
cuadranteNegativoX=-1*cuadrantePositivoX
cuadranteNegativoY=-1*cuadrantePositivoY
graficarFigura(cuadrantePositivoX, cuadrantePositivoY,
    cuadrantePositivoZ,cuadranteNegativoX,
    cuadranteNegativoY, "blue")
# Compute and plot the thickness of the mortar walls
# --------------------------------
cuadrantePared <- calcularProfundidadMortero(puntosDeBezier, 4,0.002,0.002,0.001, 300)
cuadranteParedX <- cuadrantePared[1,]
cuadranteParedY <- cuadrantePared[2,]
cuadranteParedZ <- cuadrantePared[3,]
cuadranteParedNX=-1*cuadranteParedX
cuadranteParedNY=-1*cuadranteParedY
graficarFigura(cuadranteParedX, cuadranteParedY,
    cuadranteParedZ,cuadranteParedNX,
    cuadranteParedNY, "blue")
# Compute and plot the quadrants of the inner mortar surface
# --------------------------------
cuadranteInternoPositivoX <- cuadrante[1,]
cuadranteInternoPositivoY <- cuadrante[2,]
cuadranteInternoPositivoZ <- cuadrante[3,]
cuadranteInternoNegativoX=-1*cuadranteInternoPositivoX
cuadranteInternoNegativoY=-1*cuadranteInternoPositivoY
# shift each quadrant inward by 1 unit to model the wall thickness
cuadranteInternoPositivoX <- cuadranteInternoPositivoX-1
cuadranteInternoPositivoY <- cuadranteInternoPositivoY-1
cuadranteInternoNegativoX <- cuadranteInternoNegativoX+1
cuadranteInternoNegativoY <- cuadranteInternoNegativoY+1
graficarFigura(cuadranteInternoPositivoX,cuadranteInternoPositivoY,
    cuadranteInternoPositivoZ,cuadranteInternoNegativoX,
    cuadranteInternoNegativoY, "gray")
# Derive a new mortar from the previous one's points (spline
# interpolation) and plot it
# --------------------------------
cuadranteMorteroNuevo <- calcularPuntosNuevoMortero(cuadrante[1,],cuadrante[2,], cuadrante[3,],0.01, 4, 25000)
cuadranteMNX <- cuadranteMorteroNuevo[1,]
cuadranteMNY <- cuadranteMorteroNuevo[2,]
cuadranteMNZ <- cuadranteMorteroNuevo[3,]
cuadranteMNNX=-1*cuadranteMNX
cuadranteMNNY=-1*cuadranteMNY
graficarFigura(cuadranteMNX, cuadranteMNY,
    cuadranteMNZ,cuadranteMNNX,
    cuadranteMNNY, "green")
# Compute the fitting error between the original and the refitted mortar
#--------------------------------
distancias <- calcularError(4,1,cuadrante,cuadranteMorteroNuevo)
# Row 3 of `distancias` holds the absolute error per point and row 4 the
# relative error (as consumed by the original code below).
# BUG FIX: the original accumulation loop was `for(i in length(...))`, which
# iterates exactly once with i == length, so only the LAST point contributed
# to the "average" before the division. Average over all points instead.
errorAbsolutoPromedio <- mean(distancias[3,])
errorRelativoPromedio <- mean(distancias[4,])
cat("\n\n")
cat("Error Absoluto Promedio: ", errorAbsolutoPromedio, "\n")
cat("Error Relativo Promedio",errorRelativoPromedio,"\n")
|
ebd69c3f46a173c870fc9c68a062130e81034794
|
66cb7880ff3e2785bd4de737799d8e14e9d6b25e
|
/tester_file.R
|
0853af29e0712fa01e233378a17dac67591225fc
|
[] |
no_license
|
aeraposo/Data-440-Raposo
|
0dda5220b960caca4520468b797c2ebc925627cf
|
c3c48542cc5ff383da7f77b3540d00601131f233
|
refs/heads/master
| 2023-01-18T19:05:49.166979
| 2020-11-24T17:44:32
| 2020-11-24T17:44:32
| 289,048,044
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 23
|
r
|
tester_file.R
|
# trying to push to git
|
5d758f8761264ba471cafee4000d8097ea500817
|
da75298a0a888118873c387139df1496739ebccd
|
/ggplot2 example.R
|
a02b278f051412f41023f3447ab7e8ecc591e73b
|
[] |
no_license
|
AlonFriedman01/Visual-analytics-class
|
b791b37bcb94d27cd49b5db86ee567c127d78c8a
|
ebcc96054ce565265435be59d9f42aff2c1c4b46
|
refs/heads/master
| 2021-01-07T12:37:06.707025
| 2020-04-12T21:22:54
| 2020-04-12T21:22:54
| 241,695,186
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 504
|
r
|
ggplot2 example.R
|
# Example: overlaying per-group summary statistics on raw data with ggplot2.
# Simulated data: 3 groups (a, b, c) of 10 draws each from N(0, 1).
df <- data.frame(
gp = factor(rep(letters[1:3], each = 10)),
y = rnorm(30)
)
# Per-group mean and sd. (plyr::ddply is superseded; dplyr's summarise is
# the modern equivalent -- left unchanged here.)
ds <- plyr::ddply(df, "gp", plyr::summarise, mean = mean(y), sd = sd(y))
# Raw points with the group means overlaid in red; the second layer inherits
# the x aesthetic (gp) from the ggplot() call and overrides y with `mean`.
ggplot(df, aes(gp, y)) +
geom_point() +
geom_point(data = ds, aes(y = mean), colour = 'red', size = 3)
# Same plot, but every layer declares its own data/aes explicitly, plus
# mean +/- 1 sd error bars per group.
ggplot() +
geom_point(data = df, aes(gp, y)) +
geom_point(data = ds, aes(gp, mean), colour = 'red', size = 3) +
geom_errorbar(
data = ds,
aes(gp, mean, ymin = mean - sd, ymax = mean + sd),
colour = 'red',
width = 0.4
)
|
2decd098bb7be382010f71a63bae4aed8e320817
|
a508837184dbad17a408cfe8a0d066c8f7254ccc
|
/app.R
|
8466cc255ad3e9bd1d73eb70918c83e57f9674dd
|
[] |
no_license
|
usfviz/bckenstler-hw3
|
ea0c241b9d89c5285c4d21b0f08c20883cfe749e
|
e1ca5b6c6e311f9a5568ac63a9618a40714d509b
|
refs/heads/master
| 2021-01-19T22:59:12.771886
| 2017-04-27T06:14:11
| 2017-04-27T06:14:11
| 88,901,211
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,874
|
r
|
app.R
|
# --- Setup: libraries and data --------------------------------------------
library(shiny)
library(ggplot2)
# Install GGally on first run if absent. requireNamespace() is the
# documented way to test whether a package is installed; the original
# scanned the whole installed.packages() matrix, which also matches
# non-name cells (versions, paths) and is slow.
if (!requireNamespace("GGally", quietly = TRUE)) install.packages('GGally')
library(GGally)
library(MASS)
# Facebook posts dataset (semicolon-separated). One extreme outlier post
# (6334 total interactions) is dropped so it does not dominate the plots.
fb <- read.csv('data/dataset_Facebook.csv',sep = ';')
fb <- fb[fb$Total.Interactions!=6334,]
# UI: three tabbed plots on top, two radio-button control groups below.
ui <- fluidPage(
titlePanel("FB Post Interactions and Times"),
fluidRow(
mainPanel(tabsetPanel(
tabPanel("Interactions vs. Post Times", plotOutput("scatterPlot")),
tabPanel("Correlation Between Interactions/Time", plotOutput("scatterPlot_matrix")),
tabPanel("Parallel Post Interactions and Time of Day", plotOutput("parcoords"))
)
)
)
,
fluidRow(
column(width = 6,
# Post-type filter; the value -1 is interpreted as "All" in server().
radioButtons(
inputId = 'type',
label = 'Type of Post',
choices = c("All" = -1,
"Photo" = "Photo",
"Link" = "Link",
"Video" = "Video",
"Status" = "Status"
)
)
),
column(width = 6,
# Interaction metric plotted on the first tab's y-axis.
radioButtons(
inputId = 'interaction',
label = 'Type of Interaction',
choices = c("Comment" = "comment",
"Like" = "like",
"Share" = "share"
)
)
)
)
)
# Server logic for the three tabs. All three outputs first restrict `fb`
# to the selected post type; that previously copy-pasted logic now lives
# in a single helper.
server <- function(input, output) {
  # Logical mask over the rows of `fb` for the chosen post type.
  # `type` arrives as a string from the radio buttons; the "All" choice has
  # value -1, which after R's implicit coercion compares equal to "-1",
  # so every row is kept in that case (same semantics as the original).
  selected_rows <- function(type) {
    if (type != -1) {
      fb$Type == type
    } else {
      !vector(mode = "logical", length = length(fb$Type))
    }
  }
  # Scatter of the chosen interaction count vs posting hour,
  # sized by total interactions and coloured by post type.
  output$scatterPlot <- renderPlot({
    indices <- selected_rows(input$type)
    inter <- input$interaction
    x <- fb$Post.Hour[indices]
    if (inter == "comment") y <- fb$comment[indices]
    else if (inter == "like") y <- fb$like[indices]
    else y <- fb$share[indices]
    total.interactions <- fb$Total.Interactions[indices]
    post.type <- fb$Type[indices]
    qplot(x, y, xlab='Hour of the Day', ylab='Number of Interactions', size=total.interactions,
          color=post.type)
  })
  # Scatterplot matrix of the three interaction counts plus posting hour.
  output$scatterPlot_matrix <- renderPlot({
    indices <- selected_rows(input$type)
    # The original read input$interaction here without using it; the bare
    # read is kept so the reactive dependency (re-render on change) is
    # preserved exactly.
    input$interaction
    comments <- fb$comment[indices]
    likes <- fb$like[indices]
    shares <- fb$share[indices]
    hour.of.day <- fb$Post.Hour[indices]
    ggpairs(data.frame(comments, likes, shares, hour.of.day))
  })
  # Parallel-coordinates view of posting hour and interaction counts.
  output$parcoords <- renderPlot({
    indices <- selected_rows(input$type)
    input$interaction  # kept only to preserve the original reactive dependency
    hour.of.day <- fb$Post.Hour[indices]
    comments <- fb$comment[indices]
    likes <- fb$like[indices]
    shares <- fb$share[indices]
    parcoord(data.frame(hour.of.day, comments, likes, shares), rainbow(length(comments)))
  })
}

shinyApp(ui = ui, server = server)
|
6e4ff01334c88d6751e1c3ba1517cada9a85e44c
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/diffrprojects/inst/testfiles/dist_mat_absolute/libFuzzer_dist_mat_absolute/dist_mat_absolute_valgrind_files/1609961227-test.R
|
57c4125e502977a3c230bf79fdf20fc6db1c235a
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,359
|
r
|
1609961227-test.R
|
testlist <- list(x = c(-687865865L, -2097153L, -1L, -65536L, 0L, -1L, -393258L, -2049L, -536871026L, -1L, -10726L, 803602431L, -1L, -10726L, 805251109L, 239L, -1L, -1L, -2745809L, -56064L, 61439L, -16776961L, -10497L, -42L, -65495L, 30802099L, NA, -2097153L, -524321L, -702926849L, 1277100031L, -10726L, 805306367L, -10497L, -524321L, -1L, -1L, -2752512L, 15728639L, -16757505L, -1757185L, -1L, 692901427L, 858993663L, 1612328959L, -11785729L, -46004L, -524321L, -1L, -1L, 0L, 65535L, NA), y = c(1948863518L, 704643071L, -699262417L, 692857343L, -59351L, -2687199L, -2820097L, -6316129L, -1616928865L, NA, -2745601L, -2686977L, -65489L, -1L, 692846633L, -11733428L, 0L, 2414106L, 805306367L, -1L, -701287629L, 872414976L, 0L, 255L, -1L, -2687199L, -2820097L, -6316129L, -1616928865L, -539557889L, -1L, -42L, 441537023L, -1L, -1L, -15018449L, -56064L, 7799023L, -1L, -58625L, -10479057L, -56832L, -1L, -1L, -702927021L, 1163415552L, -10726L, 1280061267L, -2687190L, -108L, -1802201964L, -1L, -1L, 855638015L, 13565951L, -1L, -1L, 16777215L, -42L, -2049L, -536870913L, -1L, -16777216L, 255L, -1L, -100673793L, -524321L, -28929L, -1L, -2745809L, -436207617L, -1L, -2745809L, -14146304L, 61439L, -1L, -1L, -702926849L, -14352384L, 15728639L, 65535L, -2686977L, -10497L ))
result <- do.call(diffrprojects:::dist_mat_absolute,testlist)
str(result)
|
ff3067ba6a9dfc165bb7ac82e382ca25aaf5713f
|
03facaf304845a6bb4d1afa92026329a17e90f0c
|
/R/sisesat-class.R
|
61c5e43695ee56d117cf29a8d69e6b2565337cc8
|
[] |
no_license
|
imarpe/imarpe
|
acc7653ac5f96d4d469c69995efe174bb4671245
|
9e4a12157edebb3857143ab3fd1fda618c558b97
|
refs/heads/master
| 2021-04-06T11:44:10.705962
| 2020-08-06T01:45:29
| 2020-08-06T01:45:29
| 16,237,099
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 118
|
r
|
sisesat-class.R
|
# Main functions for the 'sisesat' class
# includes: a function to read data and create the object of class
# sisesat
|
5d348c0c30d476f45b85ed09e6ea082fd93432c2
|
3b9f656885e4822849d265a2e7b4476528ea3337
|
/R/idemo.FIM.R
|
69abbbd74deb4a8e292e554b66dc6066b56cee8b
|
[] |
no_license
|
cran/iDEMO
|
f31322c5e8c1680f84eb5320f7c1a87cad265570
|
ce4fcdc6adefc20177e18f434ff0bab3534bff7a
|
refs/heads/master
| 2020-05-17T11:12:52.543048
| 2012-06-06T00:00:00
| 2012-06-06T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,510
|
r
|
idemo.FIM.R
|
idemo.FIM <-
function( sEta, sB, sE, Time){
  # Fisher information matrix (FIM) for a degradation model with a random
  # drift effect (sd sEta), a Brownian-motion term (diffusion sB) and
  # additive measurement error (sd sE), observed at the times in `Time`.
  #
  # Args:
  #   sEta, sB, sE: non-negative scalar variance-component parameters.
  #   Time:         vector of strictly positive measurement times.
  #
  # Returns the FIM; its dimension (2x2, 3x3 or 4x4) depends on which of
  # sEta, sB, sE are non-zero. The row/column ordering appears to be
  # (mean-drift parameter, then the non-zero variance components), inferred
  # from the quadratic forms -- confirm against the iDEMO package docs.
  # Invalid input prints a message and yields NULL; the combination
  # sEta != 0, sB == 0, sE == 0 has no branch (as in the original) and
  # also yields NULL.
  if( is.numeric(sE) && is.numeric(sEta) && is.numeric(sB) && is.numeric(Time) ){
    # Scalar conditions: `&&` throughout (the original mixed `&`/`&&`,
    # which errors on accidental vector input in modern R).
    if( sEta >= 0 && sB >= 0 && sE >= 0 && all(Time > 0) ){
      m <- length(Time)
      # Q[i, j] = min(Time[i], Time[j]): Brownian-motion covariance kernel.
      Q <- matrix(rep(Time, length(Time)), ncol = length(Time))
      Q[row(Q) > col(Q)] <- sort(Q[row(Q) < col(Q)])
      O <- sB^2 * Q + sE^2 * diag( x = 1, m, m )  # covariance without the random effect
      S <- sEta^2 * Time %*% t(Time) + O          # full observation covariance
      # PERF: compute each inverse once; the original re-ran solve(O) and
      # solve(S) inside every quadratic form below. Both inverses were
      # already taken eagerly in the original, so error behaviour on
      # singular O/S is unchanged.
      O.inv <- solve(O)
      S.inv <- solve(S)
      # Scalar quadratic forms and matrix products reused by the branches.
      tSt  <- c( t(Time) %*% S.inv %*% Time )
      tOt  <- c( t(Time) %*% O.inv %*% Time )
      one_sEta2tOt <- c( 1 + sEta^2 * tOt )
      tOQOt   <- c( t(Time) %*% O.inv %*% Q %*% O.inv %*% Time )
      tOOt    <- c( t(Time) %*% O.inv %*% O.inv %*% Time )
      QOQO    <- Q %*% O.inv %*% Q %*% O.inv
      tOQOQOt <- c( t(Time) %*% O.inv %*% Q %*% O.inv %*% Q %*% O.inv %*% Time )
      QOO     <- Q %*% O.inv %*% O.inv
      tOOQOt  <- c( t(Time) %*% O.inv %*% O.inv %*% Q %*% O.inv %*% Time )
      OO      <- O.inv %*% O.inv
      tOOOt   <- c( t(Time) %*% O.inv %*% O.inv %*% O.inv %*% Time )
      if( sEta != 0 && sB != 0 && sE != 0 ){
        # All three variance components present: 4 x 4 FIM.
        FI <- matrix( 0, 4, 4 )
        FI[1,1] <- tSt
        FI[2,2] <- tOt^2 / ( 2 * one_sEta2tOt^2 )
        FI[2,3] <- FI[3,2] <- tOQOt / ( 2 * one_sEta2tOt^2 )
        FI[2,4] <- FI[4,2] <- tOOt / ( 2 * one_sEta2tOt^2 )
        FI[3,3] <- sum( diag(QOQO) ) / 2 + sEta^4 * tOQOt^2 / ( 2 * one_sEta2tOt^2 ) -
                   sEta^2 * tOQOQOt / one_sEta2tOt
        FI[3,4] <- FI[4,3] <- sum( diag(QOO) ) / 2 + sEta^4 * tOQOt * tOOt / ( 2 * one_sEta2tOt^2 ) -
                   sEta^2 * tOOQOt / one_sEta2tOt
        FI[4,4] <- sum( diag(OO) ) / 2 + sEta^4 * tOOt^2 / ( 2 * one_sEta2tOt^2 ) -
                   sEta^2 * tOOOt / one_sEta2tOt
        return(FI)
      }
      if( sEta != 0 && sB == 0 && sE != 0 ){
        # No Brownian term: 3 x 3 FIM.
        tSSt <- c( t(Time) %*% S.inv %*% S.inv %*% Time )
        SS <- S.inv %*% S.inv
        FI <- matrix( 0, 3, 3 )
        FI[1,1] <- tSt
        FI[2,2] <- tSt^2 / 2
        FI[2,3] <- FI[3,2] <- tSSt / 2
        FI[3,3] <- sum( diag(SS) ) / 2
        return(FI)
      }
      if( sEta == 0 && sB != 0 && sE == 0 ){
        # Pure Brownian motion: Q^{-1} is tridiagonal and can be built
        # analytically from the time increments
        # a[1] = 1/Time[1], a[i] = 1/(Time[i] - Time[i-1]).
        a <- 1 / diff( c(0, Time) )
        Q.inv <- matrix( 0, m, m )
        # FIX: the original loops assumed m >= 3 and raised a subscript
        # error for m == 1 or m == 2; guard the interior and boundary rows.
        if( m >= 3 ){
          for( i in 2:(m - 1) ) Q.inv[ i, c(i - 1, i, i + 1) ] <- c( -a[i], a[i] + a[i+1], -a[i+1] )
        }
        if( m >= 2 ){
          Q.inv[ 1, c(1, 2) ] <- c( a[1] + a[2], -a[2] )
          Q.inv[ m, c(m - 1, m) ] <- c( -a[m], a[m] )
        } else {
          Q.inv[ 1, 1 ] <- a[1]
        }
        tQt <- c( t(Time) %*% Q.inv %*% Time )
        FI <- matrix( 0, 2, 2 )
        FI[1,1] <- tQt / sB^2
        FI[2,2] <- m / ( 2 * sB^4 )
        return(FI)
      }
      if( sEta != 0 && sB != 0 && sE == 0 ){
        # No measurement error: 3 x 3 FIM.
        tSQSt <- c( t(Time) %*% S.inv %*% Q %*% S.inv %*% Time )
        QSQS <- Q %*% S.inv %*% Q %*% S.inv
        FI <- matrix( 0, 3, 3 )
        FI[1,1] <- tSt
        FI[2,2] <- tSt^2 / 2
        FI[2,3] <- FI[3,2] <- tSQSt / 2
        FI[3,3] <- sum( diag(QSQS) ) / 2
        return(FI)
      }
      if( sEta == 0 && sB == 0 && sE != 0 ){
        # Measurement error only: O = sE^2 * I, so the forms are diagonal.
        tOt <- c( t(Time) %*% diag(m) %*% Time / sE^2 )
        FI <- matrix( 0, 2, 2 )
        FI[1,1] <- tOt
        FI[2,2] <- m / ( 2 * sE^4 )
        return(FI)
      }
      if( sEta == 0 && sB != 0 && sE != 0 ){
        # No random effect: 3 x 3 FIM.
        FI <- matrix( 0, 3, 3 )
        FI[1,1] <- tOt
        FI[2,2] <- sum( diag(QOQO) ) / 2
        FI[2,3] <- FI[3,2] <- sum( diag(QOO) ) / 2
        FI[3,3] <- sum( diag(OO) ) / 2
        return(FI)
      }
    }else{
      cat("The parameter and measuring time should not be negative and should be a positive vector, respectively!","\n")
    }
  }else{
    cat("The parameter and measuring time should not be negative and should be a positive vector, respectively!","\n")
  }
}
|
4315c97d7db049a63dd8e82ac5ea86d5574b871a
|
d63a0723923d0574d4f0218f3e48238666b4c479
|
/man/project_wham.Rd
|
6517dde53d3e364f3aa5af6205a730b72a31a888
|
[] |
no_license
|
ChristineStawitz-NOAA/wham
|
5bf34060ec6508e54c4eb943b55f7153cc1b0b59
|
a7dfd52390b1f5e35a0270edb00d9fc86c4f8773
|
refs/heads/master
| 2020-11-27T06:19:17.368027
| 2019-12-11T20:45:48
| 2019-12-11T20:45:48
| 229,336,441
| 1
| 0
| null | 2019-12-20T21:07:11
| 2019-12-20T21:07:10
| null |
UTF-8
|
R
| false
| true
| 5,837
|
rd
|
project_wham.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/project_wham.R
\name{project_wham}
\alias{project_wham}
\title{Project a fit WHAM model}
\usage{
project_wham(model, proj.opts = list(n.yrs = 3, use.last.F = TRUE,
use.avg.F = FALSE, use.FXSPR = FALSE, proj.F = NULL, proj.catch = NULL,
avg.yrs = NULL, cont.Ecov = TRUE, use.last.Ecov = FALSE, avg.Ecov.yrs =
NULL, proj.Ecov = NULL), n.newton = 3, do.sdrep = TRUE)
}
\arguments{
\item{model}{a previously fit wham model}
\item{proj.opts}{a named list with the following components:
\itemize{
\item \code{$n.yrs} (integer), number of years to project/forecast. Default = \code{3}.
\item \code{$use.last.F} (T/F), use terminal year F for projections. Default = \code{TRUE}.
\item \code{$use.FXSPR} (T/F), calculate F at X% SPR for projections.
\item \code{$proj.F} (vector), user-specified fishing mortality for projections. Length must equal \code{n.yrs}.
\item \code{$proj.catch} (vector), user-specified aggregate catch for projections. Length must equal \code{n.yrs}.
\item \code{$avg.yrs} (vector), specify which years to average over for calculating reference points. Default = last 5 model years, \code{tail(model$years, 5)}.
\item \code{$cont.Ecov} (T/F), continue Ecov process (e.g. random walk or AR1) for projections. Default = \code{TRUE}.
\item \code{$use.last.Ecov} (T/F), use terminal year Ecov for projections.
\item \code{$avg.Ecov.yrs} (vector), specify which years to average over the environmental covariate(s) for projections.
\item \code{$proj.Ecov} (matrix), user-specified environmental covariate(s) for projections. \code{n.yrs} rows.
}}
\item{n.newton}{integer, number of additional Newton steps after optimization. Passed to \code{\link{fit_tmb}}. Default = \code{0} for projections.}
\item{do.sdrep}{T/F, calculate standard deviations of model parameters? See \code{\link[TMB]{sdreport}}. Default = \code{TRUE}.}
}
\value{
a projected WHAM model with additional output if specified:
\describe{
\item{\code{$rep}}{List of derived quantity estimates (see examples)}
\item{\code{$sdrep}}{Parameter estimates (and standard errors if \code{do.sdrep=TRUE})}
\item{\code{$peels}}{Retrospective analysis (if \code{do.retro=TRUE})}
\item{\code{$osa}}{One-step-ahead residuals (if \code{do.osa=TRUE})}
}
}
\description{
Provides projections/forecasts for an existing (already fit) WHAM model.
}
\details{
WHAM implements five options for handling fishing mortality in the projections.
Exactly one of these must be specified in \code{proj.opts}:
\itemize{
\item Use last year F (default). Set \code{proj.opts$use.last.F = TRUE}. WHAM will use F in the terminal model year for projections.
\item Use average F. Set \code{proj.opts$use.avg.F = TRUE}. WHAM will use F averaged over \code{proj.opts$avg.yrs} for projections (as is done for M-, maturity-, and weight-at-age).
\item Use F at X% SPR. Set \code{proj.opts$use.FXSPR = TRUE}. WHAM will calculate F at X% SPR.
\item Specify F. Provide \code{proj.opts$proj.F}, an F vector with length = \code{n.yrs}.
\item Specify catch. Provide \code{proj.opts$proj.catch}, a vector of aggregate catch with length = \code{n.yrs}. WHAM will calculate F to get specified catch.
}
\code{proj.opts$avg.yrs} controls which years will be averaged over in the projections.
The following quantities are averaged:
\itemize{
\item Maturity-at-age
\item Weight-at-age
\item Natural mortality-at-age
\item Fishing mortality-at-age (if \code{proj.opts$use.avgF = TRUE})
}
WHAM implements four options for handling the environmental covariate(s) in the projections.
Exactly one of these must be specified in \code{proj.opts} if \code{Ecov} is in the model:
\describe{
\item{(Default) Continue Ecov process model (e.g. random walk, AR1)}{Set \code{$cont.Ecov = TRUE}. WHAM will estimate the Ecov process in projection years (i.e. continue the random walk / AR1 process).}
\item{Use last year Ecov(s)}{Set \code{$use.last.Ecov = TRUE}. WHAM will use Ecov value from the terminal year (of population model) for projections.}
\item{Use average Ecov(s)}{Provide \code{$avg.yrs.Ecov}, a vector specifying which years to average over the environmental covariate(s) for projections.}
\item{Specify \code{Ecov}}{Provide \code{$proj.Ecov}, a matrix of user-specified environmental covariate(s) to use for projections. Dimensions must be # projection years (\code{proj.opts$n.yrs}) x # Ecovs (\code{ncols(Ecov$mean)}).}
}
If the original model fit the Ecov in years beyond the population model, WHAM will use the already-fit
Ecov values for the projections. If the Ecov model extended at least \code{proj.opts$n.yrs} years
beyond the population model, then none of the above need be specified.
}
\examples{
\dontrun{
data("SNEMA_ytl") # load SNEMA yellowtail flounder data and parameter settings
mod <- fit_wham(input) # using default values (do.proj=T)
mod2 <- fit_wham(input, do.retro=F, do.osa=F, do.proj=F) # fit model without projections, retro analysis, or OSA residuals
mod_proj <- project_wham(mod2) # add projections to previously fit model, using default values: use.lastF = TRUE, n.yrs = 3, avg.yrs = last 5 years
names(mod_proj$rep) # list of derived quantities
tail(mod_proj$rep$SSB, 3) # get 3-year projected SSB estimates (weight, not numbers)
x = summary(mod_proj$sdrep)
unique(rownames(x)) # list of estimated parameters and derived quantities with SE
x = x[rownames(x) == "log_SSB",] # SSB estimates with SE
ssb.mat = exp(cbind(x, x[,1] + qnorm(0.975)*cbind(-x[,2],x[,2])))/1000 # calculate 95\% CI
colnames(ssb.mat) <- c("SSB","SSB_se","SSB_lower","SSB_upper")
tail(ssb.mat, 3) # 3-year projected SSB estimates with SE and 95\% CI
}
}
\seealso{
\code{\link{fit_wham}}, \code{\link{fit_tmb}}
}
|
6d894cadd56f5fe892653086a35e45a3be5bb719
|
9c035edc307844bf39839ed7275e4aaf8d7932bb
|
/plot2.R
|
3c80e767ba9d1cda8e6714b1f5a9e9e26205e1a5
|
[] |
no_license
|
adeelbaba/ExData_Plotting1
|
c9e6b6f3882cd1b5bb4a529c6937514d01bad914
|
cbd91f3655161028baf9f0d5c53da63337a0e53c
|
refs/heads/master
| 2021-01-18T06:56:59.660344
| 2014-06-08T22:02:13
| 2014-06-08T22:02:13
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 393
|
r
|
plot2.R
|
# Plot 2: Global active power over the two-day window 2007-02-01/02
# (UCI household power consumption dataset; "?" marks missing values).
PWR=read.table("~/Downloads/household_power_consumption.txt", sep=";", header=T,na.strings="?")
# Combine the Date and Time columns into a single timestamp.
PWR$DateTime <- strptime(paste(PWR$Date, PWR$Time), "%d/%m/%Y %H:%M:%S")
# Keep only the two target days.
PWR=subset(PWR, as.Date(DateTime) >= as.Date("2007-02-01") & as.Date(DateTime) <= as.Date("2007-02-02"))
plot(PWR$DateTime, PWR$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
|
62e295b65fceaf55b2e56913c9c7115fc5e4eb97
|
891963b0d2ff7c4a75cee11eac79b3790e0e3410
|
/notes/example_script.R
|
3048f9556ba1dd93ab34a18cd8d841271777d944
|
[] |
no_license
|
gbearden/r_course_evans_school
|
1665969e71a47e9bbe00bc5b0834e20e8fc17b9e
|
093f128348081de9382ec9948cc5e82922bc3c9f
|
refs/heads/master
| 2022-10-26T03:25:26.929943
| 2022-10-25T23:54:33
| 2022-10-25T23:54:33
| 111,721,217
| 1
| 3
| null | 2022-10-25T23:54:34
| 2017-11-22T18:52:50
|
HTML
|
UTF-8
|
R
| false
| false
| 192
|
r
|
example_script.R
|
# --------------
# Example script
# --------------
# Load library
library(tidyverse)
# Import data (Airbnb listings CSV hosted behind a shortened URL)
airbnb <- read_csv('https://bit.ly/3oadz2L')
# Explore data: first rows, then per-column summary statistics
head(airbnb)
summary(airbnb)
|
6d36562ea0a64ec5f25e85bce75eb5699eb2afef
|
db3a029c4711f7bb2b469d61b3a117687f936161
|
/Code/poolmeans.R
|
b21bad82f465929a2c59d1e94a9a2cdd0fdacaa5
|
[] |
no_license
|
KatiePelletier/WingShapeBSA
|
455d0f6750c3ca62aae6b6044d84412ab4909eeb
|
42913eb33502269b74c4728f41de18c3203b418c
|
refs/heads/master
| 2023-02-24T09:56:51.615109
| 2023-02-14T14:32:03
| 2023-02-14T14:32:03
| 236,039,708
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,863
|
r
|
poolmeans.R
|
# Pooled wing-shape comparison for the "ds" selection pools.
# poolwings: landmark coordinates (columns 10:105 are x1..y48) plus pool
# metadata (pop_yr, Tail, ...).
poolwings <- read.csv("../Data/selectedshape_75tails_ds.csv", stringsAsFactors = T)
str(poolwings)
# WingPlot()/WingEffect() plotting helpers come from this sourced script.
source('~/Dropbox/DworkinLabSharedMaterial/scripts/WINGPLOTSOURCE.R', chdir = TRUE)
selvec <- read.csv( "../Data/seldict_vectors.csv" )
str( selvec )
WingPlot(as.matrix(colMeans(poolwings[,10:105])))
#Houle wings are way bigger (specifically, 100x bigger) than ours. This plot looks funny. Mean center data?
# The effect vector is divided by 100 to put it on the same scale as these wings.
WingEffect( meanshape=as.matrix( colMeans( poolwings[,10:105] ) ), effectplus=as.matrix( selvec[1,3:98]/100 ),
effectminus=as.matrix( selvec[1,3:98]/100), winglabel=paste(selvec[1,1]),
scale.factor=1, wingcol=c("black", "blue", "red") )
# Selection vector for "ds" plus a comparison vector uncorrelated to ds ("neur").
ds_vec <- as.matrix(selvec[1,3:98])
vec2 <- read.csv("../Data/newdict_vectors_uncorrelated_to_ds.csv")
neur_vec <- vec2[5,2:97]
norm_vec <- function(x) sqrt(sum(x^2))
#need to get mean shape of each pool
# For each population (cmo14, pho14, fvw14, fvw12): mean landmark
# configuration of the left- and right-tail pools, then the Euclidean
# distance (PD) between the two means. The inline "#0.0xx" comments record
# the values observed when the script was last run.
# NOTE(review): filter(poolwings %>% ...) wraps an already-piped data frame
# in a condition-less filter(), which is a no-op -- left unchanged.
cmo14.left <- colMeans(filter(poolwings %>%
filter(pop_yr == "cmo14") %>%
filter(Tail == "Left") %>%
dplyr::select(x1:y48)
))
cmo14.right <- colMeans(filter(poolwings %>%
filter(pop_yr == "cmo14") %>%
filter(Tail == "Right") %>%
dplyr::select(x1:y48)
))
cmo14.PD <- norm_vec(as.matrix(cmo14.left - cmo14.right))
#0.03328883
cmo14.PD
pho14.left <- colMeans(filter(poolwings %>%
filter(pop_yr == "pho14") %>%
filter(Tail == "Left") %>%
dplyr::select(x1:y48)
))
pho14.right <- colMeans(filter(poolwings %>%
filter(pop_yr == "pho14") %>%
filter(Tail == "Right") %>%
dplyr::select(x1:y48)
))
pho14.PD <- norm_vec(as.matrix(pho14.left - pho14.right))
#0.03618959
pho14.PD
fvw14.left <- colMeans(filter(poolwings %>%
filter(pop_yr == "fvw14") %>%
filter(Tail == "Left") %>%
dplyr::select(x1:y48)
))
fvw14.right <- colMeans(filter(poolwings %>%
filter(pop_yr == "fvw14") %>%
filter(Tail == "Right") %>%
dplyr::select(x1:y48)
))
fvw14.PD <- norm_vec(as.matrix(fvw14.left - fvw14.right))
#0.04126227
fvw14.PD
fvw12.left <- colMeans(filter(poolwings %>%
filter(pop_yr == "fvw12") %>%
filter(Tail == "Left") %>%
dplyr::select(x1:y48)
))
fvw12.right <- colMeans(filter(poolwings %>%
filter(pop_yr == "fvw12") %>%
filter(Tail == "Right") %>%
dplyr::select(x1:y48)
))
fvw12.PD <- norm_vec(as.matrix(fvw12.left - fvw12.right))
#0.03966965
fvw12.PD
# Average of all four of these distances.
#0.03760258
mean(c(cmo14.PD, pho14.PD, fvw12.PD, fvw14.PD))
# Correlation of each left-right difference with the ds selection vector.
cor((fvw12.left - fvw12.right), ds_vec[1,]) #0.9187666
cor((fvw14.left - fvw14.right), ds_vec[1,]) #0.9029401
cor((pho14.left - pho14.right), ds_vec[1,]) #0.7881412
cor((cmo14.left - cmo14.right), ds_vec[1,]) #0.9349219
# Plot left (black) vs right (red) mean shapes for each population.
#Actually looks pretty legit?
WingPlot(cmo14.left, wingcol="black")
WingPlot(cmo14.right, wingcol="red", add=T)
WingPlot(pho14.left, wingcol="black")
WingPlot(pho14.right, wingcol="red", add=T)
WingPlot(fvw14.left, wingcol="black")
WingPlot(fvw14.right, wingcol="red", add=T)
WingPlot(fvw12.left, wingcol="black")
WingPlot(fvw12.right, wingcol="red", add=T)
#now for the neur pools
# Same analysis for the "neur" selection pools. Note this file uses
# lowercase column `tail` with values "L"/"R" (vs `Tail` = "Left"/"Right"
# in the ds file above).
neurwings <- read.csv("../Data/selectedshape_75tails_neur.csv")
neur.cmo14.left <- colMeans(filter(neurwings %>%
filter(pop_yr == "cmo14") %>%
filter(tail == "L") %>%
dplyr::select(x1:y48)
))
neur.cmo14.right <- colMeans(filter(neurwings %>%
filter(pop_yr == "cmo14") %>%
filter(tail == "R") %>%
dplyr::select(x1:y48)
))
neur.cmo14.PD <- norm_vec(as.matrix(neur.cmo14.left - neur.cmo14.right))
#0.02748431
neur.cmo14.PD
neur.pho14.left <- colMeans(filter(neurwings %>%
filter(pop_yr == "pho14") %>%
filter(tail == "L") %>%
dplyr::select(x1:y48)
))
neur.pho14.right <- colMeans(filter(neurwings %>%
filter(pop_yr == "pho14") %>%
filter(tail == "R") %>%
dplyr::select(x1:y48)
))
neur.pho14.PD <- norm_vec(as.matrix(neur.pho14.left - neur.pho14.right))
#0.02863765
neur.pho14.PD
neur.fvw14.left <- colMeans(filter(neurwings %>%
filter(pop_yr == "fvw14") %>%
filter(tail == "L") %>%
dplyr::select(x1:y48)
))
neur.fvw14.right <- colMeans(filter(neurwings %>%
filter(pop_yr == "fvw14") %>%
filter(tail == "R") %>%
dplyr::select(x1:y48)
))
neur.fvw14.PD <- norm_vec(as.matrix(neur.fvw14.left - neur.fvw14.right))
#0.04126227
neur.fvw14.PD
neur.fvw12.left <- colMeans(filter(neurwings %>%
filter(pop_yr == "fvw12") %>%
filter(tail == "L") %>%
dplyr::select(x1:y48)
))
neur.fvw12.right <- colMeans(filter(neurwings %>%
filter(pop_yr == "fvw12") %>%
filter(tail == "R") %>%
dplyr::select(x1:y48)
))
neur.fvw12.PD <- norm_vec(as.matrix(neur.fvw12.left - neur.fvw12.right))
#0.03816062
neur.fvw12.PD
# Average of all four of these distances.
#0.03232756
mean(c(neur.cmo14.PD, neur.pho14.PD, neur.fvw12.PD, neur.fvw14.PD))
# Plot left (black) vs right (red) mean shapes for each population.
#Actually looks pretty legit?
WingPlot(neur.cmo14.left, wingcol="black")
WingPlot(neur.cmo14.right, wingcol="red", add=T)
WingPlot(neur.pho14.left, wingcol="black")
WingPlot(neur.pho14.right, wingcol="red", add=T)
WingPlot(neur.fvw14.left, wingcol="black")
WingPlot(neur.fvw14.right, wingcol="red", add=T)
WingPlot(neur.fvw12.left, wingcol="black")
WingPlot(neur.fvw12.right, wingcol="red", add=T)
# Correlation of each left-right difference with the neur selection vector.
# NOTE(review): only the fvw14 line wraps the difference in as.matrix();
# presumably equivalent for these named numeric vectors -- verify.
cor((neur.fvw12.left - neur.fvw12.right), t(as.matrix(neur_vec[1,]))) #0.7924834
cor(as.matrix(neur.fvw14.left - neur.fvw14.right), t(neur_vec[1,])) #0.8014295
cor((neur.pho14.left - neur.pho14.right), t(neur_vec[1,])) #0.5859181
cor((neur.cmo14.left - neur.cmo14.right), t(neur_vec[1,]))#0.8329623
#do this
# library(geomorph)
# cord <- as.matrix(poolwings[,10:105])
# shape <- arrayspecs(cord, 48, 2)
# gdf <- geomorph.data.frame(shape = shape,
# CS = poolwings$CSize,
# pop = poolwings$pop,
# pop_year = poolwings$pop_yr,
# sex = poolwings$sex,
# ind = poolwings$ind,
# tail = poolwings$Tail)
#
#
#
#
# #I want to model shape change and then use pairwise to look at diffrences between pools to get an estimate of PD.
#
# mod <- procD.lm(shape ~ CS*pop_year*sex + tail, data = gdf)
#
# #why no third order interactions? Not allowed in geomorph?
# summary(mod)
#
# anova(mod)
#
#
# pair <- pairwise(mod, groups = poolwings$Tail)
#
# #0.03627186
# #About the same as what I saw before.
# #There must be lots of variance in the data that makes the means look diffrent like this.
# summary(pair, test.type = "dist")
|
1328273827e9b2c2016873abebc5d4e2adc46d88
|
4694cbed3552993d8e82c33524f35da8311a3f81
|
/tests/testthat.R
|
840bb559ca874bceeb21bef9130ffb55f467b0d4
|
[] |
no_license
|
RRMaximiliano/constitucionesNic
|
14ac58974fc6d1dab9490c879bf23ea0733b6cfc
|
2f6ba4269ea5b17f3aa0cc290326096e5dcd6e48
|
refs/heads/master
| 2023-05-02T14:50:56.680399
| 2021-05-30T08:14:49
| 2021-05-30T08:14:49
| 372,141,792
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 78
|
r
|
testthat.R
|
# Standard testthat entry point: loads the package under test and runs
# every test file in tests/testthat/.
library(testthat)
library(constitucionesNic)
test_check("constitucionesNic")
|
b8358b4ff119ab851fc0fbed13db8bc987cec9b8
|
e50ebfa49c005977a7364c9f428d9d8a785439de
|
/R_scripts/script_2_alkaloids_Sorensen_and_select_in_ants.R
|
87f6e13462f88fd3391cccf1c3301834fe4cd257
|
[] |
no_license
|
ivanprates/2019_gh_pumilio
|
fe9805207a9915ef88b0fee3e44ef254ab48c0ad
|
3d3fb253ac11258c21c5f687c05215b166796e2b
|
refs/heads/master
| 2020-06-04T10:45:09.997715
| 2019-11-22T16:13:10
| 2019-11-22T16:13:10
| 191,989,471
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 10,335
|
r
|
script_2_alkaloids_Sorensen_and_select_in_ants.R
|
############################################
## R script by Ivan Prates (ivanprates.org), Smithsonian National Museum of Natural History, Washington, DC, USA, June 2019.
## This script does the following:
## Estimates a Sorensen dissimilarity matrix for the complete Oophaga pumilio alkaloid dataset (both for individual alkaloids and alkaloid classes).
## Selects only those alkaloids and alkaloid classes known to occur in ants, and then estimates a Sorensen dissimilarity matrix.
## Estimates a matrix of geographic distances between sites sampled for Oophaga pumilio alkaloids.
## Assigns Oophaga pumilio cytochrome B sequences to sites sampled for alkaloids (as informed by Voronoi polygons estimated in ArcGIS).
## PART 1: Getting ready:
## Installing packages (one-off; intentionally left commented out):
#install.packages("fossil")
#install.packages("magrittr")
#install.packages("plyr")
#install.packages("reshape2")
## Loading packages:
library(fossil)    # ecol.dist(), sorenson, earth.dist()
library(magrittr)  # %<>% compound-assignment pipe
library(plyr)      # ldply()
library(reshape2)  # melt()
## Selecting path depending on which computer I'm using:
## NOTE(review): machine-specific path; Part 5 below uses a different absolute
## path (C:/Users/RhinellaX/...) -- confirm both point to the same data tree.
path = "~/Dropbox/2018_pumilio/2018-07/"
## Creating folders to save the files we'll generate:
dir.create(paste0(path, "DATA_alkaloids_in_ants"))
dir.create(paste0(path, "DATA_dissimilarity_matrices"))
## Selecting alkaloid data type (i.e., individual alkaloids versus alkaloid classes):
## Run the script once per data type by toggling which line is commented out.
data_type = "alkaloid"
#data_type = "alkaloid_class"
## PART 2: Estimating a Sorensen dissimilarity matrix for the alkaloid data:
## Importing presence/absence matrix of alkaloid data:
input_data = read.csv(file = paste0(path, "DATA_alkaloids_saporito2007/2018-07_presabs_", data_type, ".csv"), header = TRUE, row.names = 1) # read presence/absence matrix
alk_data = input_data[, 6:dim(input_data)[2]] # Selecting columns corresponding to alkaloid data (the first 5 columns hold site metadata)
rownames(alk_data) = input_data$alk_siteID
## Transposing this matrix (alkaloids as rows, sites as columns) as required by the fossil package for estimation of a distance (Sorensen) matrix:
alk_data = t(alk_data)
alk_data
## Estimating a Sorensen distance matrix for the alkaloid data
## (note: fossil spells the method function 'sorenson'):
pair_sorensen = ecol.dist(alk_data, method = sorenson, type = "dis")
pair_sorensen = as.matrix(pair_sorensen)
pair_sorensen
write.csv(pair_sorensen, file = paste0(path, "DATA_dissimilarity_matrices/2018-07_Sorensen_", data_type, ".csv")) # Writing this matrix as a csv file.
## PART 3: Selecting only those alkaloids (or alkaloid classes) known to occur in ants,
## then re-estimating the Sorensen dissimilarity matrix on that subset.
## Converting the presence/absence matrix to a long-format list of presences:
input_data = read.csv(file = paste0(path, "DATA_alkaloids_saporito2007/2018-07_presabs_", data_type, ".csv"), header = TRUE, row.names = 1) # reads presence/absence matrix
molten = melt(input_data, id.vars = c("saporito_siteID", "latitude", "longitude", "alk_siteID", "Nskins"))
molten$value
colnames(molten) # lists column names
only_presence = molten[molten$value == 1, ] # keep only rows recording a presence
only_presence = only_presence[, -7] # drop the 'value' column (always 1 after the filter above)
## read.csv()/make.names() prepends an "X" to column names that start with a digit.
## Strip only that leading "X": the pattern is anchored with "^" so any legitimate
## "X" elsewhere in an alkaloid name is preserved (an unanchored "X" pattern would
## delete all of them).
only_presence$variable = gsub(pattern = "^X", replacement = "", only_presence$variable)
write.csv(only_presence, file = paste0(path, "DATA_alkaloids_saporito2007/2018-07_list_", data_type, ".csv"), row.names = FALSE) # Saving.
## Importing list of alkaloids known to occur in ants (from literature review) to act as criteria for alkaloid selection:
criteria_list = read.csv(file = paste0(path, "DATA_alkaloid_bearing_ants/2018-07_list_only_alkaloids_found_in_ants_including_tricyclics.csv"), header = TRUE)
## Creating a vector that will serve as a 'mask' to select only those alkaloids under criteria:
criteria = criteria_list[, colnames(criteria_list) == data_type]
length(unique(criteria))
## Using this mask, extract only those alkaloids (or alkaloid classes) under criteria:
selected = only_presence[only_presence$variable %in% criteria, ]
length(selected$variable)
## Saving final alkaloid list:
write.csv(selected, file = paste0(path, "DATA_alkaloids_in_ants/2018-07_list_", data_type, "_in_ants_including_tricyclics.csv"))
## Converting from list back to a presence/absence matrix.
## (Named 'selected_long' rather than 'matrix' so base::matrix is not masked.)
selected_long = melt(selected, id.var = "variable", measure.var = "alk_siteID")
presabs = with(selected_long, table(variable, value))
## First transpose (sites as rows) is the orientation archived on disk:
presabs = t(presabs)
write.csv(presabs, file = paste0(path, "DATA_alkaloids_in_ants/2018-07_presabs_", data_type, "_in_ants_including_tricyclics.csv"))
## Transposing back (alkaloids as rows) as required by the fossil package for estimation of a distance matrix:
presabs = t(presabs)
presabs
## Estimating a Sorensen distance matrix:
pair_Sorensen = ecol.dist(presabs, method = sorenson, type = "dis")
pair_Sorensen = as.matrix(pair_Sorensen)
pair_Sorensen
write.csv(pair_Sorensen, file = paste0(path, "DATA_dissimilarity_matrices/2018-07_Sorensen_", data_type, "_in_ants_including_tricyclics.csv")) # Saving.
## PART 4: Build the pairwise geographic distance matrix between the sites
## sampled for Oophaga pumilio alkaloids.
## Site coordinate table (one row per alkaloid-sampled site):
sites = read.csv(file = paste0(path, "DATA_alkaloids_saporito2007/2018-07_sites_46pixels_1km-pixels_corrected_for_land.csv"), header = TRUE, row.names = 1)
# fossil::earth.dist() expects the columns ordered longitude, latitude:
coords <- sites[, 2:3]
coords
# Great-circle distances; dist = FALSE returns a full square matrix
# rather than a 'dist' object:
geo_dist <- earth.dist(coords, dist = FALSE)
# Label rows and columns with the alkaloid site IDs:
dimnames(geo_dist) <- list(input_data$alk_siteID, input_data$alk_siteID)
geo_dist
# Save the geographic distance matrix:
write.csv(geo_dist, file = paste0(path, "DATA_dissimilarity_matrices/2018-07_geographic_distance_matrix.csv"))
## PART 5. Assigning cytochrome B sequences to sites sampled for alkaloids:
## Importing genetic data and locality data for matching:
gen_data = read.csv(file = paste0(path, "DATA_genetics/Hauswaldt_et_al_2011_data/pumilio_cytB_Hauswaldt_et_al_2011.csv"), header = TRUE) # Reads genetic data.
sites = read.csv(file = paste0(path, "DATA_alkaloids_saporito2007/2018-07_sites_46pixels_1km-pixels_corrected_for_land.csv"), header = TRUE, row.names = 1)
## Creating an empty list to populate with sequence data:
seq_list = vector("list", length(sites$gen_siteID)) # It will have the same length as the number of alkaloid sites (n = 46).
names(seq_list) = sites$alk_siteID # Name each slot after its alkaloid site.
names(seq_list)
## Loop: Matching DNA sequences with sites sampled for alkaloids (this was informed by Voronoi polygons in ArcGIS):
for(i in 1:nrow(sites)){
gen_siteID = sites$gen_siteID[i] # Picking the name of the site sampled for genetics.
seq_list[[i]] = gen_data$cytb_seq[gen_data$gen_siteID == gen_siteID] # Selecting sequences that match sites sampled for alkaloids.
} # Done
pre_alignment = ldply(seq_list, data.frame) # Converting list into a data frame (one row per individual sequence).
colnames(pre_alignment) = c("alk_siteID", "cytb_seq") # Changing column names.
## Now we'll name individual sequences matching the corresponding sites sampled for alkaloids.
add_ids = vector("list", length(sites$gen_siteID)) # creating empty list to receive individual numbers.
## Now preparing alignments (same data, written twice: fasta then Mega):
## Option 1: Fasta format:
## Loop: Changing names of sequences to add individual number and ">" in preparation for fasta file:
for(i in 1:nrow(sites)){
temp_slot = paste0(">", unique(pre_alignment$alk_siteID)[i], "_", 1:table(pre_alignment$alk_siteID)[i]) # Creating numbers for each individual sampled for DNA.
add_ids[[i]] = temp_slot # We're using a temporary object to store the names.
rm(temp_slot) # Removing temporary object.
} # Done.
add_ids = ldply(add_ids, data.frame) # Converting list into a data frame.
pre_alignment$alk_siteID = add_ids[, 1] # Replacing alkaloid site ID to include also individual sequence names.
colnames(pre_alignment) = c("sample", "cytb_seq") # Changing column names.
pre_alignment = pre_alignment[, 1:2] # Using first (individual name) and second (DNA sequence) columns to make alignment.
# Saving as a final alignment (sep/eol chosen so name and sequence each land on their own line):
write.table(pre_alignment, file = paste0(path, "DATA_genetics/pumilio_cytb_matching_alkaloid_sites.fasta"), row.names = FALSE, quote = FALSE, sep = "\n", eol = " \n", col.names = FALSE)
## Option 2: Mega format:
## Loop: Changing names of sequences to add individual number and "#" in preparation for Mega file:
## NOTE(review): at this point 'pre_alignment' no longer has an 'alk_siteID'
## column (it was renamed to 'sample' above and overwritten with fasta-style
## names), so this loop appears to depend on re-running Part 5 from the top
## before Option 2 -- verify before reuse.
for(i in 1:nrow(sites)){
temp_slot = paste0("#", unique(pre_alignment$alk_siteID)[i], "_", 1:table(pre_alignment$alk_siteID)[i], "{", unique(pre_alignment$alk_siteID)[i], "}") # Creating numbers for each individual sampled for DNA.
add_ids[[i]] = temp_slot # We're using a temporary object to store the names.
rm(temp_slot) # Removing temporary object.
} # Done.
add_ids = ldply(add_ids, data.frame) # Converting list into a data frame.
pre_alignment$alk_siteID = add_ids[, 1] # Replacing alkaloid site ID to include also individual sequence names.
colnames(pre_alignment) = c("sample", "cytb_seq") # Changing column names.
pre_alignment = pre_alignment[, 1:2] # Using first (individual name) and second (DNA sequence) columns to make alignment.
## Saving as a final alignment:
write.table(pre_alignment, file = paste0(path, "DATA_genetics/pumilio_cytb_matching_alkaloid_sites.meg"), row.names = FALSE, quote = FALSE, sep = "\n", eol = " \n", col.names = FALSE)
## Add Mega header manually to the output file, as follows:
## "#Mega"
## "!Title pumilio_cytb_matching_alkaloid_sites;"
## Making the genetic distance matrix generated in MEGA symmetrical for use in MMRR:
## Reading half-matrix output by MEGA:
## NOTE(review): absolute Windows path below differs from the 'path' variable set in Part 1 -- confirm they refer to the same data.
half_matrix = read.csv(file = "C:/Users/RhinellaX/Dropbox/Science/MYPAPERS_ongoing/2018_pumilio/2018-07/DATA_genetics/pumilio_cytB_dist_p-distances_half-matrix.csv", header = TRUE, row.names = 1)
dim(half_matrix)
## Creating a square matrix with the dimensions of the final (symmetrical) matrix:
full_matrix = matrix(data = 0, nrow = nrow(half_matrix), ncol = ncol(half_matrix))
dim(full_matrix)
## Copy the lower triangle from the half matrix into the new matrix:
full_matrix[lower.tri(full_matrix)] = half_matrix[lower.tri(half_matrix)]
full_matrix == half_matrix # Checking whether diagonals match.
full_matrix[upper.tri(half_matrix)] = t(half_matrix)[upper.tri(t(half_matrix))]
isSymmetric(full_matrix) # Check: Did it work (i.e., is matrix now symmetrical)?
## Adding column and row names:
row.names(full_matrix) = row.names(half_matrix)
colnames(full_matrix) = row.names(half_matrix)
# Saving final matrix:
write.csv(full_matrix, file = "C:/Users/RhinellaX/Dropbox/Science/MYPAPERS_ongoing/2018_pumilio/2018-07/DATA_genetics/2018-07_pumilio_cytb_dist_p-distances.csv", row.names = TRUE)
# Done!
|
a129995b07d4d5821b19a49b71484d13e84d706e
|
67de61805dd839979d8226e17d1316c821f9b1b4
|
/inst/models/passing/testAllint.R
|
3334ee109b8c24bb46d91bd9bc2a5b68936dc895
|
[
"Apache-2.0"
] |
permissive
|
falkcarl/OpenMx
|
f22ac3e387f6e024eae77b73341e222d532d0794
|
ee2940012403fd94258de3ec8bfc8718d3312c20
|
refs/heads/master
| 2021-01-14T13:39:31.630260
| 2016-01-17T03:08:46
| 2016-01-17T03:08:46
| 49,652,924
| 1
| 0
| null | 2016-01-14T14:41:06
| 2016-01-14T14:41:05
| null |
UTF-8
|
R
| false
| false
| 6,074
|
r
|
testAllint.R
|
#
# Copyright 2007-2016 The OpenMx Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------
# Program: testAllint.R
# Author: Mike Neale and Tim Brick
# Date: 2010-02-20
#
# Simple tests for omxAllint().
#
# -----------------------------------------------------------------------
# Tests for omxAllInt(): cell probabilities from multivariate-normal
# integration, compared against known Mx 1.x solutions.
# library() (not require()) so a missing OpenMx fails loudly instead of
# returning FALSE and crashing later.
library(OpenMx)
mxOption(NULL, 'mvnMaxPointsC', 100)
# Simple case: 1 var, 3 levels.
# Note: free=FALSE is spelled out everywhere below (T/F are ordinary,
# reassignable variables in R and should not be relied on).
nv <- 1
maxnt <- 3
nvminus1 <- nv - 1 # kept for parity with the other cases (not used below)
testAllint1 <- mxModel("TestAllint",
mxMatrix(type="Stand", nrow=nv, free=FALSE, name="Cov"),
mxMatrix(type="Full", nrow=1, ncol=nv, free=FALSE, name="Means"),
mxMatrix(type="Full", nrow=maxnt, ncol=nv, free=FALSE, values = c(-Inf, 2.326, Inf), name="Thresh"),
mxAlgebra(omxAllInt(Cov, Means, Thresh), name="testAllint"))
testAllintFit1 <- mxRun(testAllint1)
#Test 2x3
nv <- 2
maxnt <- 3
nvminus1 <- nv - 1
testAllint2 <- mxModel("TestAllint",
mxMatrix(type="Stand", nrow=nv, free=FALSE, name="Cov"),
mxMatrix(type="Full", nrow=1, ncol=nv, free=FALSE, name="Means"),
mxMatrix(type="Full", free=FALSE, values = cbind(c(-Inf, 0, Inf), c(-Inf, 1.96, Inf)), name="Thresh"),
mxAlgebra(omxAllInt(Cov, Means, Thresh), name="testAllint"))
testAllintFit2 <- mxRun(testAllint2)
#Test 2x4
nv <- 2
maxnt <- 4
nvminus1 <- nv - 1
testAllint3 <- mxModel("TestAllint",
mxMatrix(type="Stand", nrow=nv, free=FALSE, name="Cov"),
mxMatrix(type="Full", nrow=1, ncol=nv, free=FALSE, name="Means"),
mxMatrix(type="Full", free=FALSE, values = cbind(c(-Inf, 0, 1, Inf), c(-Inf, 1.96, 2.326, Inf)), name="Thresh"),
mxAlgebra(omxAllInt(Cov, Means, Thresh), name="testAllint"))
testAllintFit3 <- mxRun(testAllint3)
#Test 2x4,1x5 with NAs (shorter threshold columns are padded with NA)
nv <- 3
maxnt <- 5
nvminus1 <- nv - 1
testAllint4 <- mxModel("TestAllint",
mxMatrix(type="Stand", nrow=nv, free=FALSE, name="Cov"),
mxMatrix(type="Full", nrow=nv, ncol=1, free=FALSE, name="Means"),
mxMatrix(type="Full", free=FALSE, values = cbind(c(-Inf, 0, 1, Inf, NA), c(-Inf, 1.96, 2.326, Inf, NA), c(-Inf, -1, 0, 1, Inf)), name="Thresh"),
mxAlgebra(omxAllInt(Cov, Means, Thresh), name="testAllint"))
testAllintFit4 <- mxRun(testAllint4)
# Test different sizes of threshold matrix passed as separate arguments
nv <- 3
maxnt <- 5
nvminus1 <- nv - 1
testAllint5 <- mxModel("TestAllint",
mxMatrix(type="Stand", nrow=nv, free=FALSE, name="Cov"),
mxMatrix(type="Full", nrow=1, ncol=nv, free=FALSE, name="Means"),
mxMatrix(type="Full", free=FALSE, values = cbind(c(-Inf, 0, 1, Inf), c(-Inf, 1.96, 2.326, Inf)), name="Thresh1"),
mxMatrix(type="Full", free=FALSE, values = cbind(c(-Inf, -1, 0, 1, Inf)), name="Thresh2"),
mxAlgebra(omxAllInt(Cov, Means, Thresh1, Thresh2), name="testAllint"))
testAllintFit5 <- mxRun(testAllint5)
#Test against Mx1 solutions (first via the fitted models, then via mxEval on the unrun models)
omxCheckCloseEnough(testAllintFit1[['testAllint']]$result, as.matrix(c(.99, .01)), 0.001)
omxCheckCloseEnough(testAllintFit2[['testAllint']]$result, as.matrix(c(0.4875, 0.0125, 0.4875, 0.0125)), 0.001)
omxCheckCloseEnough(testAllintFit3[['testAllint']]$result, as.matrix(c(0.4875, 0.0075, 0.0050, 0.3328, 0.0051, 0.0034, 0.1547, 0.0024, 0.0016)), 0.001)
omxCheckCloseEnough(testAllintFit4[['testAllint']]$result, as.matrix(c(7.7345E-02, 1.6641E-01, 1.6641E-01, 7.7345E-02, 1.1890E-03, 2.5581E-03, 2.5581E-03, 1.1890E-03, 7.9401E-04, 1.7083E-03, 1.7083E-03, 7.9401E-04, 5.2802E-02, 1.1360E-01, 1.1360E-01, 5.2802E-02, 8.1173E-04, 1.7464E-03, 1.7464E-03, 8.1173E-04, 5.4206E-04, 1.1662E-03, 1.1662E-03, 5.4206E-04, 2.4542E-02, 5.2802E-02, 5.2802E-02, 2.4542E-02, 3.7729E-04, 8.1173E-04, 8.1173E-04, 3.7729E-04, 2.5195E-04, 5.4206E-04, 5.4206E-04, 2.5195E-04)), 0.001)
omxCheckCloseEnough(testAllintFit5[['testAllint']]$result, as.matrix(c(7.7345E-02, 1.6641E-01, 1.6641E-01, 7.7345E-02, 1.1890E-03, 2.5581E-03, 2.5581E-03, 1.1890E-03, 7.9401E-04, 1.7083E-03, 1.7083E-03, 7.9401E-04, 5.2802E-02, 1.1360E-01, 1.1360E-01, 5.2802E-02, 8.1173E-04, 1.7464E-03, 1.7464E-03, 8.1173E-04, 5.4206E-04, 1.1662E-03, 1.1662E-03, 5.4206E-04, 2.4542E-02, 5.2802E-02, 5.2802E-02, 2.4542E-02, 3.7729E-04, 8.1173E-04, 8.1173E-04, 3.7729E-04, 2.5195E-04, 5.4206E-04, 5.4206E-04, 2.5195E-04)), 0.001)
omxCheckCloseEnough(mxEval(omxAllInt(Cov, Means, Thresh), testAllint1), as.matrix(c(.99, .01)), 0.001)
omxCheckCloseEnough(mxEval(omxAllInt(Cov, Means, Thresh), testAllint2), as.matrix(c(0.4875, 0.0125, 0.4875, 0.0125)), 0.001)
omxCheckCloseEnough(mxEval(omxAllInt(Cov, Means, Thresh), testAllint3), as.matrix(c(0.4875, 0.0075, 0.0050, 0.3328, 0.0051, 0.0034, 0.1547, 0.0024, 0.0016)), 0.001)
omxCheckCloseEnough(mxEval(omxAllInt(Cov, Means, Thresh), testAllint4), as.matrix(c(7.7345E-02, 1.6641E-01, 1.6641E-01, 7.7345E-02, 1.1890E-03, 2.5581E-03, 2.5581E-03, 1.1890E-03, 7.9401E-04, 1.7083E-03, 1.7083E-03, 7.9401E-04, 5.2802E-02, 1.1360E-01, 1.1360E-01, 5.2802E-02, 8.1173E-04, 1.7464E-03, 1.7464E-03, 8.1173E-04, 5.4206E-04, 1.1662E-03, 1.1662E-03, 5.4206E-04, 2.4542E-02, 5.2802E-02, 5.2802E-02, 2.4542E-02, 3.7729E-04, 8.1173E-04, 8.1173E-04, 3.7729E-04, 2.5195E-04, 5.4206E-04, 5.4206E-04, 2.5195E-04)), 0.001)
omxCheckCloseEnough(mxEval(omxAllInt(Cov, Means, Thresh1, Thresh2), testAllint5), as.matrix(c(7.7345E-02, 1.6641E-01, 1.6641E-01, 7.7345E-02, 1.1890E-03, 2.5581E-03, 2.5581E-03, 1.1890E-03, 7.9401E-04, 1.7083E-03, 1.7083E-03, 7.9401E-04, 5.2802E-02, 1.1360E-01, 1.1360E-01, 5.2802E-02, 8.1173E-04, 1.7464E-03, 1.7464E-03, 8.1173E-04, 5.4206E-04, 1.1662E-03, 1.1662E-03, 5.4206E-04, 2.4542E-02, 5.2802E-02, 5.2802E-02, 2.4542E-02, 3.7729E-04, 8.1173E-04, 8.1173E-04, 3.7729E-04, 2.5195E-04, 5.4206E-04, 5.4206E-04, 2.5195E-04)), 0.001)
|
137edd97069061e6504ab5072ab6591ef4b48bed
|
1c81f97fd067260b7657019abe8028093b10d49f
|
/LAMBDA/R_script/RSA_Relabel_trial_ID_discrete_distance_18_ID.R
|
19ef714e5b3e7804873906da53ebf2221737b2f1
|
[] |
no_license
|
yunshiuan/fMRI_LAMBDA
|
367f2f52ec9e511f8d5cb19cb9de7022a7ab33fb
|
f72e1d45b5183222d7aa5bcfc4309936151d5179
|
refs/heads/master
| 2021-05-11T19:32:31.574804
| 2018-11-25T08:23:00
| 2018-11-25T08:23:00
| 117,878,829
| 4
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 8,398
|
r
|
RSA_Relabel_trial_ID_discrete_distance_18_ID.R
|
#===========================================================================================
# Re-label trial ID [Trial 4: discrete distance with 18 IDs (Trial 5 in the RSA README.txt)] : combining format(with location) and distance(with sign) information
# Note: the old "trial ID" only accounts for distance,
# but not format(i.e.,trials with same trial ID might differ in terms of format)
# 18 IDs:
# Note Each two trial in a run shares an ID, so that there'll be 18 IDs
# (18 conditions: 3 formats x 2 directions x 3 distance levels)
# [LL: 12 trials = 3 distance levels x 2 direction x 2 repetition]
# [FF: 12 trials = 3 distance levels x 2 direction x 2 repetition]
# [cross-notation: 12 trials = 3 distance levels x 2 direction x 2 [repetition]
# The motivation is to increase the statistical efficiency (Conditions in the Trial 4 is too noisy),
# while still keep that same-notation trials have the same statistical efficiency
# as the cross-notation trials (as Trial 4 did).
# (Having more repetition in a condition entails a smaller degree of random noise.)
# Note for the ID rule:
# Each id should be unique (thus one need to combine all csv in order to see the overall unique trials)
# Note the order of the id is determined by
# (1) Format in the following order: FF-> Cross-notation -> LL
# (2) Within the same format, order trials by absolute distance (discrete scale: 0~1)
# (make sure to distinguish trials that share the same distance but with opposite sign)
# Note: Continuous distance approach has its problem:
# (1) RSA toolbox only allows all subject run having the same model RDS (every run should have the same RSA IDs)
# (2) Discrete approach could ensure every subject run to have the same RSA IDs, although with the tradeoff that distance information becomes blurred.
#===========================================================================================
library("dplyr")
library("tidyr")
library("stringr")
library("reshape2")
library("pbapply")
# save(list = c("df_all_trials"),file = "D:\\GoogleDrive\\Lambda_code\\R_script\\RData\\all_beh_data_tidy.RData")
#Constants-----------------------
# NOTE(review): hardcoded machine-specific Windows paths -- adjust per machine.
PATH_BEH_DATA="D:\\Yun-Shiuan_LAMBDA\\Adult\\EprimeData_raw"
FILE_VALID_RUN="D:\\Yun-Shiuan_LAMBDA\\Adult\\Run_inclusion_info\\inclusive_runs_indexes.csv"
NUM_TRIAL_IDS=18 # 3 formats x 2 directions x 3 distance levels
#Labeling------------------------
#Step 1: Read in run data (one *_tidy.csv per subject run; rows without a trial_id are dropped)
temp=list.files(path = PATH_BEH_DATA,pattern = "tidy.csv",full.names = T,recursive = T)
df_list=pblapply(X = temp,
FUN = function(csv){
df=read.csv(csv,header = T,stringsAsFactors = F)%>%
select(-X,-X.1)%>% # drop leftover row-number columns from earlier write.csv calls
filter(!is.na(trial_id))
})
df_all_trials=do.call(what = "rbind",args = df_list)
# Step 2: Add trial ID for RSA (combining format(with location) and distance(with sign) information)
# Each id should be unique (thus one needs to combine all csv in order to see the overall unique trials)
# Note the order of the id is determined by
# (1) Format in the following order: FF-> Cross-notation -> LL
# (2) Within the same format, order trials by absolute discrete distance
# (3) make sure to distinguish trials that share the same distance but with opposite sign)
a=
df_all_trials%>%
filter(trial_id!="NULL")%>%
# Collapse "F - L" and "L - F" into a single cross-notation category "C - N":
mutate(fraccomp_sti_format = toupper(fraccomp_sti_type),
fraccomp_sti_format = ifelse(fraccomp_sti_format %in% c("F - L","L - F"),yes="C - N",no=fraccomp_sti_format))%>%
mutate(fraccomp_sti_format = factor(fraccomp_sti_format,levels = c("F - F","C - N","L - L")))%>%
# Map distance type to a discrete level (Near=1, Medium=2, Far=3), keeping the sign of the raw distance:
mutate(fraccomp_sti_dis_discrete_abs=ifelse(fraccomp_sti_dis_type=="Near",yes = 1,
no = ifelse(fraccomp_sti_dis_type=="Medium",yes = 2,
no = ifelse(fraccomp_sti_dis_type=="Far",yes = 3,no = NA))),
fraccomp_sti_dis_discrete=(fraccomp_sti_dis>0)*(fraccomp_sti_dis_discrete_abs)+
(fraccomp_sti_dis<0)*(-fraccomp_sti_dis_discrete_abs))%>%
mutate(trial_id=as.numeric(trial_id))%>%
# The row order below is what makes the rep()-based ID assignment further down valid:
arrange(sub_id,run_num,fraccomp_sti_format,abs(fraccomp_sti_dis_discrete),fraccomp_sti_dis_discrete,abs(fraccomp_sti_dis))
#Check if each subject run's batch has 4 repetitions (36trials= 3 formats x 3 distance levels x 4 repetitions)
# NOTE(review): View() calls assume an interactive session (e.g., RStudio).
a%>%
group_by(sub_id,run_num,fraccomp_sti_dis_discrete,fraccomp_sti_format)%>%
summarise(repetition=n())%>%View()
# Generate trial ID based on 1)format, 2)absolute discrete distance, 3)signed discrete distance,
# Note that the dataframe a is already sorted in the desired order (2 repeated trials share the same ID)
a=a%>%
mutate(trial_id_RSA_discrete_18=rep(1:NUM_TRIAL_IDS,each=2,length.out=nrow(a)))
df_all_trials=a
# save(list = c("df_all_trials"),file = "D:\\GoogleDrive\\Lambda_code\\R_script\\RData\\all_beh_data_tidy.RData")
# # Add the "valid run" info
# valid_runs=
# read.csv(FILE_VALID_RUN,header = T,stringsAsFactors = F)%>%
# select(-X)%>%
# mutate(sub.id=gsub(pattern="^df","",sub.id))
# valid_runs_collapsed=paste0(valid_runs$sub.id,"-",valid_runs$run.num)
#
# df_all_trials=
# df_all_trials%>%
# mutate(id_run=paste0(sub_id,"-",run_num))%>%
# mutate(valid_run=ifelse(id_run%in%valid_runs_collapsed,yes = 1,no = 0),
# trial_id=as.numeric(trial_id))%>%
# select(-id_run)%>%
# arrange(sub_id,run_num,trial_id_RSA_discrete_18)
# See the properties of each unique trial------------------------
trial_info=
a%>%
select(trial_id_RSA_discrete_18,fraccomp_sti_format,fraccomp_sti_dis_discrete)%>%
unique()
trial_info_with_sub_id=
a%>%
select(trial_id_RSA_discrete_18,fraccomp_sti_format,fraccomp_sti_dis_discrete,sub_id)%>%
unique()
# For each RSA ID, collect which subjects contributed trials:
collect_sub_id_unique_amount=c()
collect_sub_id_list_collapsed=c()
for(trial_id in 1:NUM_TRIAL_IDS){
sub_id_list=
trial_info_with_sub_id%>%
filter(trial_id_RSA_discrete_18==trial_id)%>%
select(sub_id)%>%
pull()%>%
gsub("^10","",x = .) # strip the leading "10" prefix from subject IDs
sub_id_unique_amount=length(sub_id_list)
sub_id_list_collapsed=paste(sub_id_list,collapse = ";")
collect_sub_id_unique_amount=append(collect_sub_id_unique_amount,sub_id_unique_amount)
collect_sub_id_list_collapsed=append(collect_sub_id_list_collapsed,sub_id_list_collapsed)
}
trial_info$sub_id_unique_amount=collect_sub_id_unique_amount
trial_info$sub_id_list_collapsed=collect_sub_id_list_collapsed
# Check if every subject run has 18 ID
a%>%
group_by(sub_id,run_num)%>%
summarise(n_trials_unique=length(unique(trial_id_RSA_discrete_18)))%>%
View()
# Check the stimulus properties of each RSA ID
df_all_trials%>%
group_by(RSA_ID=trial_id_RSA_discrete_18)%>%
summarise(num_total_runs=n(),
format=unique(fraccomp_sti_format),
dis_discrete=mean(fraccomp_sti_dis_discrete),
dis_discrete_abs=mean(fraccomp_sti_dis_discrete_abs),
dis_abs=mean(abs(fraccomp_sti_dis))
)%>%View
# Write back to the tidy.csv----------------------------------
temp=list.files(path = PATH_BEH_DATA,pattern = "tidy.csv",full.names = T,recursive = T)
df_list=pblapply(X = temp,
FUN = function(csv){
# Recover subject ID and run number from the file path:
csv_sub_id=as.integer(unlist(str_extract_all(string = csv,pattern = "(?<=/df)\\d+(?=_Run)")))
csv_run_num=paste0("r",unlist(str_extract_all(string = csv,pattern = "(?<=Run)\\d+(?=_tidy)")))
df=read.csv(csv,header = T,stringsAsFactors = F)%>%
mutate(trial_id=as.numeric(trial_id))
# Skip if already joined the trial_id_RSA info.
if("trial_id_RSA_discrete_18" %in% names(df)){
print(paste0("Already done: id:",csv_sub_id," ; run: ", csv_run_num))
}else{
df_all_trials_selected=
df_all_trials%>%
select(sub_id,run_num,trial_id_RSA_discrete_18,trial_id,fraccomp_sti_dis_discrete)%>%
filter(sub_id==csv_sub_id,run_num==csv_run_num)
df=df%>%
select(-X.1)%>%
left_join(df_all_trials_selected,by = c("sub_id","run_num","trial_id"))
write.csv(df,file = csv)
}
})
|
9d48e0c4b4339e4e416a7149dd051b0cb40e2c13
|
fd870c2f684c2d62ab31ebf4bdfcdc208a3bc2e4
|
/week5/week5_mapmaking.R
|
63099662fc15c1bada741825c5e4f4740f6ccb4e
|
[] |
no_license
|
LingruFeng/gis_code
|
9d42f53f13be62dac03b051c417ca3be5563ddd8
|
988c4fa1a7fa0ae8289d60df7848ebe980e71623
|
refs/heads/main
| 2023-02-04T11:33:56.499000
| 2020-12-25T10:16:57
| 2020-12-25T10:16:57
| 308,058,560
| 1
| 0
| null | 2020-10-28T18:38:05
| 2020-10-28T15:24:39
|
R
|
UTF-8
|
R
| false
| false
| 7,938
|
r
|
week5_mapmaking.R
|
##Load all our data
library(sf)
library(tmap)
library(tmaptools)
library(tidyverse)
library(here)
## read in all the spatial data and
## reproject it to British National Grid (EPSG:27700) so all layers share one CRS
OSM <- st_read(here::here("greater-london-latest-free.shp",
"gis_osm_pois_a_free_1.shp")) %>%
st_transform(., 27700) %>%
#select hotels only
filter(fclass == 'hotel')
Worldcities <- st_read(here::here("World_Cities",
"a4013257-88d6-4a68-b916-234180811b2d202034-1-1fw6kym.nqo.shp")) %>%
st_transform(., 27700)
UK_outline <- st_read(here::here("gadm36_GBR_gpkg", "gadm36_GBR.gpkg"),layer = 'gadm36_GBR_0') %>%
st_transform(., 27700)
#London Borough data is already in 27700 (British National Grid)
Londonborough <- st_read(here::here("statistical-gis-boundaries-london",
"statistical-gis-boundaries-london",
"ESRI",
"London_Borough_Excluding_MHW.shp"))%>%
st_transform(., 27700)
## read in the .csv
## and make it into spatial data (points start as lon/lat, WGS84 = EPSG:4326)
Airbnb <- read_csv("listings.csv") %>%
st_as_sf(., coords = c("longitude", "latitude"),
crs = 4326) %>%
st_transform(., 27700)%>%
#select entire places that are available all year
filter(room_type == 'Entire home/apt' & availability_365 =='365')
# make a function for the join
# functions are covered in practical 7
# but see if you can work out what is going on
# hint all you have to do is replace data1 and data2
# with the data you want to use
Joinfun <- function(data1, data2){
  # Spatially join the point layer (data1) onto the polygon layer (data2),
  # then count the points falling in each polygon (by GSS_CODE) into a
  # new 'hotels_in_borough' column. Returns one row per joined point.
  joined <- st_join(data2, data1)
  add_count(joined, GSS_CODE, name = "hotels_in_borough")
}
# use the function for hotels: one row per OSM hotel, with per-borough counts
Hotels <- Joinfun(OSM, Londonborough)
# then for airbnb (note: Airbnb is overwritten with the joined result)
Airbnb <- Joinfun(Airbnb, Londonborough)
# Keep only the three UK cities to label on the inset map.
# Fix: in the original, '&' bound tighter than '|', so the United Kingdom
# filter only applied to Birmingham -- any London/Edinburgh row in the
# shapefile would have been kept regardless of country. %in% makes the
# intent explicit and applies the country filter to all three cities.
Worldcities2 <- Worldcities %>%
  filter(CNTRY_NAME == 'United Kingdom',
         CITY_NAME %in% c('Birmingham', 'London', 'Edinburgh'))
# Crop the UK outline to a bounding box (BNG coordinates) that trims remote islands:
newbb <- c(xmin=-296000, ymin=5408, xmax=655696, ymax=1000000)
UK_outlinecrop <- st_crop(UK_outline$geom, newbb)
Hotels <- Hotels %>%
#at the moment each hotel is a row for the borough
#we just want one row per borough carrying the accommodation count
group_by(., GSS_CODE, NAME)%>%
summarise(`Accomodation count` = unique(hotels_in_borough))
Airbnb <- Airbnb %>%
group_by(., GSS_CODE, NAME)%>%
summarise(`Accomodation count` = unique(hotels_in_borough))
##############
#Make the map (static plotting mode)
tmap_mode("plot")
# set the breaks
# for our mapped data (shared by both choropleths so colours are comparable)
breaks = c(0, 5, 12, 26, 57, 420)
# plot each map
tm1 <- tm_shape(Hotels) +
tm_polygons("Accomodation count",
breaks=breaks,
palette="PuBu")+
tm_legend(show=FALSE)+
tm_layout(frame=FALSE)+
tm_credits("(a)", position=c(0,0.85), size=1.5)
tm2 <- tm_shape(Airbnb) +
tm_polygons("Accomodation count",
breaks=breaks,
palette="PuBu") +
tm_legend(show=FALSE)+
tm_layout(frame=FALSE)+
tm_credits("(b)", position=c(0,0.85), size=1.5)
tm3 <- tm_shape(UK_outlinecrop)+
tm_polygons(col="darkslategray1")+
tm_layout(frame=FALSE)+
tm_shape(Worldcities2) +
tm_symbols(col = "red", scale = .5)+
tm_text("CITY_NAME", xmod=-1, ymod=-0.5)
# A legend-only panel shared by panels (a) and (b):
legend <- tm_shape(Hotels) +
tm_polygons("Accomodation count",
palette="PuBu") +
tm_scale_bar(position=c(0.2,0.04), text.size=0.6)+
tm_compass(north=0, position=c(0.65,0.6))+
tm_layout(legend.only = TRUE, legend.position=c(0.2,0.25),asp=0.1)+
tm_credits("(c) OpenStreetMap contrbutors and Air b n b", position=c(0.0,0.0))
# NOTE(review): 't' masks base::t() from here on -- consider a different name.
t=tmap_arrange(tm1, tm2, tm3, legend, ncol=2)
t
#We can also arrange our maps using the grid package
library(grid)
grid.newpage()
pushViewport(viewport(layout=grid.layout(2,2)))
print(tm1, vp=viewport(layout.pos.col=1, layout.pos.row=1, height=5))
print(tm2, vp=viewport(layout.pos.col=2, layout.pos.row=1, height=5))
print(tm3, vp=viewport(layout.pos.col=1, layout.pos.row=2, height=5))
print(legend, vp=viewport(layout.pos.col=2, layout.pos.row=2, height=5))
########### Inset map: main London choropleth plus a UK locator inset ###########
Londonbb = st_bbox(Airbnb,
crs = st_crs(Airbnb)) %>%
st_as_sfc()
# Main map: Airbnb counts per borough, framed by the London bounding box.
# Fix: the bounding-box argument of tm_shape() is 'bbox'; the original
# 'bbbox' was a typo, so the supplied extent was silently ignored.
main <- tm_shape(Airbnb, bbox = Londonbb) +
tm_polygons("Accomodation count",
breaks=breaks,
palette="PuBu")+
tm_scale_bar(position = c("left", "bottom"), text.size = .75)+
tm_layout(legend.position = c("right","top"),
legend.text.size=.75,
legend.title.size = 1.1,
frame=FALSE)+
tm_credits("(c) OpenStreetMap contrbutors and Air b n b", position=c(0.0,0.0))+
tm_text(text = "NAME", size = .5, along.lines =T, remove.overlap=T, auto.placement=F)+
tm_compass(type = "8star", position = c("left", "top")) +
# inner.margin order: bottom, left, top, right (extra right margin for the inset)
tm_layout(inner.margin=c(0.02,0.02,0.02,0.2))
# Inset: cropped UK outline with the London extent outlined and key cities labelled.
inset = tm_shape(UK_outlinecrop) + tm_polygons() +
tm_shape(Londonbb)+
tm_borders(col = "grey40", lwd = 3)+
tm_layout(frame=FALSE,
bg.color = "transparent")+
tm_shape(Worldcities2) +
tm_symbols(col = "red", scale = .5)+
tm_text("CITY_NAME", xmod=-1.5, ymod=-0.5)
main
print(inset, vp = viewport(0.86, 0.29, width = 0.5, height = 0.55))
# Export the 2x2 arranged figure and the main+inset composition:
tmap_save(t, 'hotelsandairbnbR.png')
library(grid)
tmap_save(main,insets_tm = inset,insets_vp=viewport(x=0.86, y=0.29, width=.5, height=.55), filename="test.pdf", dpi=600)
#interactive map (tmap view mode)
tmap_mode("view")
tm_shape(Airbnb) +
tm_polygons("Accomodation count", breaks=breaks)
tm_shape(Hotels) +
tm_polygons("Accomodation count", breaks=breaks)
#Advanced interactive map with leaflet
# library for pop up boxes
library(leafpop)
library(leaflet)
# Join the two borough layers so each popup can draw on both counts,
# then reproject to WGS84 (leaflet expects lon/lat coordinates):
Joined <- Airbnb%>%
st_join(., Hotels, join = st_equals)%>%
dplyr::select(GSS_CODE.x, NAME.x, `Accomodation count.x`, `Accomodation count.y`)%>%
dplyr::rename(`GSS code` =`GSS_CODE.x`,
`Borough` = `NAME.x`,
`Airbnb count` = `Accomodation count.x`,
`Hotel count`= `Accomodation count.y`)%>%
st_transform(., 4326)
#remove the geometry for our pop up boxes to avoid it printing in the table
popupairbnb <-Joined %>%
st_drop_geometry()%>%
dplyr::select(`Airbnb count`, Borough)%>%
popupTable()
popuphotel <-Joined %>%
st_drop_geometry()%>%
dplyr::select(`Hotel count`, Borough)%>%
popupTable()
tmap_mode("view")
# Colour palettes (one per layer) using our previously defined breaks.
# Fix: the palettes are built with direct colorBin() calls; the original piped
# Joined into colorBin(), which slipped the whole sf object into an unintended
# argument, and then defined pal1 twice (leaving the first definition dead).
pal1 <- colorBin(palette = "YlOrRd", domain = Joined$`Airbnb count`, bins = breaks)
pal2 <- colorBin(palette = "YlOrRd", domain = Joined$`Hotel count`, bins = breaks)
map<- leaflet(Joined) %>%
# add basemap options
addTiles(group = "OSM (default)") %>%
addProviderTiles(providers$Stamen.Toner, group = "Toner") %>%
addProviderTiles(providers$Stamen.TonerLite, group = "Toner Lite") %>%
addProviderTiles(providers$CartoDB.Positron, group = "CartoDB")%>%
# Airbnb layer -- fix: fill with the Airbnb palette (pal1); the original
# used pal2 and never used pal1 at all.
addPolygons(color="white",
weight = 2,
opacity = 1,
dashArray = "3",
popup = popupairbnb,
fillOpacity = 0.7,
fillColor = ~pal1(`Airbnb count`),
group = "Airbnb")%>%
# Hotels layer -- fix: show the hotel popup (the original reused popupairbnb,
# so hotel polygons displayed Airbnb counts).
addPolygons(fillColor = ~pal2(`Hotel count`),
weight = 2,
opacity = 1,
color = "white",
dashArray = "3",
popup = popuphotel,
fillOpacity = 0.7,group = "Hotels")%>%
# add a legend -- fix: 'group' must name an actual overlay group ("Hotels");
# the original c("Airbnb","Hotel") matched nothing, so the legend never toggled.
addLegend(pal = pal2, values = ~`Hotel count`, group = "Hotels",
position ="bottomleft", title = "Accomodation count") %>%
# specify layers control
addLayersControl(
baseGroups = c("OSM (default)", "Toner", "Toner Lite", "CartoDB"),
overlayGroups = c("Airbnb", "Hotels"),
options = layersControlOptions(collapsed = FALSE)
)
# plot the map
map
|
53b8394c167d6c886eee008ecc2a9e9e35515b7c
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/adiv/examples/QE.Rd.R
|
47ac79e94285395b5a18a4740eac883b6662dfd1
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,275
|
r
|
QE.Rd.R
|
library(adiv)
### Name: QE
### Title: Quadratic Entropy
### Aliases: QE discomQE
### Keywords: models
### ** Examples
data(ecomor, package="ade4")
dtaxo <- dist.taxo(ecomor$taxo)
QE(t(ecomor$habitat), dtaxo, formula="EDI")
QE(t(ecomor$habitat), dtaxo^2/2, formula="QE")
table.value(as.matrix(discomQE(t(ecomor$habitat), dtaxo, formula="EDI")))
EDIcom <- discomQE(t(ecomor$habitat), dtaxo, formula="EDI")
QEcom <- discomQE(t(ecomor$habitat), dtaxo^2/2, formula="QE")
QEcom
EDIcom^2/2
bird.QE <- QE(t(ecomor$habitat), dtaxo, formula="EDI")
dotchart(bird.QE$diversity, labels = rownames(bird.QE))
data(humDNAm, package="ade4")
QE(t(humDNAm$samples), humDNAm$distances/2, formula="QE")
QE(t(humDNAm$samples), sqrt(humDNAm$distances), formula="EDI")
QEhumDNA.dist <- discomQE(t(humDNAm$samples),
humDNAm$distances/2, humDNAm$structures)
is.euclid(QEhumDNA.dist$communities)
is.euclid(QEhumDNA.dist$regions)
EDIhumDNA.dist <- discomQE(t(humDNAm$samples),
sqrt(humDNAm$distances), humDNAm$structures, formula="EDI")
is.euclid(EDIhumDNA.dist$communities)
is.euclid(EDIhumDNA.dist$regions)
QEhumDNA.dist$communities
EDIhumDNA.dist$communities^2/2
hum.QE <- QE(t(humDNAm$samples), humDNAm$distances/2, formula="QE")
dotchart(hum.QE$diversity, labels = rownames(hum.QE))
|
b8c765ddd5c3d7bc265c101d2a3645f054d3e684
|
589b566fc6d258a5c342cfc76782371b1681e4f3
|
/Testing Functions/Standard Elite Testing/hard_elitism_Experiment.R
|
f30b10a103ae52ec29812c6ab3fb6601f2192ed6
|
[] |
no_license
|
Fozefy/GeneticAlgorithm
|
c36061e7d098649ee480d25f37c145ef07f331ab
|
db2f3ce9ad58a0ef5aa65a51a8c74afa5a5d0609
|
refs/heads/master
| 2016-09-06T07:06:36.741214
| 2016-03-08T21:31:22
| 2016-03-08T21:31:22
| 12,627,009
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,757
|
r
|
hard_elitism_Experiment.R
|
n=50
graph = gridConstructor(100)
hard.elite.one = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=1,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE,reporting.fn=reportNone.report.fn)
generational.ga(ga)
hard.elite.one[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.one,file="hard.elite.one")
graph = gridConstructor(100)
hard.elite.two = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=2,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE,reporting.fn = reportNone.report.fn)
generational.ga(ga)
hard.elite.two[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.two,file="hard.elite.two")
graph = gridConstructor(100)
hard.elite.three = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=3,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.three[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.three,file="hard.elite.three")
graph = gridConstructor(100)
hard.elite.five = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=5,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE,reporting.fn=reportNone.report.fn)
generational.ga(ga)
hard.elite.five[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.five,file="hard.elite.five")
graph = gridConstructor(100)
hard.elite.ten = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=10,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE,reporting.fn=reportNone.report.fn)
generational.ga(ga)
hard.elite.ten[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.ten,file="hard.elite.ten")
graph = gridConstructor(100)
hard.elite.fifty = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=50,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE,reporting.fn=reportNone.report.fn)
generational.ga(ga)
hard.elite.fifty[i] = ga$gen
print(paste(i,"Complete -",ga$gen))
rm(ga)
}
save(hard.elite.fifty,file="hard.elite.fifty")
graph = gridConstructor(100)
hard.elite.full = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=100,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.full[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.full,file="hard.elite.full")
#boxplot(hard.elite.one,hard.elite.two,hard.elite.three,hard.elite.five,hard.elite.ten,names=c("One","Two","Three","Five","Ten"), main="Elites on Coevo - 4 Grid")
#Large Grid
graph = gridConstructor.withDiag(100)
hard.elite.one.BigGrid = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=1,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.one.BigGrid[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.one.BigGrid,file="hard.elite.one.BigGrid")
graph = gridConstructor.withDiag(100)
hard.elite.two.BigGrid = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=2,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE,reporting.fn=reportNone.report.fn)
generational.ga(ga)
hard.elite.two.BigGrid[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.two.BigGrid,file="hard.elite.two.BigGrid")
graph = gridConstructor.withDiag(100)
hard.elite.three.BigGrid = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=3,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.three.BigGrid[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.three.BigGrid,file="hard.elite.three.BigGrid")
graph = gridConstructor.withDiag(100)
hard.elite.five.BigGrid = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=5,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.five.BigGrid[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.five.BigGrid,file="hard.elite.five.BigGrid")
graph = gridConstructor.withDiag(100)
hard.elite.ten.BigGrid = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=10,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.ten.BigGrid[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.ten.BigGrid,file="hard.elite.ten.BigGrid")
graph = gridConstructor.withDiag(100)
hard.elite.full.BigGrid = c(1)
for (i in 1:n)
{
ga = new.GA.env(GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:100, 1:100), nrow=100, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=100,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.full.BigGrid[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.full.BigGrid,file="hard.elite.full.BigGrid")
#boxplot(hard.elite.one.BigGrid,hard.elite.two.BigGrid,hard.elite.three.BigGrid,hard.elite.five.BigGrid,hard.elite.ten.BigGrid,names=c("One","Two","Three","Five","Ten"), main="Elites on Coevo - 8 Grid")
#Big Pop
graph = gridConstructor(1024)
hard.elite.one.BigPop = c(1)
for (i in 1:n)
{
ga = new.GA.env(pop.size=1024,GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:1024, 1:1024), nrow=1024, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=1,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.one.BigPop[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.one.BigPop,file="hard.elite.one.BigPop")
graph = gridConstructor(1024)
hard.elite.two.BigPop = c(1)
for (i in 1:n)
{
ga = new.GA.env(pop.size=1024,GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:1024, 1:1024), nrow=1024, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=2,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.two.BigPop[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.two.BigPop,file="hard.elite.two.BigPop")
graph = gridConstructor(1024)
hard.elite.three.BigPop = c(1)
for (i in 1:n)
{
ga = new.GA.env(pop.size=1024,GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:1024, 1:1024), nrow=1024, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=3,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.three.BigPop[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.three.BigPop,file="hard.elite.three.BigPop")
graph = gridConstructor(1024)
hard.elite.five.BigPop = c(1)
for (i in 1:n)
{
ga = new.GA.env(pop.size=1024,GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:1024, 1:1024), nrow=1024, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=5,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.five.BigPop[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.five.BigPop,file="hard.elite.five.BigPop")
graph = gridConstructor(1024)
hard.elite.ten.BigPop = c(1)
for (i in 1:n)
{
ga = new.GA.env(pop.size=1024,GA.base.args=new.GA.base.args(max.gen=5000,numPop=2), fitness.args=new.fitness.args(fitness.fn=twoPop.one.max.withCoupling(.5), goal=30, externalConnectionsMatrix=matrix(c(1:1024, 1:1024), nrow=1024, ncol=2)), xover.args = new.xover.args(keepSecondaryParent=FALSE), selection.args=new.selection.args(elite.size=10,elitism=TRUE,spatial.selection.fn=spatial.child.selection.random.hardElite,adjMatrix=graph), verbose=FALSE)
generational.ga(ga)
hard.elite.ten.BigPop[i] = ga$gen
print(paste(i,"Complete"))
rm(ga)
}
save(hard.elite.ten.BigPop,file="hard.elite.ten.BigPop")
|
92292c3309105a79660482e5e123c87ad0f9d48c
|
70711fc73084b483c55cbcc32212724de4ba0ef9
|
/man/syno_mycobank.Rd
|
bd253d2e8c4025c27dec7d0ac3e4790bd61a6b0e
|
[] |
no_license
|
FranzKrah/rmycobank
|
6b5023a3a8900c233110ef4c374b5f61a0eb1f8a
|
c7edfdd1d3413c3380cac64b7b110a0bc1b6c3e3
|
refs/heads/master
| 2021-01-10T17:23:25.375075
| 2016-02-17T13:31:34
| 2016-02-17T13:31:34
| 51,508,884
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,027
|
rd
|
syno_mycobank.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/syno_mycobank.R
\name{syno_mycobank}
\alias{syno_mycobank}
\title{Downloads synonyms from mycobank.org}
\usage{
syno_mycobank(taxon)
}
\arguments{
\item{taxon}{a \code{character} containing fungal species name}
}
\value{
list with two vectors of class \code{character}
}
\description{
Searches and downloads synonmys from the mycobank.org website for a given fungal species name
}
\details{
The function searches all synonym entries on the MycoBank on-line database. The data
may then be further processes, e.g. a search which of the synonmys are present at GenBank
(syns_on_ncbi).
Mycobank devides synonyms into obligate (=) and facultative (≡) synonyms. Obligate
synonyms have 2 different materials, while facultative synonyms refer to the same material. The
output of this function produces two lists according to this definition.
}
\examples{
syns <- syno_mycobank(taxon = "Heterobasidion annosum")
}
\author{
Franz-Sebastian Krah
}
|
9a8b8f09fddd3f0074732cd5458e58ea0af8009b
|
2b01128d3fa2d06bd046cc0c80c334a6b7fbe25e
|
/Cp BIC AdjR2.R
|
01a35b896c03e699dea70e460a4502a74c64a8b4
|
[] |
no_license
|
diarabit/Regression---many-type
|
9ca4e4b2e3fa0f31d4e3fd7a1ce35b650bbf55f0
|
fbb7a48bdea2783d007e53af95a3c878236788a9
|
refs/heads/master
| 2020-05-28T11:32:04.141421
| 2015-12-02T03:12:58
| 2015-12-02T03:12:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 806
|
r
|
Cp BIC AdjR2.R
|
#B6. (a) (g)
res <- function(n){
return (1 - (1 - 1/n)**n)
}
k <- 1:100000
plot(k, res(k), type = 'n')
#B6. (b)
X = rnorm(100)
eps = rnorm(100)
beta0 = 5
beta1 = 2
beta2 = -5
beta3 = 13
Y = beta0 + beta1 * X + beta2 * X * X + beta3 * X * X * X + eps
library(leaps)
dataxy = data.frame(y = Y, x = X)
mp = regsubsets(y ~ poly(x, 10, raw = T), data = dataxy, nvmax = 10)
mpx = summary(mp)
which.min(mpx$cp)
which.min(mpx$bic)
which.max(mpx$adjr2)
plot(mpx$cp, xlab = "Subset", ylab = "Cp", type = "l")
points(3, mpx$cp[3], pch = 1, col = "red", lwd = 5)
plot(mpx$bic, xlab = "Subset", ylab = "BIC", type = "l")
points(3, mpx$bic[3], pch = 1, col = "red", lwd = 5)
plot(mpx$adjr2, xlab = "Subset", ylab = "Adj R2", type = "l")
points(4, mpx$adjr2[4], pch = 1, col = "red", lwd = 5)
coef(mp, id = 3)
|
756cf9adc0dfe1a3e58d1d2785e49c3d91f84bfa
|
2041acc62d31b18ffdb8d4205962151433e81629
|
/man/get_alumno_coe.Rd
|
a934e5d3c4184a5754c6a226837815d60f8c073a
|
[] |
no_license
|
mguevara/HES
|
9571efd6d69613f21a94c55cf3b13b287e543dd0
|
669063774d4c9ef3eeaa8861b1bcdb8143dd437d
|
refs/heads/master
| 2022-04-08T12:21:51.275725
| 2020-02-19T16:45:44
| 2020-02-19T16:45:44
| 234,790,645
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 865
|
rd
|
get_alumno_coe.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/load_data.R
\name{get_alumno_coe}
\alias{get_alumno_coe}
\title{Get Alumno coe}
\usage{
get_alumno_coe(g = ches1217, classification, data_postulaciones)
}
\arguments{
\item{g}{Graph containing a network of kind Higher Education Space HES. Default value is ches1227, CHilean HES for 2012-2017 dataset.}
\item{classification}{Dataframe with students' information previously obtained with function get_classification().}
\item{data_postulaciones}{Dataframe with students' information previously obtained with function get_data_postulaciones()}
}
\value{
Data useful to perform regressions and analysis. It includes information of Coherence of network HES and information of dropped and not-dropped.
}
\description{
Alumnos merged with postulaciones merged with coherence.
}
\examples{
}
|
47b42c2cf669698b379289d6158f59a73f19cd13
|
d425c2a2482195442314960ed968d653dc6fca65
|
/scripts/03_kmeans_all.R
|
723bb592ad4a79e15411d3ade28a04d1a28c3707
|
[] |
no_license
|
foreverst8/KERS_analysis
|
53389869295f578fc95657ef422538318631f991
|
9d30bf1a9f9109f25a4e666d8ff8f09d7e8c23e1
|
refs/heads/master
| 2023-03-21T13:41:57.410584
| 2020-12-14T15:02:05
| 2020-12-14T15:02:05
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,940
|
r
|
03_kmeans_all.R
|
suppressPackageStartupMessages(library(chipmine))
suppressPackageStartupMessages(library(foreach))
suppressPackageStartupMessages(library(doParallel))
## this script performs k-means clustering on (-2kb)-TSS-TES-(+1kb) profile matrix
## generated for all genes in each TF ChIPseq data
rm(list = ls())
##################################################################################
file_exptInfo <- here::here("data", "reference_data", "sampleInfo.txt")
file_genes <- here::here("data", "reference_data", "AN_genesForPolII.bed")
TF_dataPath <- here::here("..", "data", "A_nidulans", "TF_data")
polII_dataPath <- here::here("..", "data", "A_nidulans", "polII_data")
hist_dataPath <- here::here("..", "data", "A_nidulans", "histone_data")
other_dataPath <- here::here("..", "data", "A_nidulans", "other_data")
file_tf_macs2 <- paste(TF_dataPath, "/", "sample_tf_macs2.list", sep = "")
file_tf <- paste(TF_dataPath, "/", "sample_tf.list", sep = "")
set.seed(20)
doClustering <- TRUE
clusters <- 7
tfYlim <- 0.996 ##0.999
geneFilter <- c("AN5245", "AN3245")
cl <- makeCluster(4) #not to overload your computer
registerDoParallel(cl)
##################################################################################
# geneSet <- data.table::fread(file = file_genes, header = F,
# col.names = c("chr", "start", "end", "name", "score", "strand")) %>%
# dplyr::mutate(length = end - start) %>%
# dplyr::filter(! name %in% geneFilter)
geneSet <- suppressMessages(
readr::read_tsv(
file = file_genes,col_names = c("chr", "start", "end", "geneId", "score", "strand")
)) %>%
dplyr::mutate(length = end - start) %>%
dplyr::filter(! geneId %in% geneFilter)
tfSampleList <- suppressMessages(
readr::read_tsv(file = file_tf_macs2, col_names = c("id"), comment = "#")
)
# tfSampleList <- data.frame(id = c("An_ecoA_20h_HA_1", "An_ecoA_48h_HA_1", "An_kdmB_20h_HA_1", "An_kdmB_48h_HA_1", "An_rpdA_20h_HA_1", "An_rpdA_48h_HA_1", "An_sntB_20h_HA_1", "An_sntB_48h_HA_1", "An_kdmB_20h_HA_2", "An_kdmB_48h_HA_2", "An_rpdA_20h_HA_2", "An_rpdA_48h_HA_2", "An_sntB_20h_HA_2", "An_sntB_48h_HA_2", "An_ecoA_kdmB_del_20h_HA_1", "An_ecoA_kdmB_del_48h_HA_1", "An_rpdA_kdmB_del_20h_HA_1", "An_rpdA_kdmB_del_48h_HA_1", "An_sntB_kdmB_del_20h_HA_1", "An_sntB_kdmB_del_48h_HA_1", "An_ecoA_20h_HA_2", "An_ecoA_48h_HA_2", "An_ecoA_kdmB_del_20h_HA_2", "An_ecoA_kdmB_del_48h_HA_2", "An_rpdA_kdmB_del_20h_HA_2", "An_rpdA_kdmB_del_48h_HA_2", "An_sntB_kdmB_del_20h_HA_2", "An_sntB_kdmB_del_48h_HA_2", "An_ecoA_sntB_del_20h_HA_2", "An_ecoA_sntB_del_48h_HA_2", "An_kdmB_laeA_del_20h_HA_1", "An_kdmB_laeA_del_48h_HA_1", "An_laeA_kdmB_del_20h_HA_1", "An_laeA_kdmB_del_48h_HA_1", "An_sudA_kdmB_del_20h_HA_1", "An_sudA_kdmB_del_48h_HA_1"))
tf_info <- get_sample_information(
exptInfoFile = file_exptInfo,
samples = tfSampleList$id,
dataPath = TF_dataPath,
profileMatrixSuffix = "normalized_profile"
)
# i <- 1
foreach(i = 1:nrow(tf_info),
.packages = c("chipmine")) %dopar% {
## read the profile matrix
mat1 <- chipmine::import_profile_from_file(
file = tf_info$matFile[i],
source = "deeptools",
signalName = tf_info$sampleId[i],
selectGenes = geneSet$geneId)
## check the distribution in data
quantile(mat1, c(seq(0, 0.9, by = 0.1), 0.95, 0.99, 0.992, 0.995, 0.999, 0.9999, 1), na.rm = T)
# col_fun <- colorRamp2(quantile(mat1, c(0.50, 0.995), na.rm = T), c("white", "red"))
km <- chipmine::profile_matrix_kmeans(
mat = mat1,
km = clusters,
clustFile = "temp.kmeans.tab",
# clustFile = tf_info$clusterFile[i],
name = tf_info$sampleId[i])
cat(as.character(Sys.time()), "Done...", tf_info$sampleId[i], "\n\n")
}
stopCluster(cl)
|
fe8f6520fa66c7641ae9a62c67f4123cd250e041
|
21ea1c16f10f03487f17dabc1671018b545d1a10
|
/man/theme_hrbrmstr_cabin.Rd
|
e957d0ae065fedf1a0cd27f3f1edd55b04f44739
|
[] |
no_license
|
tpopenfoose/hrbrmisc
|
055cc2b33124592d3e9df79d6fa9df93807331dd
|
c7e6f85c22ae4f0b0f69ea435e7bf217ff8e1e43
|
refs/heads/master
| 2021-01-11T14:36:47.677412
| 2017-01-22T17:32:47
| 2017-01-22T17:32:47
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 297
|
rd
|
theme_hrbrmstr_cabin.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/themes.r
\name{theme_hrbrmstr_cabin}
\alias{theme_hrbrmstr_cabin}
\title{Cabin theme}
\usage{
theme_hrbrmstr_cabin(grid = TRUE, axis = FALSE, ticks = FALSE, ...)
}
\description{
\url{http://www.impallari.com/cabin/}
}
|
a52345db3ac54cb2d986e12af815ea92cdfe7377
|
9f5059f7c029b7ad55bf52cd75431134244c0d16
|
/R/installR.R
|
f36f4771892c0977cf8507ab8399ace89d048e57
|
[
"MIT"
] |
permissive
|
mxfeinberg/electricShine
|
e5a1137aee77707803177f7a1a1537eda7bac324
|
d8ddefa8bc6ad1186ad36a69d4cd72247326699a
|
refs/heads/master
| 2020-07-26T02:52:31.718346
| 2019-09-14T06:30:27
| 2019-09-14T07:54:28
| 208,512,099
| 0
| 0
|
NOASSERTION
| 2019-09-14T22:34:33
| 2019-09-14T22:34:33
| null |
UTF-8
|
R
| false
| false
| 881
|
r
|
installR.R
|
#' Install R from MRAN date into electricShine folder
#'
#' @param exe find exe name
#' @param path path to electricShine directory
#' @param date MRAN date from which to install R
#'
#' @return nothing
#' @export
#'
installR <- function(date,
exe = "R-[0-9.]+.+-win\\.exe",
path){
gluey <- glue::glue("https://cran.microsoft.com/snapshot/{date}/bin/windows/base/")
readCran <- base::readLines(gluey, warn = FALSE)
filename <- stats::na.omit(stringr::str_extract(readCran, exe))[1]
URL <- base::paste(gluey, filename, sep = '')
tmp <- base::file.path(tempdir(), filename)
utils::download.file(URL, tmp, mode = "wb")
to_copy_into <- base::file.path(path, "r_win")
base::dir.create(to_copy_into)
to_copy_into <- base::shQuote(to_copy_into)
base::system(glue::glue("{tmp} /VERYSILENT /DIR={to_copy_into}"))
}
|
aed799502f495eb7aa2a6a5ff655f5cd1531ddba
|
f6d92cc450dfe87c348a3dd9bdb0d9b8f2d5fe0e
|
/R/data.mkl.R
|
95051d08e7e25516a9b2f15f4678b05c7cc071ac
|
[] |
no_license
|
EarlyWarningProject/2015-Statistical-Risk-Assessment
|
21302673679e58380bdab5b1050ff44037581c10
|
320e97b80731ed3b7faf2c19bb719f6e00f06692
|
refs/heads/master
| 2021-01-19T08:07:41.978644
| 2017-02-15T21:13:23
| 2017-02-15T21:13:23
| 82,070,084
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 44,257
|
r
|
data.mkl.R
|
# MASS KILLING EPISODE DATA MAKER
# Jay Ulfelder
# 2015-03-20
# Clear workspace
rm(list=ls(all=TRUE))
# Get working directory
wd <- getwd()
# Load required functions
source(paste0(wd, "/r/f.countryyearrackit.r"))
source(paste0(wd, "/r/f.pitfcodeit.r"))
# Create rack of names and years, starting in 1945 and ending in 2014, with PITF codes
rack <- pitfcodeit(countryyearrackit(1945, 2014), "country")
# Create columns for new variables with zeros
rack$mkl.start <- 0 # Onset of any new episodes during year
rack$mkl.end <- 0 # End of any mkl.ongoing episodes during year
rack$mkl.ongoing <- 0 # Any mkl.ongoing episodes during year
rack$mkl.type <- 0 # Context in which new episode started: 1=civil war, 2=uprising, 3=repression, 4 other
rack$mkl.ever <- 0 # Any episodes in the country since 1945
# Bulgaria, 1944-1956 (repression)
rack$mkl.start[rack$country=="Bulgaria" & rack$year==1944] <- 1
rack$mkl.end[rack$country=="Bulgaria" & rack$year==1956] <- 1
rack$mkl.ongoing[rack$country=="Bulgaria" & rack$year>=1944 & rack$year<=1956] <- 1
rack$mkl.type[rack$country=="Bulgaria" & rack$year==1944] <- 3
rack$mkl.ever[rack$country=="Bulgaria" & rack$year>=1944] <- 1
# Albania, 1944-1985 (repression)
rack$mkl.start[rack$country=="Albania" & rack$year==1944] <- 1
rack$mkl.end[rack$country=="Albania" & rack$year==1985] <- 1
rack$mkl.ongoing[rack$country=="Albania" & rack$year>=1944 & rack$year<=1985] <- 1
rack$mkl.type[rack$country=="Albania" & rack$year==1944] <- 3
rack$mkl.ever[rack$country=="Albania" & rack$year>=1944] <- 1
# Poland, 1945-1956 (repression)
rack$mkl.start[rack$country=="Poland" & rack$year==1945] <- 1
rack$mkl.end[rack$country=="Poland" & rack$year==1956] <- 1
rack$mkl.ongoing[rack$country=="Poland" & rack$year>=1945 & rack$year<=1956] <- 1
rack$mkl.type[rack$country=="Poland" & rack$year==1945] <- 3
rack$mkl.ever[rack$country=="Poland" & rack$year>=1945] <- 1
# Poland, 1945-1948 (expulsion of Germans)
# Subsumed under preceding case
# Poland, 1945-1946 (Ukranian nationalists)
# Subsumed under preceding case
# Hungary, 1945-1960 (repression)
rack$mkl.start[rack$country=="Hungary" & rack$year==1945] <- 1
rack$mkl.end[rack$country=="Hungary" & rack$year==1960] <- 1
rack$mkl.ongoing[rack$country=="Hungary" & rack$year>=1945 & rack$year<=1960] <- 1
rack$mkl.type[rack$country=="Hungary" & rack$year==1945] <- 3
rack$mkl.ever[rack$country=="Hungary" & rack$year>=1945] <- 1
# Yugoslavia, 1945-1956 (repression)
rack$mkl.start[rack$country=="Yugoslavia" & rack$year==1945] <- 1
rack$mkl.end[rack$country=="Yugoslavia" & rack$year==1956] <- 1
rack$mkl.ongoing[rack$country=="Yugoslavia" & rack$year>=1945 & rack$year<=1956] <- 1
rack$mkl.type[rack$country=="Yugoslavia" & rack$year==1945] <- 3
rack$mkl.ever[rack$country=="Yugoslavia" & rack$year>=1945] <- 1
# Yugoslavia, 1945-1948 (expulsion of Germans)
# Subsumed under preceding case
# Romania, 1945-1989 (repression)
rack$mkl.start[rack$country=="Romania" & rack$year==1945] <- 1
rack$mkl.end[rack$country=="Romania" & rack$year==1989] <- 1
rack$mkl.ongoing[rack$country=="Romania" & rack$year>=1945 & rack$year<=1989] <- 1
rack$mkl.type[rack$country=="Romania" & rack$year==1945] <- 3
rack$mkl.ever[rack$country=="Romania" & rack$year>=1945] <- 1
# Czechoslovakia, 1945-1946 (expulsion of Germans)
rack$mkl.start[rack$country=="Czechoslovakia" & rack$year==1945] <- 1
rack$mkl.end[rack$country=="Czechoslovakia" & rack$year==1946] <- 1
rack$mkl.ongoing[rack$country=="Czechoslovakia" & rack$year>=1945 & rack$year<=1946] <- 1
rack$mkl.type[rack$country=="Czechoslovakia" & rack$year==1945] <- 4
rack$mkl.ever[rack$country=="Czechoslovakia" & rack$year>=1945] <- 1
# Czechoslovakia, 1948-1963 (repression)
rack$mkl.start[rack$country=="Czechoslovakia" & rack$year==1948] <- 1
rack$mkl.end[rack$country=="Czechoslovakia" & rack$year==1963] <- 1
rack$mkl.ongoing[rack$country=="Czechoslovakia" & rack$year>=1948 & rack$year<=1963] <- 1
rack$mkl.type[rack$country=="Czechoslovakia" & rack$year==1948] <- 3
# ---------------------------------------------------------------------------
# Episode-level coding of state-sponsored mass-killing (MKL) events into the
# country-year frame `rack` (built earlier in this file). For each episode:
#   mkl.start   = 1 in the episode's first year
#   mkl.end     = 1 in the episode's last year (omitted for episodes with no
#                 end year in the label, e.g. "Myanmar, 1948-")
#   mkl.ongoing = 1 in every year the episode is under way
#   mkl.type    = category code, recorded in the START year only
#                 (codes 1/2/3 appear below; 3 co-occurs with "repression"
#                  labels and 2 with brief one-year events -- exact codebook
#                  meaning not visible here, TODO confirm)
#   mkl.ever    = 1 from the country's FIRST episode onward; by convention it
#                 is set once per country, so later episodes for the same
#                 country omit the mkl.ever line.
# Overlapping episodes for the same country simply re-set the same cells to 1,
# so the flags record "any episode" rather than episode identity.
# ---------------------------------------------------------------------------
# Philippines, 1946-1954 (Huks)
rack$mkl.start[rack$country=="Philippines" & rack$year==1946] <- 1
rack$mkl.end[rack$country=="Philippines" & rack$year==1954] <- 1
rack$mkl.ongoing[rack$country=="Philippines" & rack$year>=1946 & rack$year<=1954] <- 1
rack$mkl.type[rack$country=="Philippines" & rack$year==1946] <- 1
rack$mkl.ever[rack$country=="Philippines" & rack$year>=1946] <- 1
# Philippines, 1969-2010 (New Peoples Army)
rack$mkl.start[rack$country=="Philippines" & rack$year==1969] <- 1
rack$mkl.end[rack$country=="Philippines" & rack$year==2010] <- 1
rack$mkl.ongoing[rack$country=="Philippines" & rack$year>=1969 & rack$year<=2010] <- 1
rack$mkl.type[rack$country=="Philippines" & rack$year==1969] <- 1
# Philippines, 1972-1986 (Moro Liberation)
rack$mkl.start[rack$country=="Philippines" & rack$year==1972] <- 1
rack$mkl.end[rack$country=="Philippines" & rack$year==1986] <- 1
rack$mkl.ongoing[rack$country=="Philippines" & rack$year>=1972 & rack$year<=1986] <- 1
rack$mkl.type[rack$country=="Philippines" & rack$year==1972] <- 1
# China-Taiwan, 1947
rack$mkl.start[rack$country=="China" & rack$year==1947] <- 1
rack$mkl.end[rack$country=="China" & rack$year==1947] <- 1
rack$mkl.ongoing[rack$country=="China" & rack$year==1947] <- 1
rack$mkl.type[rack$country=="China" & rack$year==1947] <- 2
rack$mkl.ever[rack$country=="China" & rack$year>=1947] <- 1
# China, 1949-1977 (Communist)
rack$mkl.start[rack$country=="China" & rack$year==1949] <- 1
rack$mkl.end[rack$country=="China" & rack$year==1977] <- 1
rack$mkl.ongoing[rack$country=="China" & rack$year>=1949 & rack$year<=1977] <- 1
rack$mkl.type[rack$country=="China" & rack$year==1949] <- 3
# China, 1954-1977 (Tibet)
rack$mkl.start[rack$country=="China" & rack$year==1954] <- 1
rack$mkl.end[rack$country=="China" & rack$year==1977] <- 1
rack$mkl.ongoing[rack$country=="China" & rack$year>=1954 & rack$year<=1977] <- 1
rack$mkl.type[rack$country=="China" & rack$year==1954] <- 1
# Myanmar, 1948- (ethnic separatists)
# Open-ended episode: no mkl.end line, and mkl.ongoing runs to the end of the frame.
rack$mkl.start[rack$country=="Myanmar" & rack$year==1948] <- 1
rack$mkl.ongoing[rack$country=="Myanmar" & rack$year>=1948] <- 1
rack$mkl.type[rack$country=="Myanmar" & rack$year==1948] <- 1
rack$mkl.ever[rack$country=="Myanmar" & rack$year>=1948] <- 1
# Myanmar, 1948-1990 (Communist insurgency/repression)
# Subsumed under preceding case
# Korea, 1948-1950 (civil violence in south - Cheju and Yosu)
rack$mkl.start[rack$country=="South Korea" & rack$year==1948] <- 1
rack$mkl.end[rack$country=="South Korea" & rack$year==1950] <- 1
rack$mkl.ongoing[rack$country=="South Korea" & rack$year>=1948 & rack$year<=1950] <- 1
rack$mkl.type[rack$country=="South Korea" & rack$year==1948] <- 1
rack$mkl.ever[rack$country=="South Korea" & rack$year>=1948] <- 1
# North Korea, 1948- (political repression)
rack$mkl.start[rack$country=="North Korea" & rack$year==1948] <- 1
rack$mkl.ongoing[rack$country=="North Korea" & rack$year>=1948] <- 1
rack$mkl.type[rack$country=="North Korea" & rack$year==1948] <- 3
rack$mkl.ever[rack$country=="North Korea" & rack$year>=1948] <- 1
# Guatemala, 1954-1996 (civil war and repression)
rack$mkl.start[rack$country=="Guatemala" & rack$year==1954] <- 1
rack$mkl.end[rack$country=="Guatemala" & rack$year==1996] <- 1
rack$mkl.ongoing[rack$country=="Guatemala" & rack$year>=1954 & rack$year<=1996] <- 1
rack$mkl.type[rack$country=="Guatemala" & rack$year==1954] <- 1
rack$mkl.ever[rack$country=="Guatemala" & rack$year>=1954] <- 1
# North Vietnam, 1954-1957 (suspected political opponents)
rack$mkl.start[rack$country=="North Vietnam" & rack$year==1954] <- 1
rack$mkl.end[rack$country=="North Vietnam" & rack$year==1957] <- 1
rack$mkl.ongoing[rack$country=="North Vietnam" & rack$year>=1954 & rack$year<=1957] <- 1
rack$mkl.type[rack$country=="North Vietnam" & rack$year==1954] <- 3
rack$mkl.ever[rack$country=="North Vietnam" & rack$year>=1954] <- 1
# South Vietnam, 1954-1975 (alleged Viet Cong supporters)
rack$mkl.start[rack$country=="South Vietnam" & rack$year==1954] <- 1
rack$mkl.end[rack$country=="South Vietnam" & rack$year==1975] <- 1
rack$mkl.ongoing[rack$country=="South Vietnam" & rack$year>=1954 & rack$year<=1975] <- 1
rack$mkl.type[rack$country=="South Vietnam" & rack$year==1954] <- 1
rack$mkl.ever[rack$country=="South Vietnam" & rack$year>=1954] <- 1
# Vietnam, 1975-1986 (post-war repression)
rack$mkl.start[rack$country=="Vietnam" & rack$year==1975] <- 1
rack$mkl.end[rack$country=="Vietnam" & rack$year==1986] <- 1
rack$mkl.ongoing[rack$country=="Vietnam" & rack$year>=1975 & rack$year<=1986] <- 1
rack$mkl.type[rack$country=="Vietnam" & rack$year==1975] <- 1
rack$mkl.ever[rack$country=="Vietnam" & rack$year>=1975] <- 1
# Sudan, 1955-1972 (civil war)
rack$mkl.start[rack$country=="Sudan" & rack$year==1955] <- 1
rack$mkl.end[rack$country=="Sudan" & rack$year==1972] <- 1
rack$mkl.ongoing[rack$country=="Sudan" & rack$year>=1955 & rack$year<=1972] <- 1
rack$mkl.type[rack$country=="Sudan" & rack$year==1955] <- 1
rack$mkl.ever[rack$country=="Sudan" & rack$year>=1955] <- 1
# Sudan, 1983-2005 (secessionist non-Muslim southerners and Nuba)
rack$mkl.start[rack$country=="Sudan" & rack$year==1983] <- 1
rack$mkl.end[rack$country=="Sudan" & rack$year==2005] <- 1
rack$mkl.ongoing[rack$country=="Sudan" & rack$year>=1983 & rack$year<=2005] <- 1
rack$mkl.type[rack$country=="Sudan" & rack$year==1983] <- 1
# Haiti, 1958-1986 (Duvalier-repression)
rack$mkl.start[rack$country=="Haiti" & rack$year==1958] <- 1
rack$mkl.end[rack$country=="Haiti" & rack$year==1986] <- 1
rack$mkl.ongoing[rack$country=="Haiti" & rack$year>=1958 & rack$year<=1986] <- 1
rack$mkl.type[rack$country=="Haiti" & rack$year==1958] <- 3
rack$mkl.ever[rack$country=="Haiti" & rack$year>=1958] <- 1
# Cuba, 1959-1970 (Castro-repression)
rack$mkl.start[rack$country=="Cuba" & rack$year==1959] <- 1
rack$mkl.end[rack$country=="Cuba" & rack$year==1970] <- 1
rack$mkl.ongoing[rack$country=="Cuba" & rack$year>=1959 & rack$year<=1970] <- 1
rack$mkl.type[rack$country=="Cuba" & rack$year==1959] <- 3
rack$mkl.ever[rack$country=="Cuba" & rack$year>=1959] <- 1
# Iraq, 1959 (Mosul uprising)
rack$mkl.start[rack$country=="Iraq" & rack$year==1959] <- 1
rack$mkl.end[rack$country=="Iraq" & rack$year==1959] <- 1
rack$mkl.ongoing[rack$country=="Iraq" & rack$year==1959] <- 1
rack$mkl.type[rack$country=="Iraq" & rack$year==1959] <- 1
rack$mkl.ever[rack$country=="Iraq" & rack$year>=1959] <- 1
# Iraq, 1961-1991 (Kurds)
rack$mkl.start[rack$country=="Iraq" & rack$year==1961] <- 1
rack$mkl.end[rack$country=="Iraq" & rack$year==1991] <- 1
rack$mkl.ongoing[rack$country=="Iraq" & rack$year>=1961 & rack$year<=1991] <- 1
rack$mkl.type[rack$country=="Iraq" & rack$year==1961] <- 1
# Iraq, 1963-2003 (Saddam-repression)
rack$mkl.start[rack$country=="Iraq" & rack$year==1963] <- 1
rack$mkl.end[rack$country=="Iraq" & rack$year==2003] <- 1
rack$mkl.ongoing[rack$country=="Iraq" & rack$year>=1963 & rack$year<=2003] <- 1
rack$mkl.type[rack$country=="Iraq" & rack$year==1963] <- 3
# Guinea, 1960-1980 (Sekou Toure-repression)
rack$mkl.start[rack$country=="Guinea" & rack$year==1960] <- 1
rack$mkl.end[rack$country=="Guinea" & rack$year==1980] <- 1
rack$mkl.ongoing[rack$country=="Guinea" & rack$year>=1960 & rack$year<=1980] <- 1
rack$mkl.type[rack$country=="Guinea" & rack$year==1960] <- 3
rack$mkl.ever[rack$country=="Guinea" & rack$year>=1960] <- 1
# Laos, 1959-1973 (Communists-civil war)
rack$mkl.start[rack$country=="Laos" & rack$year==1959] <- 1
rack$mkl.end[rack$country=="Laos" & rack$year==1973] <- 1
rack$mkl.ongoing[rack$country=="Laos" & rack$year>=1959 & rack$year<=1973] <- 1
rack$mkl.type[rack$country=="Laos" & rack$year==1959] <- 1
rack$mkl.ever[rack$country=="Laos" & rack$year>=1959] <- 1
# Laos, 1975-1991 (Communist repression/Hmong civil war)
rack$mkl.start[rack$country=="Laos" & rack$year==1975] <- 1
rack$mkl.end[rack$country=="Laos" & rack$year==1991] <- 1
rack$mkl.ongoing[rack$country=="Laos" & rack$year>=1975 & rack$year<=1991] <- 1
rack$mkl.type[rack$country=="Laos" & rack$year==1975] <- 1
# Congo, 1960-1963 (Kasai)
rack$mkl.start[rack$country=="Congo-Kinshasa" & rack$year==1960] <- 1
rack$mkl.end[rack$country=="Congo-Kinshasa" & rack$year==1963] <- 1
rack$mkl.ongoing[rack$country=="Congo-Kinshasa" & rack$year>=1960 & rack$year<=1963] <- 1
rack$mkl.type[rack$country=="Congo-Kinshasa" & rack$year==1960] <- 1
rack$mkl.ever[rack$country=="Congo-Kinshasa" & rack$year>=1960] <- 1
# Congo, 1964-1965 (CNL-Simbas)
rack$mkl.start[rack$country=="Congo-Kinshasa" & rack$year==1964] <- 1
rack$mkl.end[rack$country=="Congo-Kinshasa" & rack$year==1965] <- 1
rack$mkl.ongoing[rack$country=="Congo-Kinshasa" & rack$year>=1964 & rack$year<=1965] <- 1
rack$mkl.type[rack$country=="Congo-Kinshasa" & rack$year==1964] <- 1
# Ethiopia, 1961-1991 (Eritrea-civil war)
rack$mkl.start[rack$country=="Ethiopia" & rack$year==1961] <- 1
rack$mkl.end[rack$country=="Ethiopia" & rack$year==1991] <- 1
rack$mkl.ongoing[rack$country=="Ethiopia" & rack$year>=1961 & rack$year<=1991] <- 1
rack$mkl.type[rack$country=="Ethiopia" & rack$year==1961] <- 1
rack$mkl.ever[rack$country=="Ethiopia" & rack$year>=1961] <- 1
# Ethiopia, 1974-1991 (political repression by Dergue-Tigre civil war)
rack$mkl.start[rack$country=="Ethiopia" & rack$year==1974] <- 1
rack$mkl.end[rack$country=="Ethiopia" & rack$year==1991] <- 1
rack$mkl.ongoing[rack$country=="Ethiopia" & rack$year>=1974 & rack$year<=1991] <- 1
rack$mkl.type[rack$country=="Ethiopia" & rack$year==1974] <- 3
# Ethiopia, 1977-1985 (Ogaden)
rack$mkl.start[rack$country=="Ethiopia" & rack$year==1977] <- 1
rack$mkl.end[rack$country=="Ethiopia" & rack$year==1985] <- 1
rack$mkl.ongoing[rack$country=="Ethiopia" & rack$year>=1977 & rack$year<=1985] <- 1
rack$mkl.type[rack$country=="Ethiopia" & rack$year==1977] <- 1
# Rwanda, 1963-1967
rack$mkl.start[rack$country=="Rwanda" & rack$year==1963] <- 1
rack$mkl.end[rack$country=="Rwanda" & rack$year==1967] <- 1
rack$mkl.ongoing[rack$country=="Rwanda" & rack$year>=1963 & rack$year<=1967] <- 1
rack$mkl.type[rack$country=="Rwanda" & rack$year==1963] <- 1
rack$mkl.ever[rack$country=="Rwanda" & rack$year>=1963] <- 1
# Algeria, 1962 (post-independence retribution)
rack$mkl.start[rack$country=="Algeria" & rack$year==1962] <- 1
rack$mkl.end[rack$country=="Algeria" & rack$year==1962] <- 1
rack$mkl.ongoing[rack$country=="Algeria" & rack$year==1962] <- 1
rack$mkl.type[rack$country=="Algeria" & rack$year==1962] <- 1
rack$mkl.ever[rack$country=="Algeria" & rack$year>=1962] <- 1
# Yemen, 1962-1970
rack$mkl.start[rack$country=="North Yemen" & rack$year==1962] <- 1
rack$mkl.end[rack$country=="North Yemen" & rack$year==1970] <- 1
rack$mkl.ongoing[rack$country=="North Yemen" & rack$year>=1962 & rack$year<=1970] <- 1
rack$mkl.type[rack$country=="North Yemen" & rack$year==1962] <- 1
rack$mkl.ever[rack$country=="North Yemen" & rack$year>=1962] <- 1
# Zanzibar, 1964 (political repression)
# Coded under "Tanzania" because Zanzibar has no separate country row here.
rack$mkl.start[rack$country=="Tanzania" & rack$year==1964] <- 1
rack$mkl.end[rack$country=="Tanzania" & rack$year==1964] <- 1
rack$mkl.ongoing[rack$country=="Tanzania" & rack$year==1964] <- 1
rack$mkl.type[rack$country=="Tanzania" & rack$year==1964] <- 3
rack$mkl.ever[rack$country=="Tanzania" & rack$year>=1964] <- 1
# Malawi, 1964-1994 (political repression)
rack$mkl.start[rack$country=="Malawi" & rack$year==1964] <- 1
rack$mkl.end[rack$country=="Malawi" & rack$year==1994] <- 1
rack$mkl.ongoing[rack$country=="Malawi" & rack$year>=1964 & rack$year<=1994] <- 1
rack$mkl.type[rack$country=="Malawi" & rack$year==1964] <- 3
rack$mkl.ever[rack$country=="Malawi" & rack$year>=1964] <- 1
# Colombia, 1948-1958 (la Violencia) [JU: Is this state-sponsored?]
rack$mkl.start[rack$country=="Colombia" & rack$year==1948] <- 1
rack$mkl.end[rack$country=="Colombia" & rack$year==1958] <- 1
rack$mkl.ongoing[rack$country=="Colombia" & rack$year>=1948 & rack$year<=1958] <- 1
rack$mkl.type[rack$country=="Colombia" & rack$year==1948] <- 1
rack$mkl.ever[rack$country=="Colombia" & rack$year>=1948] <- 1
# Colombia, 1965-2010 (FARC, ELN, etc.)
rack$mkl.start[rack$country=="Colombia" & rack$year==1965] <- 1
rack$mkl.end[rack$country=="Colombia" & rack$year==2010] <- 1
rack$mkl.ongoing[rack$country=="Colombia" & rack$year>=1965 & rack$year<=2010] <- 1
rack$mkl.type[rack$country=="Colombia" & rack$year==1965] <- 1
# Dominican Republic, 1965-1978 (civil war)
rack$mkl.start[rack$country=="Dominican Republic" & rack$year==1965] <- 1
rack$mkl.end[rack$country=="Dominican Republic" & rack$year==1978] <- 1
rack$mkl.ongoing[rack$country=="Dominican Republic" & rack$year>=1965 & rack$year<=1978] <- 1
rack$mkl.type[rack$country=="Dominican Republic" & rack$year==1965] <- 1
rack$mkl.ever[rack$country=="Dominican Republic" & rack$year>=1965] <- 1
# Indonesia, 1949-1962 (Darul Islam)
rack$mkl.start[rack$country=="Indonesia" & rack$year==1949] <- 1
rack$mkl.end[rack$country=="Indonesia" & rack$year==1962] <- 1
rack$mkl.ongoing[rack$country=="Indonesia" & rack$year>=1949 & rack$year<=1962] <- 1
rack$mkl.type[rack$country=="Indonesia" & rack$year==1949] <- 1
rack$mkl.ever[rack$country=="Indonesia" & rack$year>=1949] <- 1
# Indonesia, 1965-1966 (anti-Communist massacres)
rack$mkl.start[rack$country=="Indonesia" & rack$year==1965] <- 1
rack$mkl.end[rack$country=="Indonesia" & rack$year==1966] <- 1
rack$mkl.ongoing[rack$country=="Indonesia" & rack$year>=1965 & rack$year<=1966] <- 1
rack$mkl.type[rack$country=="Indonesia" & rack$year==1965] <- 3
# Indonesia, 1969-2007 (West Papua)
rack$mkl.start[rack$country=="Indonesia" & rack$year==1969] <- 1
rack$mkl.end[rack$country=="Indonesia" & rack$year==2007] <- 1
rack$mkl.ongoing[rack$country=="Indonesia" & rack$year>=1969 & rack$year<=2007] <- 1
rack$mkl.type[rack$country=="Indonesia" & rack$year==1969] <- 1
# Indonesia, 1975-1999 (East Timor)
rack$mkl.start[rack$country=="Indonesia" & rack$year==1975] <- 1
rack$mkl.end[rack$country=="Indonesia" & rack$year==1999] <- 1
rack$mkl.ongoing[rack$country=="Indonesia" & rack$year>=1975 & rack$year<=1999] <- 1
rack$mkl.type[rack$country=="Indonesia" & rack$year==1975] <- 1
# Burundi, 1965-1973
rack$mkl.start[rack$country=="Burundi" & rack$year==1965] <- 1
rack$mkl.end[rack$country=="Burundi" & rack$year==1973] <- 1
rack$mkl.ongoing[rack$country=="Burundi" & rack$year>=1965 & rack$year<=1973] <- 1
rack$mkl.type[rack$country=="Burundi" & rack$year==1965] <- 1
rack$mkl.ever[rack$country=="Burundi" & rack$year>=1965] <- 1
# Cambodia, 1967-1975
rack$mkl.start[rack$country=="Cambodia" & rack$year==1967] <- 1
rack$mkl.end[rack$country=="Cambodia" & rack$year==1975] <- 1
rack$mkl.ongoing[rack$country=="Cambodia" & rack$year>=1967 & rack$year<=1975] <- 1
rack$mkl.type[rack$country=="Cambodia" & rack$year==1967] <- 1
rack$mkl.ever[rack$country=="Cambodia" & rack$year>=1967] <- 1
# Cambodia, 1975-1979 (Khmer Rouge)
rack$mkl.start[rack$country=="Cambodia" & rack$year==1975] <- 1
rack$mkl.end[rack$country=="Cambodia" & rack$year==1979] <- 1
rack$mkl.ongoing[rack$country=="Cambodia" & rack$year>=1975 & rack$year<=1979] <- 1
rack$mkl.type[rack$country=="Cambodia" & rack$year==1975] <- 3
# Nigeria, 1967-1970 (Biafra)
rack$mkl.start[rack$country=="Nigeria" & rack$year==1967] <- 1
rack$mkl.end[rack$country=="Nigeria" & rack$year==1970] <- 1
rack$mkl.ongoing[rack$country=="Nigeria" & rack$year>=1967 & rack$year<=1970] <- 1
rack$mkl.type[rack$country=="Nigeria" & rack$year==1967] <- 1
rack$mkl.ever[rack$country=="Nigeria" & rack$year>=1967] <- 1
# Nigeria, 1980 (Kano)
rack$mkl.start[rack$country=="Nigeria" & rack$year==1980] <- 1
rack$mkl.end[rack$country=="Nigeria" & rack$year==1980] <- 1
rack$mkl.ongoing[rack$country=="Nigeria" & rack$year==1980] <- 1
rack$mkl.type[rack$country=="Nigeria" & rack$year==1980] <- 1
# Equatorial Guinea, 1969-1979
rack$mkl.start[rack$country=="Equatorial Guinea" & rack$year==1969] <- 1
rack$mkl.end[rack$country=="Equatorial Guinea" & rack$year==1979] <- 1
rack$mkl.ongoing[rack$country=="Equatorial Guinea" & rack$year>=1969 & rack$year<=1979] <- 1
rack$mkl.type[rack$country=="Equatorial Guinea" & rack$year==1969] <- 3
rack$mkl.ever[rack$country=="Equatorial Guinea" & rack$year>=1969] <- 1
# Jordan, 1970-1971 (PLO) [Black September]
rack$mkl.start[rack$country=="Jordan" & rack$year==1970] <- 1
rack$mkl.end[rack$country=="Jordan" & rack$year==1971] <- 1
rack$mkl.ongoing[rack$country=="Jordan" & rack$year>=1970 & rack$year<=1971] <- 1
rack$mkl.type[rack$country=="Jordan" & rack$year==1970] <- 1
rack$mkl.ever[rack$country=="Jordan" & rack$year>=1970] <- 1
# Uganda, 1971-1979 (political opponents of Amin)
rack$mkl.start[rack$country=="Uganda" & rack$year==1971] <- 1
rack$mkl.end[rack$country=="Uganda" & rack$year==1979] <- 1
rack$mkl.ongoing[rack$country=="Uganda" & rack$year>=1971 & rack$year<=1979] <- 1
rack$mkl.type[rack$country=="Uganda" & rack$year==1971] <- 3
rack$mkl.ever[rack$country=="Uganda" & rack$year>=1971] <- 1
# Uganda, 1981-1986 (political and tribal rivals of Obote)
rack$mkl.start[rack$country=="Uganda" & rack$year==1981] <- 1
rack$mkl.end[rack$country=="Uganda" & rack$year==1986] <- 1
rack$mkl.ongoing[rack$country=="Uganda" & rack$year>=1981 & rack$year<=1986] <- 1
rack$mkl.type[rack$country=="Uganda" & rack$year==1981] <- 1
# NOTE(review): next line is redundant -- mkl.ever is already 1 for all Uganda
# years >= 1971 from the Amin episode above. Harmless, kept for safety.
rack$mkl.ever[rack$country=="Uganda" & rack$year>=1981] <- 1
#Pakistan, 1971 (Bangladesh)
rack$mkl.start[rack$country=="Pakistan" & rack$year==1971] <- 1
rack$mkl.end[rack$country=="Pakistan" & rack$year==1971] <- 1
rack$mkl.ongoing[rack$country=="Pakistan" & rack$year==1971] <- 1
rack$mkl.type[rack$country=="Pakistan" & rack$year==1971] <- 1
rack$mkl.ever[rack$country=="Pakistan" & rack$year>=1971] <- 1
#Pakistan, 1973-1977 (Baluchistan)
rack$mkl.start[rack$country=="Pakistan" & rack$year==1973] <- 1
rack$mkl.end[rack$country=="Pakistan" & rack$year==1977] <- 1
rack$mkl.ongoing[rack$country=="Pakistan" & rack$year>=1973 & rack$year<=1977] <- 1
rack$mkl.type[rack$country=="Pakistan" & rack$year==1973] <- 1
#Sri Lanka, 1971 (JVP)
rack$mkl.start[rack$country=="Sri Lanka" & rack$year==1971] <- 1
rack$mkl.end[rack$country=="Sri Lanka" & rack$year==1971] <- 1
rack$mkl.ongoing[rack$country=="Sri Lanka" & rack$year==1971] <- 1
rack$mkl.type[rack$country=="Sri Lanka" & rack$year==1971] <- 1
rack$mkl.ever[rack$country=="Sri Lanka" & rack$year>=1971] <- 1
# Sri Lanka, 1983-2002 (Tamil)
rack$mkl.start[rack$country=="Sri Lanka" & rack$year==1983] <- 1
rack$mkl.end[rack$country=="Sri Lanka" & rack$year==2002] <- 1
rack$mkl.ongoing[rack$country=="Sri Lanka" & rack$year>=1983 & rack$year<=2002] <- 1
rack$mkl.type[rack$country=="Sri Lanka" & rack$year==1983] <- 1
# Zimbabwe, 1972-1979 (civil war)
rack$mkl.start[rack$country=="Zimbabwe" & rack$year==1972] <- 1
rack$mkl.end[rack$country=="Zimbabwe" & rack$year==1979] <- 1
rack$mkl.ongoing[rack$country=="Zimbabwe" & rack$year>=1972 & rack$year<=1979] <- 1
rack$mkl.type[rack$country=="Zimbabwe" & rack$year==1972] <- 1
rack$mkl.ever[rack$country=="Zimbabwe" & rack$year>=1972] <- 1
# Zimbabwe, 1982-1987 (civil war)
rack$mkl.start[rack$country=="Zimbabwe" & rack$year==1982] <- 1
rack$mkl.end[rack$country=="Zimbabwe" & rack$year==1987] <- 1
rack$mkl.ongoing[rack$country=="Zimbabwe" & rack$year>=1982 & rack$year<=1987] <- 1
rack$mkl.type[rack$country=="Zimbabwe" & rack$year==1982] <- 1
# Chile, 1973-1978
rack$mkl.start[rack$country=="Chile" & rack$year==1973] <- 1
rack$mkl.end[rack$country=="Chile" & rack$year==1978] <- 1
rack$mkl.ongoing[rack$country=="Chile" & rack$year>=1973 & rack$year<=1978] <- 1
rack$mkl.type[rack$country=="Chile" & rack$year==1973] <- 3
rack$mkl.ever[rack$country=="Chile" & rack$year>=1973] <- 1
# Nicaragua, 1974-1979 (Somoza) [Sandinista insurgency]
rack$mkl.start[rack$country=="Nicaragua" & rack$year==1974] <- 1
rack$mkl.end[rack$country=="Nicaragua" & rack$year==1979] <- 1
rack$mkl.ongoing[rack$country=="Nicaragua" & rack$year>=1974 & rack$year<=1979] <- 1
rack$mkl.type[rack$country=="Nicaragua" & rack$year==1974] <- 1
rack$mkl.ever[rack$country=="Nicaragua" & rack$year>=1974] <- 1
# Nicaragua, 1981-1988 (Contras)
rack$mkl.start[rack$country=="Nicaragua" & rack$year==1981] <- 1
rack$mkl.end[rack$country=="Nicaragua" & rack$year==1988] <- 1
rack$mkl.ongoing[rack$country=="Nicaragua" & rack$year>=1981 & rack$year<=1988] <- 1
rack$mkl.type[rack$country=="Nicaragua" & rack$year==1981] <- 1
# Mozambique, 1975-1992 (RENAMO)
rack$mkl.start[rack$country=="Mozambique" & rack$year==1975] <- 1
rack$mkl.end[rack$country=="Mozambique" & rack$year==1992] <- 1
rack$mkl.ongoing[rack$country=="Mozambique" & rack$year>=1975 & rack$year<=1992] <- 1
rack$mkl.type[rack$country=="Mozambique" & rack$year==1975] <- 1
rack$mkl.ever[rack$country=="Mozambique" & rack$year>=1975] <- 1
# Angola, 1975-2002 (civil war)
rack$mkl.start[rack$country=="Angola" & rack$year==1975] <- 1
rack$mkl.end[rack$country=="Angola" & rack$year==2002] <- 1
rack$mkl.ongoing[rack$country=="Angola" & rack$year>=1975 & rack$year<=2002] <- 1
rack$mkl.type[rack$country=="Angola" & rack$year==1975] <- 1
rack$mkl.ever[rack$country=="Angola" & rack$year>=1975] <- 1
# Argentina, 1976-1983 (political opponents)
rack$mkl.start[rack$country=="Argentina" & rack$year==1976] <- 1
rack$mkl.end[rack$country=="Argentina" & rack$year==1983] <- 1
rack$mkl.ongoing[rack$country=="Argentina" & rack$year>=1976 & rack$year<=1983] <- 1
rack$mkl.type[rack$country=="Argentina" & rack$year==1976] <- 1
rack$mkl.ever[rack$country=="Argentina" & rack$year>=1976] <- 1
# South Africa, 1976-1994
rack$mkl.start[rack$country=="South Africa" & rack$year==1976] <- 1
rack$mkl.end[rack$country=="South Africa" & rack$year==1994] <- 1
rack$mkl.ongoing[rack$country=="South Africa" & rack$year>=1976 & rack$year<=1994] <- 1
rack$mkl.type[rack$country=="South Africa" & rack$year==1976] <- 1
rack$mkl.ever[rack$country=="South Africa" & rack$year>=1976] <- 1
# El Salvador, 1977-1992 [leftist guerrillas & supposed sympathizers]
rack$mkl.start[rack$country=="El Salvador" & rack$year==1977] <- 1
rack$mkl.end[rack$country=="El Salvador" & rack$year==1992] <- 1
rack$mkl.ongoing[rack$country=="El Salvador" & rack$year>=1977 & rack$year<=1992] <- 1
rack$mkl.type[rack$country=="El Salvador" & rack$year==1977] <- 1
rack$mkl.ever[rack$country=="El Salvador" & rack$year>=1977] <- 1
# Iran, 1978-1979 (political repression)
rack$mkl.start[rack$country=="Iran" & rack$year==1978] <- 1
rack$mkl.end[rack$country=="Iran" & rack$year==1979] <- 1
rack$mkl.ongoing[rack$country=="Iran" & rack$year>=1978 & rack$year<=1979] <- 1
rack$mkl.type[rack$country=="Iran" & rack$year==1978] <- 3
rack$mkl.ever[rack$country=="Iran" & rack$year>=1978] <- 1
# Iran, 1979-2010 (post-revolution, Kurds)
rack$mkl.start[rack$country=="Iran" & rack$year==1979] <- 1
rack$mkl.end[rack$country=="Iran" & rack$year==2010] <- 1
rack$mkl.ongoing[rack$country=="Iran" & rack$year>=1979 & rack$year<=2010] <- 1
rack$mkl.type[rack$country=="Iran" & rack$year==1979] <- 3
# Afghanistan, 1978-1992
rack$mkl.start[rack$country=="Afghanistan" & rack$year==1978] <- 1
rack$mkl.end[rack$country=="Afghanistan" & rack$year==1992] <- 1
rack$mkl.ongoing[rack$country=="Afghanistan" & rack$year>=1978 & rack$year<=1992] <- 1
rack$mkl.type[rack$country=="Afghanistan" & rack$year==1978] <- 1
rack$mkl.ever[rack$country=="Afghanistan" & rack$year>=1978] <- 1
# Syria, 1979-1985 (Muslim Brotherhood)
rack$mkl.start[rack$country=="Syria" & rack$year==1979] <- 1
rack$mkl.end[rack$country=="Syria" & rack$year==1985] <- 1
rack$mkl.ongoing[rack$country=="Syria" & rack$year>=1979 & rack$year<=1985] <- 1
rack$mkl.type[rack$country=="Syria" & rack$year==1979] <- 1
rack$mkl.ever[rack$country=="Syria" & rack$year>=1979] <- 1
# Bangladesh, 1980-1997 (Chittagong Hills insurgency)
rack$mkl.start[rack$country=="Bangladesh" & rack$year==1980] <- 1
rack$mkl.end[rack$country=="Bangladesh" & rack$year==1997] <- 1
rack$mkl.ongoing[rack$country=="Bangladesh" & rack$year>=1980 & rack$year<=1997] <- 1
rack$mkl.type[rack$country=="Bangladesh" & rack$year==1980] <- 1
rack$mkl.ever[rack$country=="Bangladesh" & rack$year>=1980] <- 1
# Peru, 1980-1992 (Shining Path)
rack$mkl.start[rack$country=="Peru" & rack$year==1980] <- 1
rack$mkl.end[rack$country=="Peru" & rack$year==1992] <- 1
rack$mkl.ongoing[rack$country=="Peru" & rack$year>=1980 & rack$year<=1992] <- 1
rack$mkl.type[rack$country=="Peru" & rack$year==1980] <- 1
rack$mkl.ever[rack$country=="Peru" & rack$year>=1980] <- 1
# Somalia, 1982-1990 (Barre vs. Issaqs & others)
rack$mkl.start[rack$country=="Somalia" & rack$year==1982] <- 1
rack$mkl.end[rack$country=="Somalia" & rack$year==1990] <- 1
rack$mkl.ongoing[rack$country=="Somalia" & rack$year>=1982 & rack$year<=1990] <- 1
rack$mkl.type[rack$country=="Somalia" & rack$year==1982] <- 1
rack$mkl.ever[rack$country=="Somalia" & rack$year>=1982] <- 1
# Chad, 1982-1990 (political repression/civil war Habre regime)
rack$mkl.start[rack$country=="Chad" & rack$year==1982] <- 1
rack$mkl.end[rack$country=="Chad" & rack$year==1990] <- 1
rack$mkl.ongoing[rack$country=="Chad" & rack$year>=1982 & rack$year<=1990] <- 1
rack$mkl.type[rack$country=="Chad" & rack$year==1982] <- 1
rack$mkl.ever[rack$country=="Chad" & rack$year>=1982] <- 1
# India, 1984-1994 (Punjab-Sikh) [insurgency]
rack$mkl.start[rack$country=="India" & rack$year==1984] <- 1
rack$mkl.end[rack$country=="India" & rack$year==1994] <- 1
rack$mkl.ongoing[rack$country=="India" & rack$year>=1984 & rack$year<=1994] <- 1
rack$mkl.type[rack$country=="India" & rack$year==1984] <- 1
rack$mkl.ever[rack$country=="India" & rack$year>=1984] <- 1
# Turkey, 1984-1999 (Kurds)
rack$mkl.start[rack$country=="Turkey" & rack$year==1984] <- 1
rack$mkl.end[rack$country=="Turkey" & rack$year==1999] <- 1
rack$mkl.ongoing[rack$country=="Turkey" & rack$year>=1984 & rack$year<=1999] <- 1
rack$mkl.type[rack$country=="Turkey" & rack$year==1984] <- 1
rack$mkl.ever[rack$country=="Turkey" & rack$year>=1984] <- 1
# South Yemen, 1986
rack$mkl.start[rack$country=="South Yemen" & rack$year==1986] <- 1
rack$mkl.end[rack$country=="South Yemen" & rack$year==1986] <- 1
rack$mkl.ongoing[rack$country=="South Yemen" & rack$year==1986] <- 1
rack$mkl.type[rack$country=="South Yemen" & rack$year==1986] <- 1
rack$mkl.ever[rack$country=="South Yemen" & rack$year>=1986] <- 1
# Uganda, 1986-2006 (LRA)
rack$mkl.start[rack$country=="Uganda" & rack$year==1986] <- 1
rack$mkl.end[rack$country=="Uganda" & rack$year==2006] <- 1
rack$mkl.ongoing[rack$country=="Uganda" & rack$year>=1986 & rack$year<=2006] <- 1
rack$mkl.type[rack$country=="Uganda" & rack$year==1986] <- 1
# Burma, 1988 (political repression)
# BUG FIX: every statement in this episode previously tested year==1986,
# contradicting the episode label above ("Burma, 1988"), so the 1988 event was
# never coded and 1986 was coded spuriously. Corrected to 1988.
# One-year episode: start, end, and ongoing all fall in the same year.
# (No mkl.ever line: Myanmar's mkl.ever is already 1 from its 1948- episode.)
rack$mkl.start[rack$country=="Myanmar" & rack$year==1988] <- 1
rack$mkl.end[rack$country=="Myanmar" & rack$year==1988] <- 1
rack$mkl.ongoing[rack$country=="Myanmar" & rack$year==1988] <- 1
rack$mkl.type[rack$country=="Myanmar" & rack$year==1988] <- 2
# Episode coding continued. Convention per episode: mkl.start/end = 1 in the
# first/last year, mkl.ongoing = 1 in every episode year, mkl.type recorded in
# the START year, mkl.ever set only for a country's first-ever episode (so it
# is omitted for Burundi and Indonesia below, which have earlier episodes).
# Burundi, 1988-2005
rack$mkl.start[rack$country=="Burundi" & rack$year==1988] <- 1
rack$mkl.end[rack$country=="Burundi" & rack$year==2005] <- 1
rack$mkl.ongoing[rack$country=="Burundi" & rack$year>=1988 & rack$year<=2005] <- 1
rack$mkl.type[rack$country=="Burundi" & rack$year==1988] <- 1
# Papua New Guinea, 1988-1998 (Bouganville)
rack$mkl.start[rack$country=="Papua New Guinea" & rack$year==1988] <- 1
rack$mkl.end[rack$country=="Papua New Guinea" & rack$year==1998] <- 1
rack$mkl.ongoing[rack$country=="Papua New Guinea" & rack$year>=1988 & rack$year<=1998] <- 1
rack$mkl.type[rack$country=="Papua New Guinea" & rack$year==1988] <- 1
rack$mkl.ever[rack$country=="Papua New Guinea" & rack$year>=1988] <- 1
# Indonesia, 1989-2005 (Aceh)
rack$mkl.start[rack$country=="Indonesia" & rack$year==1989] <- 1
rack$mkl.end[rack$country=="Indonesia" & rack$year==2005] <- 1
rack$mkl.ongoing[rack$country=="Indonesia" & rack$year>=1989 & rack$year<=2005] <- 1
rack$mkl.type[rack$country=="Indonesia" & rack$year==1989] <- 1
# Sri Lanka, 1989-1992 (JVP2)
# BUG FIX: mkl.type was previously assigned at year==1992 (the end year);
# every other episode in this file records mkl.type in the episode's START
# year, so the assignment is corrected to 1989.
# (No mkl.ever line: Sri Lanka's mkl.ever is already 1 from its 1971 episode.)
rack$mkl.start[rack$country=="Sri Lanka" & rack$year==1989] <- 1
rack$mkl.end[rack$country=="Sri Lanka" & rack$year==1992] <- 1
rack$mkl.ongoing[rack$country=="Sri Lanka" & rack$year>=1989 & rack$year<=1992] <- 1
rack$mkl.type[rack$country=="Sri Lanka" & rack$year==1989] <- 1
# Episode coding continued. Convention per episode: mkl.start/end = 1 in the
# first/last year, mkl.ongoing = 1 in every episode year, mkl.type recorded in
# the START year, mkl.ever set only for a country's first-ever episode.
# Romania, 1989
# One-year episode; type 2 (same code as the other brief one-year events --
# codebook meaning not visible here, TODO confirm).
rack$mkl.start[rack$country=="Romania" & rack$year==1989] <- 1
rack$mkl.end[rack$country=="Romania" & rack$year==1989] <- 1
rack$mkl.ongoing[rack$country=="Romania" & rack$year==1989] <- 1
rack$mkl.type[rack$country=="Romania" & rack$year==1989] <- 2
# Liberia, 1989-1990 (civil war)
rack$mkl.start[rack$country=="Liberia" & rack$year==1989] <- 1
rack$mkl.end[rack$country=="Liberia" & rack$year==1990] <- 1
rack$mkl.ongoing[rack$country=="Liberia" & rack$year>=1989 & rack$year<=1990] <- 1
rack$mkl.type[rack$country=="Liberia" & rack$year==1989] <- 1
rack$mkl.ever[rack$country=="Liberia" & rack$year>=1989] <- 1
# India, 1990-2011 (Kashmir)
rack$mkl.start[rack$country=="India" & rack$year==1990] <- 1
rack$mkl.end[rack$country=="India" & rack$year==2011] <- 1
rack$mkl.ongoing[rack$country=="India" & rack$year>=1990 & rack$year<=2011] <- 1
rack$mkl.type[rack$country=="India" & rack$year==1990] <- 1
# Rwanda, 1990-1994 (Hutu-Tutsi)
rack$mkl.start[rack$country=="Rwanda" & rack$year==1990] <- 1
rack$mkl.end[rack$country=="Rwanda" & rack$year==1994] <- 1
rack$mkl.ongoing[rack$country=="Rwanda" & rack$year>=1990 & rack$year<=1994] <- 1
rack$mkl.type[rack$country=="Rwanda" & rack$year==1990] <- 1
# Nigeria, 1990-2009 (Niger Delta)
rack$mkl.start[rack$country=="Nigeria" & rack$year==1990] <- 1
rack$mkl.end[rack$country=="Nigeria" & rack$year==2009] <- 1
rack$mkl.ongoing[rack$country=="Nigeria" & rack$year>=1990 & rack$year<=2009] <- 1
rack$mkl.type[rack$country=="Nigeria" & rack$year==1990] <- 1
# India, 1990-1991 (Assam)
# Overlaps the Kashmir episode above; both write the same values (1) into the
# shared 1990 cells, so the order of these two blocks does not matter.
rack$mkl.start[rack$country=="India" & rack$year==1990] <- 1
rack$mkl.end[rack$country=="India" & rack$year==1991] <- 1
rack$mkl.ongoing[rack$country=="India" & rack$year>=1990 & rack$year<=1991] <- 1
rack$mkl.type[rack$country=="India" & rack$year==1990] <- 1
# Chad, 1991-2003 (repression/war)
rack$mkl.start[rack$country=="Chad" & rack$year==1991] <- 1
rack$mkl.end[rack$country=="Chad" & rack$year==2003] <- 1
rack$mkl.ongoing[rack$country=="Chad" & rack$year>=1991 & rack$year<=2003] <- 1
rack$mkl.type[rack$country=="Chad" & rack$year==1991] <- 1
# Sierra Leone, 1991-2002
rack$mkl.start[rack$country=="Sierra Leone" & rack$year==1991] <- 1
rack$mkl.end[rack$country=="Sierra Leone" & rack$year==2002] <- 1
rack$mkl.ongoing[rack$country=="Sierra Leone" & rack$year>=1991 & rack$year<=2002] <- 1
rack$mkl.type[rack$country=="Sierra Leone" & rack$year==1991] <- 1
rack$mkl.ever[rack$country=="Sierra Leone" & rack$year>=1991] <- 1
# Iraq, 1991-2003 (Shiites)
rack$mkl.start[rack$country=="Iraq" & rack$year==1991] <- 1
rack$mkl.end[rack$country=="Iraq" & rack$year==2003] <- 1
rack$mkl.ongoing[rack$country=="Iraq" & rack$year>=1991 & rack$year<=2003] <- 1
rack$mkl.type[rack$country=="Iraq" & rack$year==1991] <- 1
# Yugoslavia, 1991-1992 (Croatian civil war)
rack$mkl.start[rack$country=="Yugoslavia" & rack$year==1991] <- 1
rack$mkl.end[rack$country=="Yugoslavia" & rack$year==1992] <- 1
rack$mkl.ongoing[rack$country=="Yugoslavia" & rack$year>=1991 & rack$year<=1992] <- 1
rack$mkl.type[rack$country=="Yugoslavia" & rack$year==1991] <- 1
# Algeria, 1991-2005 (Islamists)
rack$mkl.start[rack$country=="Algeria" & rack$year==1991] <- 1
rack$mkl.end[rack$country=="Algeria" & rack$year==2005] <- 1
rack$mkl.ongoing[rack$country=="Algeria" & rack$year>=1991 & rack$year<=2005] <- 1
rack$mkl.type[rack$country=="Algeria" & rack$year==1991] <- 1
# Azerbaijan, 1991-1994 (Nagorny Karabakh)
rack$mkl.start[rack$country=="Azerbaijan" & rack$year==1991] <- 1
rack$mkl.end[rack$country=="Azerbaijan" & rack$year==1994] <- 1
rack$mkl.ongoing[rack$country=="Azerbaijan" & rack$year>=1991 & rack$year<=1994] <- 1
rack$mkl.type[rack$country=="Azerbaijan" & rack$year==1991] <- 1
rack$mkl.ever[rack$country=="Azerbaijan" & rack$year>=1991] <- 1
# Haiti, 1991-1994 (political repression)
rack$mkl.start[rack$country=="Haiti" & rack$year==1991] <- 1
rack$mkl.end[rack$country=="Haiti" & rack$year==1994] <- 1
rack$mkl.ongoing[rack$country=="Haiti" & rack$year>=1991 & rack$year<=1994] <- 1
rack$mkl.type[rack$country=="Haiti" & rack$year==1991] <- 3
# Bosnia and Herzegovina, 1992-1995 (Bosnia)
rack$mkl.start[rack$country=="Bosnia and Herzegovina" & rack$year==1992] <- 1
rack$mkl.end[rack$country=="Bosnia and Herzegovina" & rack$year==1995] <- 1
rack$mkl.ongoing[rack$country=="Bosnia and Herzegovina" & rack$year>=1992 & rack$year<=1995] <- 1
rack$mkl.type[rack$country=="Bosnia and Herzegovina" & rack$year==1992] <- 1
rack$mkl.ever[rack$country=="Bosnia and Herzegovina" & rack$year>=1992] <- 1
# Afghanistan, 1992-1996 (Rabbani/Massoud v. Taliban et al.)
rack$mkl.start[rack$country=="Afghanistan" & rack$year==1992] <- 1
rack$mkl.end[rack$country=="Afghanistan" & rack$year==1996] <- 1
rack$mkl.ongoing[rack$country=="Afghanistan" & rack$year>=1992 & rack$year<=1996] <- 1
rack$mkl.type[rack$country=="Afghanistan" & rack$year==1992] <- 1
# Tajikistan, 1992-1997 (United Opposition)
# BUG FIX: mkl.ever previously used rack$year==1992, flagging only the single
# year 1992; every other country uses year >= start-year so the flag persists
# from the first episode onward. Corrected to >=.
rack$mkl.start[rack$country=="Tajikistan" & rack$year==1992] <- 1
rack$mkl.end[rack$country=="Tajikistan" & rack$year==1997] <- 1
rack$mkl.ongoing[rack$country=="Tajikistan" & rack$year>=1992 & rack$year<=1997] <- 1
rack$mkl.type[rack$country=="Tajikistan" & rack$year==1992] <- 1
rack$mkl.ever[rack$country=="Tajikistan" & rack$year>=1992] <- 1
# Episode coding continued. Convention per episode: mkl.start/end = 1 in the
# first/last year, mkl.ongoing = 1 in every episode year, mkl.type recorded in
# the START year, mkl.ever set only for a country's first-ever episode (hence
# omitted for Congo-Kinshasa and Rwanda below, which have earlier episodes).
# Georgia, 1992-1993 (Abkhazia)
rack$mkl.start[rack$country=="Georgia" & rack$year==1992] <- 1
rack$mkl.end[rack$country=="Georgia" & rack$year==1993] <- 1
rack$mkl.ongoing[rack$country=="Georgia" & rack$year>=1992 & rack$year<=1993] <- 1
rack$mkl.type[rack$country=="Georgia" & rack$year==1992] <- 1
rack$mkl.ever[rack$country=="Georgia" & rack$year>=1992] <- 1
# Rep. of Congo, 1992-1997 (Lissouba regime)
rack$mkl.start[rack$country=="Congo-Brazzaville" & rack$year==1992] <- 1
rack$mkl.end[rack$country=="Congo-Brazzaville" & rack$year==1997] <- 1
rack$mkl.ongoing[rack$country=="Congo-Brazzaville" & rack$year>=1992 & rack$year<=1997] <- 1
rack$mkl.type[rack$country=="Congo-Brazzaville" & rack$year==1992] <- 3
rack$mkl.ever[rack$country=="Congo-Brazzaville" & rack$year>=1992] <- 1
# Congo-Kinshasa, 1993-1997 (Kabila vs. Mobutu)
rack$mkl.start[rack$country=="Congo-Kinshasa" & rack$year==1993] <- 1
rack$mkl.end[rack$country=="Congo-Kinshasa" & rack$year==1997] <- 1
rack$mkl.ongoing[rack$country=="Congo-Kinshasa" & rack$year>=1993 & rack$year<=1997] <- 1
rack$mkl.type[rack$country=="Congo-Kinshasa" & rack$year==1993] <- 1
# Rwanda, 1994-1999 (Tutsi vs. Hutu)
rack$mkl.start[rack$country=="Rwanda" & rack$year==1994] <- 1
rack$mkl.end[rack$country=="Rwanda" & rack$year==1999] <- 1
rack$mkl.ongoing[rack$country=="Rwanda" & rack$year>=1994 & rack$year<=1999] <- 1
rack$mkl.type[rack$country=="Rwanda" & rack$year==1994] <- 1
# Russia, 1994-2009 (Chechnya) [mkl.end date added]
rack$mkl.start[rack$country=="Russia" & rack$year==1994] <- 1
rack$mkl.end[rack$country=="Russia" & rack$year==2009] <- 1
rack$mkl.ongoing[rack$country=="Russia" & rack$year>=1994 & rack$year<=2009] <- 1
rack$mkl.type[rack$country=="Russia" & rack$year==1994] <- 1
rack$mkl.ever[rack$country=="Russia" & rack$year>=1994] <- 1
# Nepal, 1995-2006 (Maoists) [end date added]
rack$mkl.start[rack$country=="Nepal" & rack$year==1995] <- 1
rack$mkl.end[rack$country=="Nepal" & rack$year==2006] <- 1
rack$mkl.ongoing[rack$country=="Nepal" & rack$year>=1995 & rack$year<=2006] <- 1
rack$mkl.type[rack$country=="Nepal" & rack$year==1995] <- 1
rack$mkl.ever[rack$country=="Nepal" & rack$year>=1995] <- 1
# Afghanistan, 1996-2001 (Taliban v. United Front)
rack$mkl.start[rack$country=="Afghanistan" & rack$year==1996] <- 1
rack$mkl.end[rack$country=="Afghanistan" & rack$year==2001] <- 1
rack$mkl.ongoing[rack$country=="Afghanistan" & rack$year>=1996 & rack$year<=2001] <- 1
rack$mkl.type[rack$country=="Afghanistan" & rack$year==1996] <- 1
# Congo-Brazzaville, 1997-2003 (Sassou regime)
rack$mkl.start[rack$country=="Congo-Brazzaville" & rack$year==1997] <- 1
rack$mkl.end[rack$country=="Congo-Brazzaville" & rack$year==2003] <- 1
rack$mkl.ongoing[rack$country=="Congo-Brazzaville" & rack$year>=1997 & rack$year<=2003] <- 1
rack$mkl.type[rack$country=="Congo-Brazzaville" & rack$year==1997] <- 1
# Former Yugoslavia, 1998-1999 (Kosovo)
rack$mkl.start[rack$country=="Serbia and Montenegro" & rack$year==1998] <- 1
rack$mkl.end[rack$country=="Serbia and Montenegro" & rack$year==1999] <- 1
rack$mkl.ongoing[rack$country=="Serbia and Montenegro" & rack$year>=1998 & rack$year<=1999] <- 1
rack$mkl.type[rack$country=="Serbia and Montenegro" & rack$year==1998] <- 1
rack$mkl.ever[rack$country=="Serbia and Montenegro" & rack$year>=1998] <- 1
# Congo-Kinshasa, 1998- (various counter-insurgency)
rack$mkl.start[rack$country=="Congo-Kinshasa" & rack$year==1998] <- 1
rack$mkl.ongoing[rack$country=="Congo-Kinshasa" & rack$year>=1998] <- 1
rack$mkl.type[rack$country=="Congo-Kinshasa" & rack$year==1998] <- 1
# Liberia, 2000-2003 (civil war-LURD & MODEL)
rack$mkl.start[rack$country=="Liberia" & rack$year==2000] <- 1
rack$mkl.end[rack$country=="Liberia" & rack$year==2003] <- 1
rack$mkl.ongoing[rack$country=="Liberia" & rack$year>=2000 & rack$year<=2003] <- 1
rack$mkl.type[rack$country=="Liberia" & rack$year==2000] <- 1
# Sudan, 2003- (Darfur)
rack$mkl.start[rack$country=="Sudan" & rack$year==2003] <- 1
rack$mkl.ongoing[rack$country=="Sudan" & rack$year>=2003] <- 1
rack$mkl.type[rack$country=="Sudan" & rack$year==2003] <- 1
# Sri Lanka, 2009 (northern offensive) [episode added]
rack$mkl.start[rack$country=="Sri Lanka" & rack$year==2009] <- 1
rack$mkl.end[rack$country=="Sri Lanka" & rack$year==2009] <- 1
rack$mkl.ongoing[rack$country=="Sri Lanka" & rack$year==2009] <- 1
rack$mkl.type[rack$country=="Sri Lanka" & rack$year==2009] <- 1
# Ivory Coast, 2010-2011 (suspected supporters of Outtara) [episode added]
rack$mkl.start[rack$country=="Ivory Coast" & rack$year==2010] <- 1
rack$mkl.end[rack$country=="Ivory Coast" & rack$year==2011] <- 1
rack$mkl.ongoing[rack$country=="Ivory Coast" & rack$year>=2010 & rack$year<=2011] <- 1
rack$mkl.type[rack$country=="Ivory Coast" & rack$year==2010] <- 1
rack$mkl.ever[rack$country=="Ivory Coast" & rack$year>=2010] <- 1
# Libya, 2011 (civil war) [episode added]
rack$mkl.start[rack$country=="Libya" & rack$year==2011] <- 1
rack$mkl.end[rack$country=="Libya" & rack$year==2011] <- 1
rack$mkl.ongoing[rack$country=="Libya" & rack$year==2011] <- 1
rack$mkl.type[rack$country=="Libya" & rack$year==2011] <- 1
rack$mkl.ever[rack$country=="Libya" & rack$year>=2011] <- 1
# Syria, 2011- (repression of civil resistance) [episode added]
rack$mkl.start[rack$country=="Syria" & rack$year==2011] <- 1
rack$mkl.ongoing[rack$country=="Syria" & rack$year>=2011] <- 1
rack$mkl.type[rack$country=="Syria" & rack$year==2011] <- 2
# Sudan, 2011- (South Kordofan/Blue Nile) [episode added]
rack$mkl.start[rack$country=="Sudan" & rack$year==2011] <- 1
rack$mkl.ongoing[rack$country=="Sudan" & rack$year>=2011] <- 1
rack$mkl.type[rack$country=="Sudan" & rack$year==2011] <- 1
# Egypt, 2013- (Muslim Brotherhood and other "terrorist" and opposition) [episode added]
rack$mkl.start[rack$country=="Egypt" & rack$year==2013] <- 1
rack$mkl.ongoing[rack$country=="Egypt" & rack$year>=2013] <- 1
# BUG FIX: mkl.type was previously set at year==2011, but this episode's
# onset is 2013 (see mkl.start/mkl.ongoing above and the comment); with no
# 2011 Egypt episode coded, the type flag never landed on the onset year.
rack$mkl.type[rack$country=="Egypt" & rack$year==2013] <- 2
# Nigeria, 2013- (counter-Boko Haram efforts) [episode added]
rack$mkl.start[with(rack, country == "Nigeria" & year == 2013)] <- 1
rack$mkl.ongoing[with(rack, country == "Nigeria" & year >= 2013)] <- 1
rack$mkl.type[with(rack, country == "Nigeria" & year == 2013)] <- 1
# South Sudan, 2013- (Nuer and others) [episode added]
rack$mkl.start[with(rack, country == "South Sudan" & year == 2013)] <- 1
rack$mkl.ongoing[with(rack, country == "South Sudan" & year >= 2013)] <- 1
rack$mkl.type[with(rack, country == "South Sudan" & year == 2013)] <- 1
rack$mkl.ever[with(rack, country == "South Sudan" & year >= 2013)] <- 1
# Keep only the id columns and the new mkl.* columns (by position), then
# order the rows by country code and year.
rack <- rack[, c(12, 2, 13:17)]
rack <- rack[with(rack, order(sftgcode, year)), ]
# Write out the file.
write.csv(rack, "data.out/mkl.csv", row.names = FALSE)
|
9fe6f8c474475db33b48fcc254b0280a52372896
|
557dab06d7b9fb268de1e9f6b4af549d2189a03d
|
/man/sentometrics-deprecated.Rd
|
02cde9e6ea4992b0cbc49b39967b237af6fd5608
|
[] |
no_license
|
vanpeltj/sentometrics
|
42072f2e42ae6e0881a733670828fa15b71bf2ae
|
505e45cd4a94a65b2bb3429a2c479025938475eb
|
refs/heads/master
| 2020-06-02T13:23:02.715948
| 2019-06-07T08:12:37
| 2019-06-07T08:12:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 6,004
|
rd
|
sentometrics-deprecated.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/deprecated.R
\name{sentometrics-deprecated}
\alias{sentometrics-deprecated}
\alias{fill_measures}
\alias{merge_measures}
\alias{to_global}
\alias{subset_measures}
\alias{select_measures}
\alias{setup_lexicons}
\alias{retrieve_attributions}
\alias{perform_agg}
\alias{plot_attributions}
\alias{almons}
\alias{exponentials}
\title{Deprecated functions}
\usage{
fill_measures(sentomeasures, fill)
merge_measures(...)
to_global(sentomeasures, lexicons, features, time)
subset_measures(sentomeasures, subset)
select_measures(sentomeasures, toSelect)
setup_lexicons(lexiconsIn, valenceIn, do.split)
retrieve_attributions(model, sentomeasures, do.normalize, refDates, factor)
perform_agg(sentiment, ctr)
plot_attributions(attributions, group, ...)
almons(n, orders, do.inverse, do.normalize)
exponentials(n, alphas)
}
\arguments{
\item{sentomeasures}{an appropriate \code{sentomeasures} object created using \code{\link{sento_measures}}.}
\item{fill}{an element of \code{c("zero", "latest", NA)}; the first and last assume missing dates represent zero sentiment,
the second assumes missing dates represent constant sentiment.}
\item{...}{(other) allowed input arguments.}
\item{lexicons}{a \code{numeric} vector of weights, of size \code{length(sentomeasures$lexicons)}, in the same order.
By default set to 1, which means equally weighted.}
\item{features}{a \code{numeric} vector of weights, of size \code{length(sentomeasures$features)}, in the same order.
By default set to 1, which means equally weighted.}
\item{time}{a \code{numeric} vector of weights, of size \code{length(sentomeasures$time)}, in the same order. By default
set to 1, which means equally weighted.}
\item{subset}{a logical expression indicating the rows to keep.}
\item{toSelect}{a \code{character} vector of the lexicon, feature and time weighting scheme names, to indicate which
measures need to be selected, or as a \code{list} of \code{character} vectors, possibly with separately specified
combinations (only consisting of one lexicon, one feature, and one time weighting scheme at maximum).}
\item{lexiconsIn}{a named \code{list} of (raw) lexicons, each element as a \code{data.table} or a \code{data.frame} with
respectively a words column and a polarity score column. A subset of the already formatted built-in lexicons
accessible via \code{list_lexicons} should be passed here first.}
\item{valenceIn}{a single valence word list as a \code{data.table} or a \code{data.frame} with respectively a \code{"x"}
and a \code{"y"} or \code{"t"} column. The first column has the words, \code{"y"} has the values for bigram
shifting, and \code{"t"} has the types of the valence shifter for a clustered approach to sentiment calculation
(supported types: \code{1} = negators, \code{2} = amplifiers, \code{3} = deamplifiers). If three columns
are provided, the first two will be considered only. This argument can be one of the already formatted
built-in valence word lists accessible via \code{list_valence_shifters}. A word that appears in both a lexicon
and the valence word list is prioritized as a lexical entry during sentiment calculation. If \code{NULL}, no valence word
list is part of this function's output, and is thus not applied in the sentiment analysis.}
\item{do.split}{a \code{logical} that if \code{TRUE} splits every lexicon into a separate positive polarity and negative
polarity lexicon.}
\item{model}{a \code{sentomodel} or \code{sentomodeliter} object created with \code{\link{sento_model}}.}
\item{do.normalize}{a \code{logical}, \code{TRUE} divides each element of every attribution vector at a given date by its
L2-norm at that date, normalizing the values between -1 and 1. The document attributions are not normalized. Or, for
\code{\link{almons}}, if \code{TRUE}, then polynomials should be normalized to unity.}
\item{refDates}{the dates (as \code{"yyyy-mm-dd"}) at which attribution is to be performed. These should be between the latest
date available in the input \code{sentomeasures} object and the first estimation sample date (that is, \code{model$dates[1]}
if \code{model} is a \code{sentomodel} object). All dates should also be in \code{get_dates(sentomeasures)}. If
\code{NULL} (default), attribution is calculated for all in-sample dates. Ignored if \code{model} is a \code{sentomodeliter}
object, for which attribution is calculated for all out-of-sample prediction dates.}
\item{factor}{the factor level as a single \code{character} vector for which attribution has to be calculated in
case of (a) multinomial model(s). Ignored for linear and binomial models.}
\item{sentiment}{output from a \code{\link{compute_sentiment}} call, computed from a \code{sentocorpus} object.}
\item{ctr}{output from a \code{\link{ctr_agg}} call. The \code{howWithin} and \code{nCore} elements are ignored.}
\item{attributions}{an \code{attributions} object created with \code{\link{attributions}}.}
\item{group}{a value from \code{c("lags", "lexicons", "features", "time")}.}
\item{orders}{a \code{numeric} vector as the sequence of the Almon orders (cf., \emph{b}). The maximum value
corresponds to \emph{B}.}
\item{do.inverse}{\code{TRUE} if the inverse Almon polynomials should be calculated as well.}
\item{alphas}{a \code{numeric} vector of decay factors.}
}
\description{
Functions deprecated due to changed naming or because functionality is discarded. Deprecated functions are made defunct
every 1 major or every 2 minor package updates. See the NEWS file for more information about since when or why functions
have been deprecated.
}
\seealso{
\code{\link{measures_fill}}
\code{\link{measures_merge}}
\code{\link{measures_global}}
\code{\link{measures_subset}}
\code{\link{measures_select}}
\code{\link{sento_lexicons}}
\code{\link{attributions}}
\code{\link{aggregate}}
\code{\link{plot.attributions}}
\code{\link{weights_almon}}
\code{\link{weights_exponential}}
}
\keyword{internal}
|
af46be08c421c5de554fc471e93f8f624316387b
|
c118908b1c8bad0914e38e43f1148b58364accc2
|
/tests/testthat.R
|
923d63343303f16f99505725ad251dcca81a2ce1
|
[] |
no_license
|
andrewcparnell/Bchron
|
baf98d6642a328ba3c83e8fcf2e04b6c0af86974
|
faa14f54444e7ec417e0e389596014a1c7645349
|
refs/heads/master
| 2023-06-27T02:01:46.417288
| 2023-06-08T11:17:34
| 2023-06-08T11:17:34
| 40,361,984
| 30
| 12
| null | 2022-04-05T20:46:28
| 2015-08-07T13:33:16
|
R
|
UTF-8
|
R
| false
| false
| 56
|
r
|
testthat.R
|
# Standard testthat entry point: discover and run every test under
# tests/testthat/ for the Bchron package.
library(testthat)
library(Bchron)
test_check("Bchron")
|
f66f9381650d2fe18d8348d5bcd4e8efc181e75d
|
9bcced85ea09fbbe1ad811d793e26110f03e3131
|
/inst/src/tabs/norm_data/svr_norm.R
|
2a31adb08a2cbbb86a73326d1724996949574802
|
[
"MIT"
] |
permissive
|
LUMC/dgeAnalysis
|
dd1f2b3b9ddd2cb58da81cb2975742f210cbf9d2
|
283dc32e68c8b4990b212ccd973589525d52f5b2
|
refs/heads/master
| 2023-04-15T21:43:13.639460
| 2022-08-11T08:24:58
| 2022-08-11T08:24:58
| 240,005,238
| 14
| 6
|
MIT
| 2022-08-11T08:25:22
| 2020-02-12T12:16:13
|
R
|
UTF-8
|
R
| false
| false
| 5,261
|
r
|
svr_norm.R
|
## Table of normalized counts; falls back to a one-cell placeholder table
## whenever the data is unavailable or rendering fails.
output[["normalized_counts"]] <- DT::renderDataTable({
  tryCatch({
    checkReload()
    DT::datatable(
      inUse_normDge$counts,
      options = list(pageLength = 50, scrollX = TRUE)
    )
  }, error = function(err) {
    placeholder <- data.frame(c("No data available in table"))
    DT::datatable(placeholder, rownames = FALSE, colnames = "")
  })
})
## Normalized distribution plot (line): per-sample density of log2CPM values.
## Returns NULL (no plot) until the grouping input exists or on any error.
output[["norm_dist_line"]] <- renderPlotly({
  tryCatch({
    checkReload()
    ## Only plot once the grouping UI element has been rendered.
    ## BUG FIX: the original used `break` here, which is illegal outside a
    ## loop; it only "worked" because the resulting error was swallowed by
    ## tryCatch. stop() makes the early exit explicit and intentional.
    if (is.null(input$norm_line_color)) {
      stop("UI input 'norm_line_color' not ready")
    }
    ## Get input data
    plot_data <- count_dist(inUse_normDge)
    ## Hover-text expression, evaluated later by the plotting helper
    text <- 'paste("Sample:", sample,
            "\nLog2CPM:", round(x, 2))'
    ## Create plot
    ggplotly(
      line_plot(
        df = plot_data,
        x = "x",
        y = "y",
        text = text,
        group = input$norm_line_color,
        title = "Gene count distribution",
        xlab = "Log2CPM",
        ylab = "Density"
      ),
      tooltip = "text"
    )
  }, error = function(err) {
    return(NULL)
  })
})
## Dropdown to choose the sample annotation used to colour the line plot.
## Choices are "Samples" plus every column of the sample metadata table.
output[["norm_line_color"]] <- renderUI({
  tryCatch({
    checkReload()
    grouping_choices <- c("Samples" = "sample", colnames(data_samples()))
    selectInput(
      inputId = "norm_line_color",
      label = "Group by:",
      choices = grouping_choices
    )
  }, error = function(err) {
    NULL
  })
})
## Normalized distribution plot (violin): log2CPM distribution per group.
## Returns NULL (no plot) until the grouping input exists or on any error.
output[["norm_dist_violin"]] <- renderPlotly({
  tryCatch({
    checkReload()
    ## Only plot once the grouping UI element has been rendered.
    ## BUG FIX: replaced an illegal `break` (no enclosing loop) with stop();
    ## the original relied on the resulting error being caught by tryCatch.
    if (is.null(input$norm_violin_group)) {
      stop("UI input 'norm_violin_group' not ready")
    }
    ## Get input data
    plot_data <- violin_dist(inUse_normDge, input$norm_violin_group)
    text <- 'paste("Sample:", sample)'
    ## Create plot
    gg <- ggplotly(
      violin_plot(
        df = plot_data,
        text = text,
        group = input$norm_violin_group,
        title = "Gene count distribution",
        xlab = "",
        ylab = "Log2CPM"
      ),
      tooltip = "text"
    )
    ## Fix labels & plot
    fix_violin_hover(gg)
  }, error = function(err) {
    return(NULL)
  })
})
## Dropdown to choose the sample annotation used to group the violin plot.
## Choices are "Samples" plus every column of the sample metadata table.
output[["norm_violin_group"]] <- renderUI({
  tryCatch({
    checkReload()
    grouping_choices <- c("Samples" = "sample", colnames(data_samples()))
    selectInput(
      inputId = "norm_violin_group",
      label = "Group by:",
      choices = grouping_choices
    )
  }, error = function(err) {
    NULL
  })
})
## Normalized voom plot: mean-variance trend of the normalized counts.
## Points are downsampled and rendered via WebGL for responsiveness;
## selections feed the "selected_norm_voom" table through source "norm_voom".
output[["norm_voom_plot"]] <- renderPlotly({
  tryCatch({
    checkReload()
    ## Get input data
    plot_data <- voom_data(inUse_normDge)
    ## ~1000 evenly spaced row indices keep the interactive plot light
    index <- round(seq(1, nrow(plot_data), length.out = 1000))
    text <- 'paste("Gene:", gene)'
    ## Create plot
    toWebGL(
      ggplotly(
        scatter_plot(
          df = plot_data,
          x = "x",
          y = "y",
          text = text,
          group = "Genes",
          index = index,
          key = "gene",
          title = "Voom Plot",
          xlab = "Average Log2 count",
          ## TYPO FIX: axis label previously read "Standart Deviation"
          ylab = "SQRT (Standard Deviation)"
        ),
        source = "norm_voom",
        tooltip = "text"
      ) %>% layout(dragmode = "select", clickmode = "event+select") %>%
        style(hoverinfo = "text")
    )
  }, error = function(err) {
    return(NULL)
  })
})
## Show amount of genes left after filtering
## NOTE(review): this reads `normDge` while the neighbouring outputs read
## `inUse_normDge` — possibly intentional (count before a later selection
## step?) but worth confirming against the rest of the server code.
output[["norm_voom_ngenes"]] <- renderUI({
  tryCatch({
    checkReload()
    h2("After filtering:", br(), nrow(normDge$counts), "Genes")
  }, error = function(err) {
    return(NULL)
  })
})
## Table of the genes currently lasso/box-selected in the voom plot.
## Shows a placeholder table when nothing is selected or on error.
output[["selected_norm_voom"]] <- DT::renderDataTable({
  tryCatch({
    checkReload()
    s <- event_data(event = "plotly_selected", source = "norm_voom")
    ## BUG FIX: the original called the undefined function throw() to jump
    ## to the error handler; stop() is the real R primitive for that.
    if (is.null(s)) {
      stop("No plot selection available")
    }
    DT::datatable(data.frame(inUse_normDge$counts)[unlist(s$key), ],
                  options = list(pageLength = 15, scrollX = TRUE))
  }, error = function(err) {
    return(DT::datatable(data.frame(c(
      "No data available in table"
    )), rownames = FALSE, colnames = ""))
  })
})
## INFORMATION BOXES
## Static explanatory text displayed beside each plot. informationBox() is
## a project helper that wraps the text in the app's info-box widget; the
## strings themselves are user-facing and rendered as-is.
output[["norm_dist_line_info"]] <- renderUI({
  infoText <-
    "The line plot shows the density of the log2CPM values per sample. The density shows the
    distribution of counts per sample and is used to detect large differences between
    samples."
  informationBox(infoText)
})
output[["norm_dist_violin_info"]] <- renderUI({
  infoText <-
    "The violin plot serves a similar purpose to the line plot, but the data can be viewed in a different way
    format. The distribution can be seen between the Log2CPM at the corresponding
    samples."
  informationBox(infoText)
})
output[["norm_voom_plot_info"]] <- renderUI({
  infoText <-
    "The voom plot provides a check on the filtering, which is performed at the beginning of the
    analysis. The method to calculate this is 'voom'. Voom is an acronym for
    mean variance modeling at the observational level. This means that the mean variance in
    the data is calculated and gives each observation a certain weight. Problems during the
    filtering of low expressed genes will be visible in this plot."
  informationBox(infoText)
})
|
9fff8d6bb8ed78c07d1257fd38d18bec0f7cc103
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/sysid/examples/sim.Rd.R
|
4444ef44d3bf57cb848e262cb73008fe010417b4
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 257
|
r
|
sim.Rd.R
|
# Extracted example from the sysid package's sim() help page.
library(sysid)
### Name: sim
### Title: Simulate response of dynamic system
### Aliases: sim
### ** Examples
# ARX Model
# 300-sample random Gaussian signal ("rgs") as the excitation input.
u <- idinput(300,"rgs")
# Polynomial model with A/B coefficients, one-sample input delay, and
# additive noise of variance 0.1.
model <- idpoly(A=c(1,-1.5,0.7),B=c(0.8,-0.25),ioDelay=1,
noiseVar=0.1)
# Simulate the model response to u, including the noise term.
y <- sim(model,u,addNoise=TRUE)
|
9288ff61ed77bb737bcdb80696e5989a1812ec9a
|
fcf3fc149193ee9c7250012d9c0a4633fe0050d2
|
/ajustements.R
|
55549f7ae31d6412066ad1def979464738dddc5b
|
[] |
no_license
|
cpauvert/m2-HMM-pRediction
|
9f7ec1ecde8b134abb509a86032254136bbea613
|
3c8c5635ec76c51a9c742c7c7d899d7a715dd979
|
refs/heads/master
| 2021-01-25T08:43:31.569571
| 2015-09-19T16:57:43
| 2015-09-19T16:57:43
| 41,036,581
| 0
| 0
| null | 2015-09-14T09:16:37
| 2015-08-19T13:43:12
|
R
|
UTF-8
|
R
| false
| false
| 4,600
|
r
|
ajustements.R
|
# Load the seqinr package (biological sequence utilities, read.fasta).
# FIX: use library() instead of require() — require() merely returns FALSE
# when the package is missing, letting the script continue to a confusing
# later failure; library() fails fast with a clear error.
library(seqinr)
# Load the project's helper functions (LogVraisCorr, estimMarkovK,
# rapportLogVrais, AIC/BIC helpers, ...).
source("fonctions.R")
# Question A
# For Markov models of order 0 to m.
# Fit the Markov chain order with a likelihood-ratio test: starting at
# order 0, test H0 (order m) against H1 (order m+1) and stop at the first
# order for which H0 can no longer be rejected at level `seuil`.
# Returns a one-column data.frame of p-values indexed by order.
ajustementRapportVrais<-function(mMax, fasta, alphabet, seuil=0.05){
# Using the likelihood-ratio test as the
# goodness-of-fit criterion, we look for
# the Markov chain order at which H0
# can no longer be rejected.
#
# Trace table of p-values.
# R indexing starts at 1 while orders start at 0, so row m+1 holds order m.
Trace<-data.frame(matrix(nrow = mMax+1,
ncol = 1,
dimnames=list(0:mMax,"pvalue")))
# print(Trace)
m<-0
repeat {
# Corrected log-likelihood under H0 (order m) ...
L0<-LogVraisCorr(matTrans = estimMarkovK(fasta, m, alphabet),
fasta = fasta,
ordre = m,
alphabet)
# ... and under H1 (order m+1).
L1<-LogVraisCorr(matTrans = estimMarkovK(fasta, m+1, alphabet),
fasta = fasta,
ordre = m+1,
alphabet)
# p-value of the likelihood-ratio test:
# H0: Markov model of order m
# H1: Markov model of order m+1
##################################
# /!\ Trace[m+1] corresponds to order m (artifact of R's 1-based indexing)
##################################
Trace[m+1, 1]<-rapportLogVrais(alphabet, ordre = m, L0, L1)
if(is.finite(Trace[m+1,1]) ) {
# Guard: only proceed when the ratio is a finite number (not NaN).
# cat(paste0("DBG: m=",m," trace[m+1]=",Trace[m+1,1],"\n"))
if ( (m == mMax) ){
# Stop the fitting when the maximum
# iteration has been reached.
cat(paste0("Itération max. Ordre de la chaîne : ",m,"\n"))
break
}
if( Trace[m+1,1] > seuil ){
# Stop the fitting when H1 is rejected (p-value > threshold):
# conclude order m if m = 0,
# and order m-1 otherwise.
if ( m == 0){
cat(paste0("Ordre chaîne Markov ajusté ",m,"\n"))
} else {
cat(paste0("Ordre chaîne Markov ajusté ",m-1,"\n"))
}
break
}
} else{
# Stop the fitting when the computed value is NaN.
cat(paste0("NaN calculée\nOrdre chaîne Markov ajusté ",m-1,"\n"))
break
}
m<-m+1
}
return(Trace)
}
# Fit the Markov chain order by minimizing an information criterion
# ("AIC" or "BIC"): increase the order until the criterion stops
# decreasing or mMax iterations are reached. Returns a one-column
# data.frame of criterion values indexed by order.
ajustementCritere<-function(critere, mMax, fasta, alphabet, eps=0.1){
# Using an information criterion to minimize (e.g. AIC/BIC),
# we look for the optimal Markov chain order
# within at most mMax iterations.
#
# Trace table of criterion values.
# R indexing starts at 1 while orders start at 0, so row m+1 holds order m.
Trace<-data.frame(matrix(nrow = mMax+1,
ncol = 1,
dimnames=list(0:mMax,critere)))
m<-0
repeat {
# Corrected log-likelihood of the order-m model.
L0<-LogVraisCorr(matTrans = estimMarkovK(fasta, m, alphabet),
fasta = fasta,
ordre = m,
alphabet)
# Rough (non-optimized) estimate of the number of free parameters.
Nparametre<-length(alphabet)*length(alphabet)^m
# Criterion value for order m.
Trace[m+1, 1]<-switch(critere,
"AIC" = AIC(L0, K = Nparametre),
"BIC" = BIC(L0, K = Nparametre,n=length(fasta)))
# Previous criterion value,
# initialized to Inf for m = 0.
critere.old<-ifelse(m == 0,Inf, Trace[m,1] )
cat(paste0("m: ",m," LL: ",L0," Trace:", Trace[m+1,1]," OLD: ", critere.old,"\n"))
if(is.finite(Trace[m+1,1]) ) {
if ( m == mMax ) {
# Stop the fitting
# when the maximum iteration has been reached.
cat(paste0("Itération max. Ordre de la chaîne avec ", critere," : ",m,"\n"))
break
}
if ( Trace[m+1,1] > critere.old ){
# Stop as soon as the criterion increases: the previous order wins.
cat(paste0("Ordre chaîne Markov ajusté avec ",critere," : ",m-1,"\n"))
break
}
} else{
# Stop the fitting when the computed value is NaN.
cat(paste0("NaN calculée\nOrdre chaîne Markov ajustée ",critere," : ",m-1,"\n"))
break
}
m<-m+1
}
return(Trace)
}
# Standard nucleotide alphabet.
ALPH<-c("a", "c", "g", "t")
# Data: Pyrococcus furiosus DSM 3638 sequences.
# NOTE(review): pfu50 is loaded but never used below — confirm whether the
# fits were also meant to be run on the 50-sequence subset.
pfu50<-read.fasta("headN50_Pfu_DSM3638.fasta")[[1]]
pfu<-read.fasta("complete_genome_Pfu_DSM3638.fasta")[[1]]
# Order fits: likelihood-ratio test, then AIC and BIC minimization.
pfuRapport<-ajustementRapportVrais(mMax = 7, fasta = pfu,alphabet = ALPH)
pfuAIC<-ajustementCritere("AIC",mMax = 7, fasta = pfu,ALPH)
pfuBIC<-ajustementCritere("BIC",mMax = 7, fasta = pfu,ALPH)
|
ef28ae33b13d9adeee1abb488b616fa32e2d565f
|
5982a3e5a3a21387dc3fc165ca412d155d74c996
|
/src/covariates_region_age.R
|
8d82b97261808c506b642265a230ac5f7cea608a
|
[
"MIT"
] |
permissive
|
bayesiandemography/iceland_migration
|
1e22fa3dd3910a983e5aa9de274217c1a45cdf26
|
1afda4c2ff170119f55a1271ec3e911943bfb03a
|
refs/heads/master
| 2021-03-04T03:26:14.462514
| 2020-03-09T13:17:15
| 2020-03-09T13:17:15
| 246,006,307
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 606
|
r
|
covariates_region_age.R
|
# Build the region-by-age log-population covariate table from the training
# population counts and save it for the downstream model scripts.
library(methods)
library(dplyr)
library(dembase)
library(docopt)
# Command-line interface in docopt format (string content is parsed at
# runtime — do not reformat).
'
Usage:
covariates_region_age.R [options]
Options:
--last_year_train [default: 2008]
' -> doc
opts <- docopt(doc)
# NOTE(review): docopt option values are character, so last_year_train is
# the string "2008"; the subarray() comparison below therefore matches the
# time dimension by label — confirm the labels are character years.
last_year_train <- opts$last_year_train
population_train <- readRDS("out/population_train.rds")
# Take the population in the last training year, collapse it to
# region x age, and derive log population as the covariate.
covariates_region_age <- population_train %>%
subarray(time == last_year_train) %>%
collapseDimension(margin = c("region", "age")) %>%
as.data.frame() %>%
mutate(logpopn = log(count)) %>%
select(region, age, logpopn)
saveRDS(covariates_region_age,
file = "out/covariates_region_age.rds")
|
9346b17d000d158da00a1f7e42fcbf11e3209b07
|
2b61ecac75f9600f6f643d75c38e07aec77fd8a0
|
/priv/R/testJsonLines.r
|
cdc67480e1b2392e08b50b19e5a0396278d9664c
|
[] |
no_license
|
ProtoLife/rscriptex
|
3d378ff89f51fd4d31f60d4533dbf30893e4dcd5
|
a5890806521990386dd7a8f8d23535fbe4f7b931
|
refs/heads/master
| 2022-03-07T09:50:21.235638
| 2019-10-22T18:42:39
| 2019-10-22T18:42:39
| 216,657,507
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 698
|
r
|
testJsonLines.r
|
#!/usr/bin/r
library(jsonlite, quietly = TRUE)
# Read JSON data from stdin
# Read a single line of JSON from stdin and parse it with jsonlite.
# Returns the parsed object on success, or a "try-error" object when the
# input is missing or not valid JSON (callers test with is(x, "try-error")).
readJsonLines <- function() {
  f <- file("stdin")
  open(f)
  # FIX: the original only closed the connection on the non-empty branch,
  # leaking it when no input arrived; on.exit() guarantees cleanup on
  # every exit path.
  on.exit(close(f), add = TRUE)
  strData <- readLines(con = f, n = 1)
  if (length(strData) > 0) {
    return(try(jsonlite::fromJSON(strData), silent = TRUE))
  }
  try(stop("No input received"), silent = TRUE)
}
# Determine command-line arguments: littler provides `argv`; under Rscript
# fall back to everything after --args.
cli_args <- if (exists("argv")) argv else commandArgs(trailingOnly = TRUE)
cat(paste("args:", paste(cli_args, collapse = " "), "\n"))
# A "try-error" result signals missing or malformed stdin input.
parsed <- readJsonLines()
if (inherits(parsed, "try-error")) {
  cat("Missing or invalid data")
} else {
  cat("Parsed!")
}
|
03141c38a39f36259d224cbe59c216da4bd8da70
|
4c177a38f944b6cb417d8a59e3f6b8984c37671c
|
/1pop/Power_1pop.r
|
a90b3cf65391f36f338078faa78b70772e50c8e8
|
[] |
no_license
|
anbena/ABC-FDSS
|
3e13b7d4af08bc4ad8e5308a5d3d388ac0b0c0ab
|
6b9098a00dc558e0798894bd95f5be5a2f7179f0
|
refs/heads/master
| 2021-06-22T21:47:32.412209
| 2020-12-09T10:11:20
| 2020-12-09T10:11:20
| 162,409,395
| 2
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,838
|
r
|
Power_1pop.r
|
#Usage: Rscript --vanilla ./Power_1pop.r <locuslength> <nrloci> <rec.rate> <nrchr>
# FIX: load the abcrf dependency with library() rather than require();
# require() only warns on a missing package, which would let the script run
# on to a confusing "could not find function" error later.
library(abcrf)
# Command-line arguments, in order: locus length, number of loci,
# recombination rate, number of chromosomes.
args<-commandArgs(trailingOnly=TRUE)
# Simulations per model; the first `pods` rows of each table are held out
# as pseudo-observed datasets (PODs) for the power analysis.
nsim<-50000
pods<-1000
# Number of trees grown per random forest.
ntrees<-500
# Directory holding the per-model summary-statistic tables.
fdir<-"ms1pop/"
# NOTE(review): commandArgs() already returns character, so as.character()
# is redundant (but harmless) here.
ll<-as.character(args[1])
nl<-as.character(args[2])
r<-as.character(args[3])
chr<-as.character(args[4])
parcomb<-length(ll)*length(nl)*length(r)*length(chr)
ris<-matrix(ncol=13,nrow=parcomb)#rows=parameter combination, columns=classification errors, prior error rate, true positive rates and mean posterior probability
colnames(ris)<-c("const_ClErr","bott_ClErr","expgrow_ClErr","struct_ClErr","PriorErrRate","const_pow","bott_pow","expgrow_pow","struct_pow","const_mpp","bott_mpp","expgrow_mpp","struct_mpp")
ris1<-matrix(ncol=8,nrow=pods)#model preferred and posterior probability for each model
colnames(ris1)<-c("const_modind","const_postpr","bott_modind","bott_postpr","expgrow_modind","expgrow_postpr","struct_modind","struct_postpr")
pc<-c()
q<-0
# Main loop over all parameter combinations (with the single-value inputs
# above, this runs exactly once). For each combination: load the four
# model tables, train an ABC random forest on the non-POD rows, then
# measure classification power on each model's PODs.
for (locusl in ll){
for (nloci in nl){
for (rec in r){
for (cr in chr){
a<-c()
q<-q+1
print(paste("Parameter combination: ",as.character(q),"/",as.character(parcomb),sep=""))
pc[q]<-paste("ll",locusl,"_nl",nloci,"_r",rec,"_nc",cr,sep="")
# Load the summary-statistic table for each demographic model; skip the
# whole combination (leaving NA rows in ris) if any file is missing.
name<-paste(fdir,"const","_ll",locusl,"_nl",nloci,"_r",rec,"_nc",cr,".tab",sep="")
if(file.exists(name)){
m1<-read.table(name)
}else{
next
}
name<-paste(fdir,"bott","_ll",locusl,"_nl",nloci,"_r",rec,"_nc",cr,".tab",sep="")
if(file.exists(name)){
m2<-read.table(name)
}else{
next
}
name<-paste(fdir,"expgrow","_ll",locusl,"_nl",nloci,"_r",rec,"_nc",cr,".tab",sep="")
if(file.exists(name)){
m3<-read.table(name)
}else{
next
}
name<-paste(fdir,"struct","_ll",locusl,"_nl",nloci,"_r",rec,"_nc",cr,".tab",sep="")
if(file.exists(name)){
m4<-read.table(name)
}else{
next
}
# Training set: all non-POD rows of the four models, labelled 1..4,
# keeping only columns with non-zero variance.
s<-rbind(m1[(pods+1):nsim,],m2[(pods+1):nsim,],m3[(pods+1):nsim,],m4[(pods+1):nsim,])
i<-factor(c(rep(1,nsim-pods),rep(2,nsim-pods),rep(3,nsim-pods),rep(4,nsim-pods)))
f<-apply(s,2,var)!=0
da<-data.frame(i,s[,f])
#Training Forest
a<-abcrf(i~.,data=da,lda=T,ntree=ntrees,paral=T,ncores=6)
#Power Constant model
# vK = fraction of PODs correctly allocated to model K (true positive
# rate); pK = mean posterior probability of those correct allocations.
b<-predict(object=a,obs=m1[1:pods,f],training=da,ntree=ntrees,paral=T,paral.predict=T,ncores=3,ncores.predict=3)
ris1[,1]<-b$allocation
ris1[,2]<-b$post.prob
v1<-sum(b$allocation==1)/length(b$allocation)
p1<-mean(b$post.prob[b$allocation==1])
#Power Bottleneck model
b<-predict(object=a,obs=m2[1:pods,f],training=da,ntree=ntrees,paral=T,paral.predict=T,ncores=3,ncores.predict=3)
ris1[,3]<-b$allocation
ris1[,4]<-b$post.prob
v2<-sum(b$allocation==2)/length(b$allocation)
p2<-mean(b$post.prob[b$allocation==2])
#Power Exponential model
b<-predict(object=a,obs=m3[1:pods,f],training=da,ntree=ntrees,paral=T,paral.predict=T,ncores=3,ncores.predict=3)
ris1[,5]<-b$allocation
ris1[,6]<-b$post.prob
v3<-sum(b$allocation==3)/length(b$allocation)
p3<-mean(b$post.prob[b$allocation==3])
#Power Stucture model
b<-predict(object=a,obs=m4[1:pods,f],training=da,ntree=ntrees,paral=T,paral.predict=T,ncores=3,ncores.predict=3)
ris1[,7]<-b$allocation
ris1[,8]<-b$post.prob
v4<-sum(b$allocation==4)/length(b$allocation)
p4<-mean(b$post.prob[b$allocation==4])
#Output files power analysis one-population models
# Per-combination summary row: out-of-bag class errors, prior error
# rate, power (v1..v4) and mean posterior probabilities (p1..p4).
k<-a$model.rf$confusion[,"class.error"]
ris[q,]<-c(k[1],k[2],k[3],k[4],a$prior.err,v1,v2,v3,v4,p1,p2,p3,p4)
write.table(ris1,paste("ll",locusl,"_nl",nloci,"_r",rec,"_nc",cr,"_FDSS.raw",sep=""),row.names=F,quote=F,sep="\t")
}
}
}
}
# NOTE(review): the filename below uses the loop variables after the loops
# end, so it reflects only the last combination — fine while each input is
# a single value, but fragile if the script is ever run over grids.
rownames(ris)<-pc
write.table(ris,paste("ll",locusl,"_nl",nloci,"_r",rec,"_nc",cr,"_FDSS.summary",sep=""),quote=F,sep="\t")
|
c3c893277fa2aea1b21d9dc76bd2633036167c62
|
99230d873a58fca944a5f8929850eb4597cf1bd8
|
/R/mergeSort.R
|
0769ae6fb0fcd022456a6b9f0d554b784c20324b
|
[
"BSD-2-Clause"
] |
permissive
|
sktivd/collabExample
|
51c1dc9ad85dd39e1b3c8d15ad862082374bef7d
|
b2496721351bf94f818ceccc171d51ec5e1cd7af
|
refs/heads/master
| 2021-01-16T17:53:11.838956
| 2017-09-03T23:59:47
| 2017-09-03T23:59:47
| 100,021,645
| 1
| 1
| null | 2017-09-04T00:18:35
| 2017-08-11T10:40:32
|
R
|
UTF-8
|
R
| false
| false
| 377
|
r
|
mergeSort.R
|
#' Merge Sort
#'
#' sort a numeric array by merge sort algorithm
#'
#' @param unsorted a numeric array
#' @param ascending sorted by ascending value (default: TRUE)
#' @return sorted numeric array
#' @examples
#' mergeSort(c(324, 234, 243))
#' @export
mergeSort <- function(unsorted, ascending = TRUE) {
.Call("merge_sort", unsorted, ascending, PACKAGE = "collabExample")
}
|
2da7de01e00b90e43784195d56e1789e23502a76
|
d06c45000e06134211c99add0beea81ee0452244
|
/02.DS-Visualization/S5-Data Visualization Principles/5.3 Data Visualization Principles 3/02. Exercise.R
|
3c30f7ad2b4eb01a179cbfb0e0d801da554e93da
|
[] |
no_license
|
dgpaniagua/data-science-assessments
|
24eb2d2c0f50a5a5f8b76db86e4f6fad0d803022
|
5c9af8ce74701f7df6431744128885e124cc7fe0
|
refs/heads/main
| 2023-09-05T14:01:52.179350
| 2021-11-22T04:40:59
| 2021-11-22T04:40:59
| 376,401,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,035
|
r
|
02. Exercise.R
|
##STATEMENT:
library(dplyr)
library(ggplot2)
library(dslabs)
library(RColorBrewer)
data(us_contagious_diseases)
the_disease = "Measles"
dat <- us_contagious_diseases %>%
filter(!state%in%c("Hawaii","Alaska") & disease == the_disease) %>%
mutate(rate = count / population * 10000) %>%
mutate(state = reorder(state, rate))
avg <- us_contagious_diseases %>%
filter(disease==the_disease) %>% group_by(year) %>%
summarize(us_rate = sum(count, na.rm=TRUE)/sum(population, na.rm=TRUE)*10000)
dat %>% ggplot() +
geom_line(aes(year, rate, group = state), color = "grey50",
show.legend = FALSE, alpha = 0.2, size = 1) +
geom_line(mapping = aes(year, us_rate), data = avg, size = 1, color = "black") +
scale_y_continuous(trans = "sqrt", breaks = c(5,25,125,300)) +
ggtitle("Cases per 10,000 by state") +
xlab("") +
ylab("") +
geom_text(data = data.frame(x=1955, y=50), mapping = aes(x, y, label="US average"), color="black") +
geom_vline(xintercept=1963, col = "blue")
##ANSWER
library(dplyr)
library(ggplot2)
library(dslabs)
library(RColorBrewer)
data(us_contagious_diseases)
the_disease = "Smallpox"
dat <- us_contagious_diseases %>%
filter(!state%in%c("Hawaii","Alaska") & disease == the_disease & weeks_reporting >= 10) %>%
mutate(rate = count / population * 10000) %>%
mutate(state = reorder(state, rate))
avg <- us_contagious_diseases %>%
filter(disease==the_disease) %>% group_by(year) %>%
summarize(us_rate = sum(count, na.rm=TRUE)/sum(population, na.rm=TRUE)*10000)
dat %>% ggplot() +
geom_line(aes(year, rate, group = state), color = "grey50",
show.legend = FALSE, alpha = 0.2, size = 1) +
geom_line(mapping = aes(year, us_rate), data = avg, size = 1, color = "black") +
scale_y_continuous(trans = "sqrt", breaks = c(5,25,125,300)) +
ggtitle("Cases per 10,000 by state") +
xlab("") +
ylab("") +
geom_text(data = data.frame(x=1955, y=50), mapping = aes(x, y, label="US average"), color="black") +
geom_vline(xintercept=1963, col = "blue")
|
74de740bf14bce70359a12461debf2514de6f170
|
671f8f374ab512a5e0b2742101252b8bea266c03
|
/src/runFishFlowallFolders.R
|
eafadd506ff98d78d0e840c2ea45feab7cbb15e1
|
[] |
no_license
|
MirceaDavidEsc/placozoa_internal_coordination
|
f8c3f2abd06ddeda876bfc3a73d05923334e0e9e
|
65657e04dd810baadbda03911dd304970cfdad7c
|
refs/heads/master
| 2023-07-15T01:23:21.069337
| 2021-08-26T09:19:11
| 2021-08-26T09:19:11
| 261,339,535
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,041
|
r
|
runFishFlowallFolders.R
|
# This script creates a list of all subdirectories of the dataSourceFolder directory. It copies the videos from the external hard drive to the local disk. Then it runs fishFlow on all the movies. The result is that all movies will have original-tracks.hdf5 made.
dataSourceFolder = "/Volumes/Untitled/ForFishFlow/"
dataProcessFolder = "/Users/mircea/Desktop/ForFishFlow/"
dir.create(dataProcessFolder)
setwd(dataSourceFolder)
allFilesToProcess = list.files(pattern="movie.avi",recursive=T,full.names=T,all.files=T)
for (movieFile in allFilesToProcess) {
folderName = basename(dirname(movieFile))
destinationFolder = paste(dataProcessFolder,folderName,sep="/")
dir.create(destinationFolder)
print("Copying file")
file.copy(from=movieFile,to=paste(destinationFolder,"movie.avi",sep="/"),overwrite=F)
setwd(destinationFolder)
if (!file.exists("original-tracks.hdf5")) {
system("fishFlow -i movie.avi --background=../background.tiff -d original-tracks.hdf5 --grid.width=174 --grid.height=130")
}
setwd(dataSourceFolder)
}
|
f69d9a2747a116581bad60be513a5c344b17ac86
|
9d1aa1752e422ab835cefe3f5db7f67a3b9bff35
|
/tests/testthat/test-auth.R
|
612b5bbe1e4d2566c222a0536bac6a088e38443f
|
[] |
no_license
|
fdlk/molgenis-r-armadillo
|
21888a478d520e552f55bdb9cecd93cf6149191c
|
f942ca6ce91c994423e9a2a44035110e082147d2
|
refs/heads/master
| 2023-08-14T17:43:31.477459
| 2021-09-17T20:38:03
| 2021-09-17T20:38:03
| 268,761,615
| 0
| 0
| null | 2020-06-02T09:46:09
| 2020-06-02T09:46:08
| null |
UTF-8
|
R
| false
| false
| 2,068
|
r
|
test-auth.R
|
content <- xml2::as_xml_document(
list(
AssumeRoleWithWebIdentityResponse =
structure(list(
AssumeRoleWithWebIdentityResult = list(
AssumedRoleUser = list(Arn = list(), AssumeRoleId = list()),
Credentials = list(
AccessKeyId = list("UNWUHGSS9OOE1L7I49X9"),
SecretAccessKey =
list("TiAe4MEuwgU+urcM94kLEORsk8PZtu+JJ6mmmOAj"),
Expiration = list("2020-07-10T10:55:53Z"),
SessionToken = list("edcba")
),
SubjectFromWebIdentityToken =
list("87ff7211-5050-4565-9137-f2547207c852")
),
ResponseMetadata = list(RequestId = list("16205C5CE86AE656"))
), xmlns = "https://sts.amazonaws.com/doc/2011-06-15/")
)
)
test_that("assume_role_with_webidentity retrieves credentials", {
response <- structure(list(status_code = 200), class = "response")
httr_post <- mock(response)
httr_content <- mock(content)
with_mock(
session <- armadillo.assume_role_with_web_identity(
"abcde", "https://example.org", 900
),
"httr::POST" = httr_post,
"httr::content" = httr_content
)
expect_equal(session, list(
AccessKeyId = "UNWUHGSS9OOE1L7I49X9",
SecretAccessKey = "TiAe4MEuwgU+urcM94kLEORsk8PZtu+JJ6mmmOAj",
SessionToken = "edcba"
))
expect_args(httr_post, 1,
"https://example.org",
query = list(
Action = "AssumeRoleWithWebIdentity",
DurationSeconds = 900,
WebIdentityToken = "abcde",
Version = "2011-06-15"
)
)
expect_equal(Sys.getenv("AWS_S3_ENDPOINT"), "example.org")
})
test_that("assume_role_with_webidentity sets port in AWS_S3_ENDPOINT", {
response <- structure(list(status_code = 200), class = "response")
httr_post <- mock(response)
httr_content <- mock(content)
with_mock(
session <- armadillo.assume_role_with_web_identity(
"abcde", "https://example.org:9000", 900
),
"httr::POST" = httr_post,
"httr::content" = httr_content
)
expect_equal(Sys.getenv("AWS_S3_ENDPOINT"), "example.org:9000")
})
|
e082414b8f467c85508ce33d8c44faa694228e87
|
ad21d3e027aa8d6b23f4ffa91c811bf686d77a9d
|
/done-r/proconditioning3.r
|
4f1b17cf57453e4d79a2166873bc5d5b9e852dcc
|
[] |
no_license
|
i-hs/R-project1
|
834462e81815ebeb70830bcb7bf0cac5760dfa90
|
dde28b1e722acf4ce2b5d5587e7141dc9367b9be
|
refs/heads/master
| 2022-11-18T14:10:41.379385
| 2019-12-12T01:54:28
| 2019-12-12T01:54:28
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,003
|
r
|
proconditioning3.r
|
# 유동인구 데이터 201417, 2018, 2019에 id값을 부여하고 위도 데이터와 join
rm(list = ls())
library(dplyr)
add19_coord = read.csv(file = 'data/everyCoords19.csv')
add18_coord = read.csv(file = 'data/everyCoords18.csv')
add17_coord = read.csv(file = 'data/everyCoords17.csv')
head(add19_coord)
#
flow19 <- read_excel('flowdata/flow2019.xlsx')
flow18 <- read_excel('flowdata/flow2018.xlsx')
flow17 <- read_excel('flowdata/flow201417.xlsx')
head(flow_19)
add19 = add19[-1011,] # add19 1011obs라 마지막 행 제거
#Join
flow19<- left_join(flow19, add_total, by = 'trdar_cd_nm')
head(flow19)
#id 열 제거
flow19 = flow19[,-535]
flow18<- left_join(flow18, add_total, by = 'trdar_cd_nm')
head(flow_18)
flow18 = flow18[,-535]
flow17<- left_join(flow17, add_total, by = 'trdar_cd_nm')
head(flow_17)
flow17 = flow17[,-535]
#csv 파일로 저장
write.csv(flow_19,'data/flow2019_coord.csv')
write.csv(flow_18,'data/flow2018_coord.csv')
write.csv(flow_17,'data/flow201417_coord.csv')
|
4e1b09ae5c40062c47a6e67c87a14fcc4f15dd6d
|
44f2e07bcf5aacb88a5351915f8b630d5f4a43d1
|
/RCode/UCR_transform/cbf_transform.R
|
6f72068584786a6903a4145e317b2c35d6887d83
|
[] |
no_license
|
seninp/cbf-dl
|
cae1cd741612b529e7013c1b34a07eb23ba57332
|
4f5c51c2911155c00e676b60ec4b30b1eb35be9b
|
refs/heads/master
| 2021-03-27T20:16:12.670557
| 2017-01-27T17:22:11
| 2017-01-27T17:22:11
| 74,592,613
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 542
|
r
|
cbf_transform.R
|
library(stringr)
##
conn <- file("../src/resources/data/CBF/CBF_TEST", open = "r")
linn <- readLines(conn)
##
ds <- (1:129)
##
for (i in 1:length(linn)) {
str <- str_trim(linn[i])
str_split <- unlist(strsplit(str, "\\s+"))
label <- as.numeric(str_split[1]) - 1
series <- as.numeric(str_split[2:129])
ds <- rbind(ds, c(series, label))
}
close(conn)
#
write.table(ds[-1,], "../src/resources/data/CBF/cbf_test_original.csv", col.names = F, row.names = F, sep = ",")
str(ds)
dd <- read.csv("../shingled_mutant_CBF.txt",header=F)
dd[1,]
|
9a9bcafc0e75709aa7958fd07bde3fff8907af0e
|
a8b6f5cbc0c677b0bb317b0740fac7e78938ab9e
|
/man/ex15.28.Rd
|
213db484232371654f4f809b7c4d61727f4a1cb0
|
[] |
no_license
|
cran/Devore5
|
784f86cb950d39003a797b309bde9ba5ea239795
|
3c6de7a6447f3be4b54d00832e23995ea9655aa5
|
refs/heads/master
| 2020-12-24T14:36:21.931350
| 2004-10-03T00:00:00
| 2004-10-03T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 502
|
rd
|
ex15.28.Rd
|
\name{ex15.28}
\alias{ex15.28}
\non_function{}
\title{data from exercise 15.28}
\description{
The \code{ex15.28} data frame has 8 rows and 3 columns.
}
\format{
This data frame contains the following columns:
\describe{
\item{Subject}{
a numeric vector
}
\item{Potato}{
a numeric vector
}
\item{Rice}{
a numeric vector
}
}
}
\details{
}
\source{
Devore, J. L. (2000) \emph{Probability and Statistics for Engineering and the Sciences (5th ed)}, Duxbury
}
\examples{
data(ex15.28)
}
\keyword{datasets}
|
8de34338ed4548d71a4a92fcccf5b1e0ccaf2f1c
|
839c3ae77ac66f15918357c01a2c08be2b7d934d
|
/R/Projects/Data Visualization/Data Visualization and Exploration with R/IntroR/Solutions/Chapter5_1.R
|
b532b55bd9e6a0c5a2dbb723b7b9ff5f5dbb2d4d
|
[] |
no_license
|
VN-Pikachu/Data-Science
|
d655d0683713f347a9d0a45fb47d303e9ceba039
|
694f47ca1c605ed33f154f293347f59bc5a81729
|
refs/heads/main
| 2023-03-20T00:36:34.351244
| 2021-03-27T13:27:53
| 2021-03-27T13:27:53
| 345,064,490
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 209
|
r
|
Chapter5_1.R
|
dfPop <- read_csv("CountryPopulation.csv", col_names = TRUE)
View(dfPop)
dfPop2 <- gather(dfPop, `2010`, `2011`, `2012`, `2013`, `2014`, `2015`, `2016`, `2017`, key = 'YEAR', value = 'POPULATION')
View(dfPop2)
|
428876681a2e00ddbb09b7b31aff0939316d9c64
|
bdc0c0fabc792a4265880df318ce4e6ec6eedd57
|
/R-BoxPlot.R
|
9b180df4beb8938b8b539fbb7fb9d0e6277c807a
|
[] |
no_license
|
jlrosasp/bedu-proyecto-r
|
cb16260c90be63df6139e784c5ffaa53f5598241
|
5bd57fe7723945582c4f74efaa2bc87b1bc5fc2d
|
refs/heads/master
| 2023-03-18T17:49:15.762746
| 2021-03-09T02:50:33
| 2021-03-09T02:50:33
| 345,863,716
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 911
|
r
|
R-BoxPlot.R
|
library(httr)
library(jsonlite)
#install.packages("plotly")
library(dplyr)
library(plotly)
library(ggplot2)
#Cubo de datos
repositorio = GET("https://api.datamexico.org/tesseract/cubes/imss/aggregate.jsonrecords?drilldowns%5B%5D=Date+Month.Date.Month&drilldowns%5B%5D=Sex.Sex.Sex&measures%5B%5D=Insured+Employment&parents=false&sparse=false")
repositorio
rawToChar(repositorio$content) #convierte en string o serie de caracteries
Datos = fromJSON(rawToChar(repositorio$content))
names(Datos)
Datos<-Datos$data
Datos <- Datos[,-c(1)] #elimina la primera columna
Datos <- Datos[,-c(3)] #elimina la primera columna
View(Datos)
#Convierte a un dataframe
Datos <- data.frame(Datos)
colnames(Datos)<- c("Mes", "Genero", "Asegurados")
# EDA
summary(Datos)
ggplot(Datos, aes(x = Mes, y = Asegurados, fill = Genero)) + geom_boxplot() +
ggtitle("Boxplots") +
xlab("Categorias") +
ylab("Mediciones")
|
b40e7bc03b3de9311a71b3918b0b6c55c8294cc6
|
9615fb602f18b98f2177ab6587b212fd4c4458df
|
/man/dot-plot_facet_wrap.Rd
|
e30422be106baa1d126332fa84c72e4b8abd1354
|
[
"MIT"
] |
permissive
|
keshav-motwani/ggexp
|
5d5e17069d000ef2240ec2fd333fe8d9a71518fc
|
6fa61934bd083be5aba2760b2793a356daa22ee3
|
refs/heads/master
| 2023-02-24T06:11:16.784465
| 2021-02-03T04:39:34
| 2021-02-03T04:39:34
| 207,692,137
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 532
|
rd
|
dot-plot_facet_wrap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_facets.R
\name{.plot_facet_wrap}
\alias{.plot_facet_wrap}
\title{Helper function to add facet_wrap to plot}
\usage{
.plot_facet_wrap(plot, facet_rows, facet_columns, ...)
}
\arguments{
\item{plot}{ggplot object to facet}
\item{facet_rows}{columns in data to facet}
\item{facet_columns}{same as facet_rows for facet_wrap - just combines these two with rows first then columns}
}
\value{
}
\description{
Helper function to add facet_wrap to plot
}
|
db3f8e4f2c6c60402ee134ad148c29c9f54f2f88
|
72242aa83c2c8d0de12db9365ccbb21d4f097c68
|
/R/angle.hy.R
|
1b04b5a8efbaab63deb676547eec24d496bfca8c
|
[] |
no_license
|
hyochoi/SCISSOR
|
4d932a333c51ea6591f8fe30502c94407eb03df8
|
377789106d34200baa1d4d826952e2ad5313ba3a
|
refs/heads/master
| 2022-07-20T18:20:54.990303
| 2022-07-03T18:37:53
| 2022-07-03T18:37:53
| 194,808,036
| 9
| 2
| null | 2021-11-19T04:09:35
| 2019-07-02T07:06:51
|
R
|
UTF-8
|
R
| false
| false
| 480
|
r
|
angle.hy.R
|
#'
#' @export
angle.hy = function(x,y,perp=TRUE){
angle2.hy=function(xx,y,perp=TRUE){
sumx2 = sum(xx^2); sumy2 = sum(y^2)
if ((sumx2>0) & (sumy2>0)) {
ang0 = min(1,as.vector((t(xx)%*%y)/sqrt(sumx2*sumy2)));
} else {
ang0 = 0
}
ang = acos(ang0)*(180/pi);
if (perp){
ang = min(ang,180-ang)
}
return(ang)
}
X = as.matrix(x); y = as.vector(y);
angle = apply(X,2,FUN=function(x){angle2.hy(x,y,perp=perp)});
return(angle);
}
|
9f42d05befe50e4a928a43511d8e6ba9d9fe2753
|
3b3674cc7cf9a06c1926533f532ccc091bac2f14
|
/30_mirna_seq/02_r_code/03_qpcr/01_chat_expression.R
|
c5be545204a7584e7436980756f587d5cb9077f3
|
[] |
no_license
|
slobentanzer/integrative-transcriptomics
|
8618c6eef9b58da9c31a188e34ff527f3f9f8d04
|
e9e0a7b6f7ed7687f40fbea816df9d094ba293a2
|
refs/heads/master
| 2022-07-18T10:32:51.331439
| 2021-01-19T15:17:52
| 2021-01-19T15:17:52
| 214,313,249
| 2
| 2
| null | 2022-06-29T17:42:35
| 2019-10-11T00:57:06
|
R
|
UTF-8
|
R
| false
| false
| 7,179
|
r
|
01_chat_expression.R
|
#CHAT EXPRESSION IN CNTF DIFFERENTIATION####
rm(list=ls())
home= '~/GitHub/'
rootdir = paste(home, "integrative-transcriptomics", sep="")
setwd(rootdir)
library(ggplot2)
# LA-N-2 ####
expr <- readRDS("./raw_data/raw_expression_qpcr_la2.rds")
#molecular weight of CNTF: 22,931 Da
mw <- 22931
expr$molarC <- as.numeric(as.character(expr$Concentration))*1E-9/mw*1E3*1E9 #1)ng->g, 2)g->mol, 3)ml->l 4)M->nM
expr$Day <- factor(expr$Day)
expr$Concentration <- factor(expr$Concentration)
ggplot(expr, aes(Day, FoldExpression, fill = Concentration)) +
geom_boxplot() +
scale_fill_brewer(palette = "Set3") +
scale_y_continuous(breaks = seq(1:10)) + theme(legend.justification=c(0,1), legend.position=c(0,1))
ggsave("./img/cntf_time_dose_boxplot.svg")
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
library(plyr)
#if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,
.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean (xx[[col]], na.rm=na.rm),
sd = sd (xx[[col]], na.rm=na.rm)
)
},
measurevar
)
# Rename the "mean" column
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
expr.fold.sum <- summarySE(expr, measurevar = "FoldExpression", groupvars = c("Concentration", "Day"))
pd <- position_dodge(0.2)
ggplot(expr.fold.sum, aes(Day, FoldExpression, color = Concentration, group = Concentration)) +
geom_line(position = pd) +
geom_errorbar(aes(ymin=FoldExpression-se, ymax=FoldExpression+se), width=.1, position = pd, color = "grey30") +
geom_point(position = pd, size=2, shape=21, fill="white") +
scale_y_continuous(breaks = seq(1:10)) + theme(legend.justification=c(0,1), legend.position=c(0,1))
ggsave("./img/cntf_time_dose_curve.svg")
#pval
t.test(expr$RelExpression[expr$Concentration == "0" & expr$Day == "II"],
expr$RelExpression[expr$Concentration == "1" & expr$Day == "II"])
t.test(expr$RelExpression[expr$Concentration == "0" & expr$Day == "II"],
expr$RelExpression[expr$Concentration == "10" & expr$Day == "II"])#
t.test(expr$RelExpression[expr$Concentration == "0" & expr$Day == "II"],
expr$RelExpression[expr$Concentration == "100" & expr$Day == "II"])#
t.test(expr$RelExpression[expr$Concentration == "0" & expr$Day == "III"],
expr$RelExpression[expr$Concentration == "1" & expr$Day == "III"])
t.test(expr$RelExpression[expr$Concentration == "0" & expr$Day == "III"],
expr$RelExpression[expr$Concentration == "10" & expr$Day == "III"])#
t.test(expr$RelExpression[expr$Concentration == "0" & expr$Day == "III"],
expr$RelExpression[expr$Concentration == "100" & expr$Day == "III"])#
t.test(expr$RelExpression[expr$Concentration == "0" & expr$Day == "IV"],
expr$RelExpression[expr$Concentration == "1" & expr$Day == "IV"])
t.test(expr$RelExpression[expr$Concentration == "0" & expr$Day == "IV"],
expr$RelExpression[expr$Concentration == "10" & expr$Day == "IV"])
t.test(expr$RelExpression[expr$Concentration == "0" & expr$Day == "IV"],
expr$RelExpression[expr$Concentration == "100" & expr$Day == "IV"])#
# LA-N-5 ####
expr <- readRDS("./raw_data/raw_expression_qpcr_la5.rds")
#molecular weight of CNTF: 22,931 Da
mw <- 22931
expr$molarC <- as.numeric(as.character(gsub("ng", "", expr$Concentration)))*1E-9/mw*1E3*1E9 #1)ng->g, 2)g->mol, 3)ml->l 4)M->nM
expr$Day <- factor(expr$Day)
expr$Concentration <- factor(expr$Concentration)
ggplot(expr, aes(Day, RelExpression, fill = Concentration)) +
geom_boxplot() +
scale_fill_brewer(palette = "Set3") +
scale_y_continuous(breaks = seq(1:10)) + theme(legend.justification=c(0,1), legend.position=c(0,1))
# Supplemental Figure####
ggsave("./img/cntf_time_dose_boxplot_la5.svg")
summarySE <- function(data=NULL, measurevar, groupvars=NULL, na.rm=FALSE,
conf.interval=.95, .drop=TRUE) {
library(plyr)
#if na.rm==T, don't count them
length2 <- function (x, na.rm=FALSE) {
if (na.rm) sum(!is.na(x))
else length(x)
}
# This does the summary. For each group's data frame, return a vector with
# N, mean, and sd
datac <- ddply(data, groupvars, .drop=.drop,
.fun = function(xx, col) {
c(N = length2(xx[[col]], na.rm=na.rm),
mean = mean (xx[[col]], na.rm=na.rm),
sd = sd (xx[[col]], na.rm=na.rm)
)
},
measurevar
)
# Rename the "mean" column
datac <- rename(datac, c("mean" = measurevar))
datac$se <- datac$sd / sqrt(datac$N) # Calculate standard error of the mean
# Confidence interval multiplier for standard error
# Calculate t-statistic for confidence interval:
# e.g., if conf.interval is .95, use .975 (above/below), and use df=N-1
ciMult <- qt(conf.interval/2 + .5, datac$N-1)
datac$ci <- datac$se * ciMult
return(datac)
}
expr.fold.sum <- summarySE(expr, measurevar = "RelExpression", groupvars = c("Concentration", "Day"))
pd <- position_dodge(0.2)
ggplot(expr.fold.sum, aes(Day, RelExpression, color = Concentration, group = Concentration)) +
geom_line(position = pd) +
geom_errorbar(aes(ymin=RelExpression-se, ymax=RelExpression+se), width=.1, position = pd, color = "grey30") +
geom_point(position = pd, size=2, shape=21, fill="white") +
scale_y_continuous(breaks = seq(1:10)) + theme(legend.justification=c(0,1), legend.position=c(0,1))
ggsave("./img/cntf_time_dose_curve_la5.svg")
#pval
t.test(expr$RelExpression[expr$Concentration == "000ng" & expr$Day == "2"],
expr$RelExpression[expr$Concentration == "001ng" & expr$Day == "2"])
t.test(expr$RelExpression[expr$Concentration == "000ng" & expr$Day == "2"],
expr$RelExpression[expr$Concentration == "010ng" & expr$Day == "2"])#
t.test(expr$RelExpression[expr$Concentration == "000ng" & expr$Day == "2"],
expr$RelExpression[expr$Concentration == "100ng" & expr$Day == "2"])
t.test(expr$RelExpression[expr$Concentration == "000ng" & expr$Day == "4"],
expr$RelExpression[expr$Concentration == "001ng" & expr$Day == "4"])
t.test(expr$RelExpression[expr$Concentration == "000ng" & expr$Day == "4"],
expr$RelExpression[expr$Concentration == "010ng" & expr$Day == "4"])#
t.test(expr$RelExpression[expr$Concentration == "000ng" & expr$Day == "4"],
expr$RelExpression[expr$Concentration == "100ng" & expr$Day == "4"])#
|
dba693376079b6ff69620fd46ad0898bda276210
|
6d3f42b98400fa95a472c839feea39ad056adb61
|
/man/SnowTemp.Rd
|
ac80b90bb6c956e3f4935b8ebdf6c7d0d8833fe1
|
[] |
no_license
|
vilnat/microclimf
|
3948256a9876f71144b62f8e0f2dda6828eff133
|
7fbe9b46d5ff12b79f41428eec01f705fc1dd9bd
|
refs/heads/main
| 2023-06-22T12:37:33.537763
| 2021-07-12T08:28:17
| 2021-07-12T08:28:17
| 368,167,358
| 0
| 0
| null | 2021-05-17T11:53:06
| 2021-05-17T11:53:06
| null |
UTF-8
|
R
| false
| true
| 1,042
|
rd
|
SnowTemp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/snowmodel.R
\name{SnowTemp}
\alias{SnowTemp}
\title{Estimates snow temperature}
\usage{
SnowTemp(
tc,
u2,
rh,
pk,
Rsw,
skyem,
snowalb,
snowem = 0.99,
zm = 0.002,
umin = 0.5
)
}
\arguments{
\item{tc}{a vector of air temperature at reference height (deg C)}
\item{u2}{a vector of wind at reference height (m/s)}
\item{rh}{a vector of relative humidities (percentage)}
\item{pk}{a vector of atmospheric pressure (kPa)}
\item{Rsw}{a vector of global shortwave radiation values (W/m^2)}
\item{snowalb}{a vector of snow albedos (as returned by [pSnow()])}
\item{snowem}{optionally, numeric value of snow emissivity}
\item{zm}{optionally, numeric value of roughness length for momentum transfer of snow surface (m)}
\item{umin}{optionally, numeric value indicating minimum wind speed used in conductivity calculations (m/s)}
}
\value{
a vector of snow temperatures (deg C)
}
\description{
The function `SnowTemp` estimates snow temperatures
}
|
899daf648d7b4443beffb65338138b50a5e186bd
|
850a5e7537ecd8a3fdda83199819bdfec3ea7a66
|
/man/layer_inset.Rd
|
9a2807098e778f8369f167f17380c65b49c5b10f
|
[
"MIT"
] |
permissive
|
elipousson/maplayer
|
709cf32944aa628820d62ade3d2802d01ddb9b9d
|
1267b6f1ec5551321457381f131412706993de7a
|
refs/heads/main
| 2023-08-31T10:25:04.096427
| 2023-08-25T17:33:29
| 2023-08-25T17:33:29
| 496,240,757
| 5
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 4,080
|
rd
|
layer_inset.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layer_inset.R
\name{layer_inset}
\alias{layer_inset}
\alias{make_inset_map}
\alias{stamp_inset_img}
\title{Use patchwork to create a map with an inset context map or figpatch to stamp
an inset image}
\usage{
layer_inset(
map = NULL,
inset = NULL,
position = "bottomright",
scale = 1,
nudge_x = 0,
nudge_y = 0,
align_to = "full",
...
)
make_inset_map(
map = NULL,
inset = NULL,
location = NULL,
context = NULL,
position = "bottomright",
scale = 1,
nudge_x = 0,
nudge_y = 0,
align_to = "full",
...
)
stamp_inset_img(
path,
plot = NULL,
img_margin = ggplot2::margin(0, 0, 0, 0),
position = "bottomright",
scale = 1,
nudge_x = 0,
nudge_y = 0,
align_to = "full",
...
)
}
\arguments{
\item{inset}{plot or map created with \code{\link[=ggplot2]{ggplot2()}} passed to p argument of
\code{\link[patchwork:inset_element]{patchwork::inset_element()}}. If both location and context are provided to
\code{\link[=make_inset_map]{make_inset_map()}}, inset is optional and any provided value is replaced
with a new layer created by \code{\link[=layer_location_context]{layer_location_context()}}.}
\item{position}{inset map position, Default: 'bottomright'. position,
nudge_x, and nudge_y are used to set the left, bottom, top, and right
parameters for \code{\link[patchwork:inset_element]{patchwork::inset_element()}}.}
\item{scale}{scale of inset map, defaults to 1.}
\item{nudge_x, nudge_y}{nudge x and/or y position of inset map, Default: 0.}
\item{align_to}{Specifies what \code{left}, \code{bottom}, etc should be relative to.
Either \code{'panel'} (default), \code{'plot'}, or \code{'full'}.}
\item{...}{
Arguments passed on to \code{\link[patchwork:inset_element]{patchwork::inset_element}}
\describe{
\item{\code{p}}{A grob, ggplot, patchwork, formula, raster, or nativeRaster object
to add as an inset}
\item{\code{left,bottom,right,top}}{numerics or units giving the location of the
outer bounds. If given as numerics they will be converted to \code{npc} units.}
\item{\code{on_top}}{Logical. Should the inset be placed on top of the other plot or
below (but above the background)?}
\item{\code{clip}}{Logical. Should clipping be performed on the inset?}
\item{\code{ignore_tag}}{Logical. Should autotagging ignore the inset?}
}}
\item{location}{A location passed to \code{\link[=layer_location_context]{layer_location_context()}}. This can be
a sf object, a ggplot layer, or a formula or function. If it is a formula
or function, it is applied to the context data is passed to the location
function and the results used as the data for the location layer.}
\item{context}{A \code{sf} object for context area or a ggplot layer representing
the context.}
\item{path}{image path passed to \code{\link[figpatch:fig]{figpatch::fig()}} for \code{\link[=stamp_inset_img]{stamp_inset_img()}}}
\item{plot, map}{plot or map created with \code{\link[=ggplot2]{ggplot2()}}}
\item{img_margin}{margin around image for \code{\link[=stamp_inset_img]{stamp_inset_img()}} created by
\code{\link[ggplot2:element]{ggplot2::margin()}}. Defaults to no margin.}
}
\value{
ggplot2 map with inset map added using patchwork
}
\description{
\code{\link[=layer_inset]{layer_inset()}} is useful when you want to add an inset to a plot.
}
\details{
\code{\link[=make_inset_map]{make_inset_map()}} is useful for creating an inset map just using the
location with fewer options for customization. In that case, the ...
parameters are passed to \code{\link[=layer_location_context]{layer_location_context()}} instead of
\code{\link[patchwork:inset_element]{patchwork::inset_element()}}
\code{\link[=stamp_inset_img]{stamp_inset_img()}} is useful for applying a logo to a map. The ...
parameters are passed to \code{\link[figpatch:fig]{figpatch::fig()}}
Note, currently, plots created with \code{\link[=layer_inset]{layer_inset()}} do not work with
\code{\link[=map_ggsave_ext]{map_ggsave_ext()}} using the \code{single_file = TRUE} parameter.
}
|
e6f2db3532c36fb0b4fbd30e7824c5ed5fbdaeaa
|
29fa35352970c306fb571fdaea7655ec8cb89fe8
|
/R/abbreviate.R
|
5b394f10f465ca9d1b1286f297c4cdeadb0308f6
|
[] |
no_license
|
kardinal-eros/linktaxa
|
d60f1c95d825afaa3f04c5aace69f759e05534e4
|
7e1d00c39552982a0d11961fc4334de545429817
|
refs/heads/master
| 2020-06-02T03:49:38.931966
| 2017-01-16T11:19:20
| 2017-01-16T11:19:20
| 14,984,203
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,826
|
r
|
abbreviate.R
|
# select common string positions
".first" <- function (x, nchar = 4) {
lapply(lapply(x, "[", 1), "substring", 1, nchar)
}
".pair" <- function (x, nchar = 4) {
lapply(lapply(x, "[", c(1,2)), "substring", 1, nchar)
}
".triple" <- function (x, nchar = 4) {
lapply(lapply(x, "[", c(1,2,4)), "substring", 1, nchar)
}
abbreviateSensuLato <- function (x) {
a <- lapply(x, "paste", collapse = " ")
a <- strsplit(stripSensuLato(unlist(a)), " ", fixed = TRUE)
a <- lapply(lapply(a, "substring", 1, 4), "paste", collapse = " ")
a <- lapply(a, function (x) paste(x, "slat", collapse = " "))
return(a)
}
abbreviateSensuStricto <- function (x) {
a <- lapply(x, "paste", collapse = " ")
a <- strsplit(expandSensuStricto(a), " ", fixed = TRUE)
a <- lapply(lapply(.triple(a), "substring", 1, 4), "paste", collapse = " ")
return(a)
}
abbreviateSubspecies <- function (x) {
.triple(x)
}
abbreviateVariety <- function (x) {
.triple(x)
}
abbreviateHybrid <- function (x) {
stop("hybrid abbrevaitions not implemented yet")
}
abbreviateAffinis <- function (x) {
stop("affinis abbrevaitions not implemented yet")
}
abbreviateAggregate <- function (x) {
a <- lapply(.pair(x), function (x) paste(x, "aggr", collapse = " "))
return(a)
}
abbreviateGenus <- function (x) {
a <- lapply(.first(x), function (x) paste(x, "spec ies", collapse = " "))
return(a)
}
abbreviateTaxa <- function (x) {
# x <- c("Ramalina fraxinea s.str.", "Leontodon hispidus s.lat.")
w <- isWhat(x)
s <- .split0(x)
r <- vector("character", length = length(x))
for (i in 1:nrow(w)) {
if (any(w[i,])) {
ii <- w[i, ]
# select tool
t <- names(ii)[which(ii)]
t <- gsub("is", "abbreviate", t, fixed = TRUE)
ri <- do.call(t, list(s[i]))
} else {
ri <- lapply(.pair(s[i]), ".paste0")
}
r[i] <- tolower(unlist(ri))
}
return(r)
}
|
5ad8b7afc5be63669aedd7423affd67c12f26828
|
d22663b34ba75a8d5d5860dcc3649c20067e7a03
|
/man/split_matrix.Rd
|
c31f4d0f649344dc8a8efe119ce9e79b50384eca
|
[] |
no_license
|
CollinErickson/sFFLHD
|
23c973094c12266eba4bddf99f08dd7ba01507f9
|
f72d222410eccc89a5baf13f2711ad4eba42e47e
|
refs/heads/master
| 2020-09-14T17:54:54.224379
| 2019-02-15T21:11:13
| 2019-02-15T21:11:13
| 66,681,013
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 853
|
rd
|
split_matrix.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/split_matrix.R
\name{split_matrix}
\alias{split_matrix}
\title{Split a matrix by rows, based on either the number of rows per group
or number of splits.}
\usage{
split_matrix(mat, rowspergroup = NULL, nsplits = NULL,
shuffle = TRUE)
}
\arguments{
\item{mat}{A matrix to be split.}
\item{rowspergroup}{Number of rows in a group.}
\item{nsplits}{Number of splits to make.}
\item{shuffle}{Should the splits be shuffled before returning?}
}
\value{
A list of the splits of the matrix.
}
\description{
Split a matrix by rows, based on either the number of rows per group
or number of splits.
}
\examples{
mat <- matrix(1:12, ncol=2)
split_matrix(mat, 4, shuffle=FALSE)
split_matrix(mat, 4, shuffle=TRUE)
split_matrix(mat, nsplits=3, shuffle=FALSE) # same as 4 rowspergroup
}
|
65d1e7f9e95a61d66eead66fbd4f93ae45d84f1c
|
a4d350b5c39e05ed9a26073160079c1a443b57c6
|
/cachematrix.R
|
225e2bf2cccedca911d11d600638373150f46854
|
[] |
no_license
|
souravsaha1605/ProgrammingAssignment2
|
ff87faeaf06885e8f3da5ed02e9d24160fe3a8ec
|
3242e845cb7a394a96dc5df5fbf30ac5f8e4920d
|
refs/heads/master
| 2021-01-19T09:58:46.632132
| 2017-04-10T13:50:52
| 2017-04-10T13:50:52
| 87,806,873
| 0
| 0
| null | 2017-04-10T12:14:16
| 2017-04-10T12:14:16
| null |
UTF-8
|
R
| false
| false
| 2,201
|
r
|
cachematrix.R
|
## Put comments here that give an overall description of what your
## functions do
## Matrix inversion is usually a costly computation and there may be some benefit to caching
## the inverse of a matrix rather than compute it repeatedly.
## Below are two functions that are used to calculate and cache the inverse of a matrix.
## Note that these functions assume that the matrix supplied is always invertible.
## Write a short comment describing this function
## This function creates a special "matrix" object that can cache its inverse.
## It contains a list of functions to
## - set the value of the matrix
## - get the value of the matrix
## - set the value of the inverse of the matrix
## - get the value of the inverse of the matrix
makeCacheMatrix <- function(x = matrix()) {
  ## Create a special "matrix" object that can cache its inverse.
  ##
  ## x = an invertible matrix (default: empty 1x1 matrix)
  ##
  ## Returns a list of four accessor functions:
  ##   set(y)            -- replace the stored matrix and invalidate the cache
  ##   get()             -- return the stored matrix
  ##   set_inverse(inv)  -- store a computed inverse in the cache
  ##   get_inverse()     -- return the cached inverse (NULL if not computed)
  i <- NULL  # cached inverse; NULL means "not computed yet"
  set <- function(y) {
    x <<- y   # replace the matrix in the enclosing environment
    ## BUG FIX: the original reset a non-existent variable `m`, so a stale
    ## inverse survived a call to set(). Reset the real cache variable `i`.
    i <<- NULL
  }
  get <- function() x                 # return the stored matrix
  set_inverse <- function(inverse) {
    i <<- inverse                     # store the inverse computed by the caller
  }
  get_inverse <- function() i         # return the cached inverse (or NULL)
  list(set = set, get = get,          # expose the accessors by name
       set_inverse = set_inverse,
       get_inverse = get_inverse)
}
## Write a short comment describing this function
## This function computes the inverse of the special "matrix" returned by makeCacheMatrix above.
## If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
cacheSolve <- function(x, ...) {
  ## Return the inverse of the special "matrix" object `x` (as produced by
  ## makeCacheMatrix), reusing the cached inverse when one is available.
  cached <- x$get_inverse()
  if (!is.null(cached)) {
    ## Cache hit: announce it and return the stored inverse immediately.
    message("getting cached data")
    return(cached)
  }
  ## Cache miss: compute the inverse of the stored matrix and remember it
  ## so the next call can skip the computation.
  inv <- solve(x$get())
  x$set_inverse(inv)
  inv
}
|
997f251be7352fa7a55970da31d8303bc5beab4d
|
5302cef45f9290abd8654dfd3f07a4465399674c
|
/svm.R
|
2ec20887050871233b78fe754e3f616681d876df
|
[] |
no_license
|
mnaylor5/PortoSeguro
|
14833000401f3da938fe94fd92a45b389bda322d
|
9ed68dd1367df68ec244166523d7f3a25bba1497
|
refs/heads/master
| 2021-07-24T05:39:05.949368
| 2017-11-05T20:39:39
| 2017-11-05T20:39:39
| 105,821,668
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,180
|
r
|
svm.R
|
# Fit SVM with all predictors on the claim-prediction data.
# NOTE(review): the hard-coded setwd() ties this script to one machine;
# _utils.R presumably supplies fread/dplyr -- confirm before relocating.
setwd('C:/Users/Mitch/Desktop/Kaggle/Claim Prediction')
source('_utils.R')
library(liquidSVM)

# Read in training data
full <- fread('data/train.csv')

# Names of every predictor (everything except the id and target columns)
all_predictors <- colnames(full)[!(colnames(full) %in% c('id', 'target'))]

# Random seed for reproducibility
# Different seed from GBM
set.seed(865)
full$random <- runif(nrow(full))

# 70/30 train/test split on the uniform draw
train <- full %>% filter(random > 0.3) %>%
  mutate(target = as.factor(target)) %>%
  select(target, all_predictors)
test <- full %>% filter(random <= 0.3) %>%
  mutate(target = as.factor(target)) %>%
  select(target, all_predictors)

# Formula (built for reference; the svm() call below uses target ~ .)
svm.fmla <- as.formula(
  paste('~', paste(all_predictors, collapse = ' + '), '-1')
)

# Fit SVM and report elapsed time
start <- Sys.time() # kick off a timer
svm_test <- svm(target ~ .,
                train,
                testdata = test %>% select(-target),
                testdata_labels = test$target,
                predict.prob = TRUE,  # was `T`, which can be reassigned
                max_gamma = 25)
Sys.time() - start

# Class-probability predictions for the held-out rows
prediction <- predict(svm_test, test %>% select(-target))
colnames(prediction) <- c('p0', 'p1')

# BUG FIX: the predictions live in `prediction`, not in a `test` column;
# summary(test$prediction)/str(test$prediction) inspected a NULL.
summary(prediction)
str(prediction)
|
30dec945cb293435cd7d750ecbfc647b4f463379
|
e4228d2482a085d3355964c278269a082959e038
|
/R/mc6_mthds.R
|
b486e71859dd2d63c95bbd781689f8eeb8479693
|
[
"MIT"
] |
permissive
|
USEPA/CompTox-ToxCast-tcpl
|
08d9667ee76532382f2ef2fe7da2e7b8ebc26b2b
|
a8582c61883ba6e6f25b503cfa1f1b37605e3b29
|
refs/heads/main
| 2023-08-30T12:53:21.736913
| 2023-08-24T13:40:02
| 2023-08-24T13:40:02
| 89,386,154
| 23
| 13
|
NOASSERTION
| 2023-09-13T14:07:15
| 2017-04-25T17:05:00
|
R
|
UTF-8
|
R
| false
| false
| 15,628
|
r
|
mc6_mthds.R
|
#-------------------------------------------------------------------------------
# mc6_mthds: Load list of flag methods (to be used at level 6)
#-------------------------------------------------------------------------------
#' @name MC6_Methods
#' @title Load list of level 6 multiple-concentration flag methods
#'
#' @description
#' \code{mc6_mthds} returns a list of flag methods to be used
#' during level 6 multiple-concentration processing.
#'
#' @return A list functions
#'
#' @seealso \code{\link{mc6}}, \code{\link{Method functions}} to query what
#' methods get applied to each aeid.
#'
#' @section Available Methods:
#'
#' More information about the level 6 multiple-concentration processing is
#' available in the package vignette, "Data_processing."
#'
#' \describe{
#' \item{modl.directionality.fail}{Flag series if model directionality is questionable, i.e. if
#' the winning model direction was opposite, more responses (resp) would have exceeded the cutoff
#' (coff). If loss was winning directionality (top < 0), flag if
#' \eqn{count(resp<-1*coff)<2*count(resp>coff)}{count(resp < -1(coff)) < 2(count(resp > coff))}.
#' If gain was winning directionality (top > 0), flag if
#' \eqn{count(resp>coff)<2*count(resp<-1*coff)}.}
#' \item{low.nrep}{Flag series if the average number of replicates per concentration is less than
#' 2; \eqn{nrep < 2}{nrep < 2}.}
#' \item{low.nconc}{Flag series if 4 concentrations or less were tested; \eqn{nconc<=4}{nconc<=4}.
#' }
#' \item{bmd.high}{Flag series if modeled benchmark dose (BMD) is greater than AC50
#' (concentration at 50 percent maximal response). This is indicates high variability in baseline
#' response in excess of more than half of the maximal response.}
#' \item{singlept.hit.high}{Flag single-point hit that's only at the highest conc tested, where
#' series is an active hit call (hitc >= 0.9) with the median response observed above baseline
#' occurring only at the highest tested concentration tested.}
#' \item{singlept.hit.mid}{Flag single-point hit that's not at the highest conc tested, where
#' series is an active hit call (hitc >= 0.9) with the median response observed above baseline
#' occurring only at one concentration and not the highest concentration tested.}
#' \item{multipoint.neg}{Flag multi-point miss, where series is an inactive hit call (hitc < 0.9)
#' with multiple median responses observed above baseline.}
#' \item{gnls.lowconc}{Flag series where winning model is gain-loss (gnls) and the gain AC50 is
#' less than the minimum tested concentration, and the loss AC50 is less than the mean tested
#' concentration.}
#' \item{noise}{Flag series as noisy if the quality of fit as calculated by the root mean square
#' error (rmse) for the series is greater than the cutoff (coff); \eqn{rmse > coff}{rmse > coff}.}
#' \item{border}{Flag series if borderline activity is suspected based on modeled top parameter
#' (top) relative to cutoff (coff); \eqn{|top|<=1.2*coff~or~|top|>=0.8*coff}{|top| <= 1.2(coff) or
#' |top| >= 0.8(coff)}.}
#' \item{overfit.hit}{Method not yet updated for tcpl implementation. Flag hit-calls that would
#' get changed after doing the small N correction to the aic values.}
#' \item{efficacy.50}{Flag low efficacy hits if series has an active hit call (hitc >= 0.9) and
#' efficacy values (e.g. top and maximum median response) less than 50 percent; intended for
#' biochemical assays. If \eqn{hitc>=0.9}{hitc>=0.9} and \eqn{coff>=5}{coff>=5}, then flag when
#' \eqn{top<50}{top<50} or \eqn{maxmed < 50}{max_med < 50}. If \eqn{hitc>=0.9}{hitc>=0.9} and
#' \eqn{coff<5}{coff<5}, then flag when \eqn{top<\log_{2}{1.5}}{top<log2(1.5)} or
#' \eqn{maxmed<\log_{2}{1.5}}{max_med<log2(1.5)}.}
#' \item{ac50.lowconc}{Flag series with an active hit call (hitc >= 0.9) if AC50 (concentration
#' at 50 percent maximal response) is less than the lowest concentration tested; if
#' \eqn{hitc>=0.9}{hitc>=0.9} and \eqn{ac50<10^{log_{c}{min}}}{ac50<10^logc_min}, then flag.}
#' \item{viability.gnls}{Flag series with an active hit call (hitc >= 0.9) if denoted as cell
#' viability assay with winning model is gain-loss (gnls); if hitc >= 0.9, modl = "gnls" and
#' cell_viability_assay = 1, then flag.}
#' \item{no.med.gt.3bmad}{Flag series where no median response values are greater than baseline as
#' defined by 3 times the baseline median absolute deviation (bmad); nmed_gtbl = 0, where
#' nmed_gtbl is the number of medians greater than 3 * bmad.}
#' }
#'
#' @note
#' This function is not exported and is not intended to be used by the user.
mc6_mthds <- function() {
  ## Each element below is one flag method: given its method id `mthd`, it
  ## returns a list of quoted (bquote) data.table expressions. These are
  ## evaluated later (see mc6) in a context that supplies `ft` (one row per
  ## fitted series) or `dr` (one row per response) plus the result list `f`.
  ## Every method follows the same template:
  ##   - tag rows with the method id, flag text, and test = FALSE,
  ##   - compute test := <flag condition>,
  ##   - copy flagged rows into f[[mthd]],
  ##   - remove the temporary working columns again.
  list(
    modl.directionality.fail = function(mthd) {
      flag <- "Model directionality questionable"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(dr[ , .(c(out[4:5], "test")) := .(init)])
      # Signed cutoff matching the winning model's direction (top < 0 => loss)
      e2 <- bquote(dr[ , coffsign := ifelse(top < 0, -1*coff, coff)])
      e3 <- bquote(dr[ , gtabscoff := abs(resp) > abs(coffsign)])
      e4 <- bquote(dr[ , nrsp_gtabscoff := sum(gtabscoff), by = m4id])
      e5 <- bquote(dr[ , gtcoff := resp > coffsign])
      e6 <- bquote(dr[ , ltcoff := resp < coffsign])
      e7 <- bquote(dr[ , nrsp_gtcoff := sum(gtcoff), by = m4id])
      e8 <- bquote(dr[ , nrsp_ltcoff := sum(ltcoff), by = m4id])
      # Flag when responses beyond the cutoff overall are more than twice
      # those beyond the cutoff in the winning direction
      e9 <- bquote(dr[ , test := ifelse(coffsign > 0, nrsp_gtabscoff > 2*nrsp_gtcoff, nrsp_gtabscoff > 2*nrsp_ltcoff)])
      # dr has one row per response, so de-duplicate before collecting
      e10 <- bquote(f[[.(mthd)]] <- unique(dr[which(test), .SD, .SDcols = .(out)], by = NULL))
      cr <- c("mc6_mthd_id", "flag", "test", "coffsign", "gtabscoff", "nrsp_gtabscoff", "gtcoff", "ltcoff", "nrsp_gtcoff", "nrsp_ltcoff")
      e11 <- bquote(dr[ , .(cr) := NULL])
      list(e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11)
    },
    low.nrep = function(mthd) {
      flag <- "Average number of replicates per conc is less than 2"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      e2 <- bquote(ft[ , test := nrep < 2])
      e3 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test")
      e4 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4)
    },
    low.nconc = function(mthd) {
      flag <- "Number of concentrations tested is less than 4"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      # only series that were actually fit (modl != "none") can be flagged
      e2 <- bquote(ft[ , test := modl != "none" & nconc <= 4])
      e3 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test")
      e4 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4)
    },
    bmd.high = function(mthd) {
      flag <- "Bmd > ac50, indication of high baseline variability"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      # guard: only evaluate when both ac50 and bmd columns exist
      e2 <- bquote(ifelse(all(c("ac50","bmd") %in% names(ft)),ft[ , test := bmd > ac50],ft))
      e3 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test")
      e4 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4)
    },
    singlept.hit.high = function(mthd) {
      flag <- "Only highest conc above baseline, active"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      # lstc: the maximum median response occurred at the last (highest) conc
      e2 <- bquote(ft[ , lstc := max_med_conc == logc_max])
      e3 <- bquote(ft[ , test := nmed_gtbl == 1 & hitc >= 0.9 & lstc])
      e4 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test", "lstc")
      e5 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4, e5)
    },
    singlept.hit.mid = function(mthd) {
      flag <- "Only one conc above baseline, active"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      e2 <- bquote(ft[ , lstc := max_med_conc == logc_max])
      # same as singlept.hit.high but the single point is NOT the highest conc
      e3 <- bquote(ft[ , test := nmed_gtbl == 1 & hitc >= 0.9 & !lstc])
      e4 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test", "lstc")
      e5 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4, e5)
    },
    multipoint.neg = function(mthd) {
      flag <- "Multiple points above baseline, inactive"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      e2 <- bquote(ft[ , test := nmed_gtbl > 1 & hitc < 0.9])
      e3 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test")
      e4 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4)
    },
    gnls.lowconc = function(mthd) {
      flag <- "Gain AC50 < lowest conc & loss AC50 < mean conc"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      # back-transform the log10 concentration bounds to linear scale
      e2 <- bquote(ft[ , c_min := 10^logc_min])
      e3 <- bquote(ft[ , c_max := 10^logc_max])
      conc_cols <- c("c_min", "c_max")
      e4 <- bquote(ft[ , cmen := rowMeans(.SD), .SDcols = .(conc_cols)])
      # guard: only evaluate when the gnls loss AC50 column exists
      e5 <- bquote(ifelse("ac50_loss" %in% names(ft), ft[ , test := modl == "gnls" & ac50 < c_min & ac50_loss < cmen], ft))
      e6 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test", "c_min", "c_max","cmen")
      e7 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4, e5, e6, e7)
    },
    noise = function(mthd) {
      flag <- "Noisy data"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      # fit quality worse (rmse larger) than the cutoff => noisy
      e2 <- bquote(ft[ , test := rmse > coff])
      e3 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test")
      e4 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4)
    },
    border = function(mthd) {
      flag <- "Borderline"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      # |top| within +/-20% of the cutoff => borderline activity
      e2 <- bquote(ft[ , test := abs(top) <= 1.2*coff & abs(top) >= 0.8*coff])
      e3 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test")
      e4 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4)
    },
    # overfit.hit is retained but disabled: per the roxygen docs above, the
    # method has not yet been updated for the current tcpl implementation.
    #overfit.hit = function(mthd) {
    #flag <- "Hit-call potentially confounded by overfitting"
    #out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
    #"flag")
    #init <- bquote(list(.(mthd), .(flag), FALSE))
    #e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
    #e2 <- bquote(ft[modl == "hill" & npts < 5 & hitc == 1, test := TRUE])
    #e3 <- bquote(ft[modl == "gnls" & npts < 7 & hitc == 1, test := TRUE])
    #e4 <- bquote(ft[npts > 1, cna := cnst_aic + 4/(npts - 2)])
    #e5 <- bquote(ft[npts > 4, hna := hill_aic + 40/(npts - 4)])
    #e6 <- bquote(ft[npts > 6, gna := gnls_aic + 84/(npts - 7)])
    #e7 <- bquote(ft[ , nma := pmin(cna, hna, gna, na.rm = TRUE)])
    #e8 <- bquote(ft[gna == nma, nmdl := "gnls"])
    #e9 <- bquote(ft[hna == nma, nmdl := "hill"])
    #e10 <- bquote(ft[cna == nma, nmdl := "cnst"])
    #e11 <- bquote(ft[ , nhc := FALSE])
    #e12 <- bquote(ft[nmdl == "hill" & hill_tp >= coff & max_med >= coff,
    #nhc := TRUE])
    #e13 <- bquote(ft[nmdl == "gnls" & gnls_tp >= coff & max_med >= coff,
    #nhc := TRUE])
    #e14 <- bquote(ft[hitc == 1 & !nhc, test := TRUE])
    #e15 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
    #cr <- c("mc6_mthd_id", "flag", "test",
    #"cna", "hna", "gna", "nma", "nmdl", "nhc")
    #e16 <- bquote(ft[ , .(cr) := NULL])
    #list(e1, e2, e3, e4, e5, e6, e7, e8, e9, e10,
    #e11, e12, e13, e14, e15, e16)
    #
    #},
    efficacy.50 = function(mthd) {
      flag <- "Less than 50% efficacy"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      # coff >= 5 implies a percent-scale endpoint; use the 50% threshold
      e2 <- bquote(ft[hitc >= 0.9 & coff >= 5,
                      test := top < 50 | max_med < 50])
      # coff < 5 implies a log2 fold-change endpoint; use log2(1.5)
      e3 <- bquote(ft[hitc >= 0.9 & coff < 5,
                      test := top < log2(1.5) | max_med < log2(1.5)])
      e4 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test")
      e5 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4, e5)
    },
    ac50.lowconc = function(mthd) {
      flag <- "AC50 less than lowest concentration tested"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      e2 <- bquote(ft[hitc >= 0.9, test := ac50 < 10^logc_min])
      e3 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test")
      e4 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4)
    },
    viability.gnls = function(mthd) {
      flag <- "Cell viability assay fit with gnls winning model"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      e2 <- bquote(ft[hitc >= 0.9, test := modl=="gnls" & cell_viability_assay == 1])
      e3 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test")
      e4 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4)
    },
    no.med.gt.3bmad = function(mthd) {
      flag <- "Flag series where no median response values are greater than baseline as defined by 3 times the baseline median absolute deviation (bmad)"
      out <- c("m5id", "m4id", "aeid", "mc6_mthd_id",
               "flag")
      init <- bquote(list(.(mthd), .(flag), FALSE))
      e1 <- bquote(ft[ , .(c(out[4:5], "test")) := .(init)])
      e2 <- bquote(ft[ , test := nmed_gtbl == 0])
      e3 <- bquote(f[[.(mthd)]] <- ft[which(test), .SD, .SDcols = .(out)])
      cr <- c("mc6_mthd_id", "flag", "test")
      e4 <- bquote(ft[ , .(cr) := NULL])
      list(e1, e2, e3, e4)
    }
  )
}
#-------------------------------------------------------------------------------
|
49bc9309fd07a6d3a60efd39a51ec9ba5fe60887
|
db4fbf2a3f510af870758c27aaa6bd81bc44e834
|
/plot4.R
|
6447f2bc52effaee9052961965e36d82845a6483
|
[] |
no_license
|
arunapatil/ExData_Plotting1
|
16da0fed9d5a01d6460db0eaaae3470b338e71f7
|
969acd55568415b3c120661dd878189f931d21ff
|
refs/heads/master
| 2020-12-26T01:18:23.696388
| 2015-01-11T09:01:13
| 2015-01-11T09:01:13
| 29,082,698
| 0
| 0
| null | 2015-01-11T05:43:28
| 2015-01-11T05:43:26
| null |
UTF-8
|
R
| false
| false
| 1,257
|
r
|
plot4.R
|
# Exploratory Data Analysis course project, plot 4: a 2x2 panel of
# household-power time series for 1-2 Feb 2007, written to plot4.png.
#Read input data and clean it (subset to just the required dates, date/time conversions, numeric conversion)
# NOTE(review): hard-coded setwd() ties this script to one machine.
setwd("C:/Users/arunaba/DataScienceCoursera/ExploratoryDataAnalysis")
library(data.table)
# "?" and empty fields mark missing values in the raw file
data<-fread("household_power_consumption.txt", na.strings=c("?", ""))
# keep only the two target dates
newdata<-data[data$Date=="1/2/2007"|data$Date=="2/2/2007"]
# combine the separate Date and Time columns into one POSIXct timestamp
newdata$DateTime<-as.POSIXct(paste(newdata$Date, newdata$Time), format="%d/%m/%Y %H:%M:%S")
# these columns were read as character (because of the "?" markers); coerce
newdata$Global_active_power=as.numeric(newdata$Global_active_power)
newdata$Global_reactive_power=as.numeric(newdata$Global_reactive_power)
newdata$Voltage=as.numeric(newdata$Voltage)
#Draw the plots
png("plot4.png")
par(mfrow=c(2, 2))  # 2x2 grid of panels, filled row by row
with(newdata, {
  # top-left: global active power over time
  plot(DateTime, Global_active_power, type="l", ylab="Global Active Power (kilowatts)", xlab="")
  # top-right: voltage over time
  plot(DateTime, Voltage, type="l", ylab="Voltage", xlab="")
  # bottom-left: empty frame, then the three sub-metering series overlaid
  plot(DateTime, Sub_metering_1, type="n", xlab="", ylab="Energy sub metering")
  lines(DateTime, Sub_metering_1, col="black")
  lines(DateTime, Sub_metering_2, col="red")
  lines(DateTime, Sub_metering_3, col="blue")
  legend("topright", col=c("black","red","blue"), lty=1, legend=c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
  # bottom-right: global reactive power over time
  plot(DateTime, Global_reactive_power, type="l", xlab="datetime")
})
dev.off()
|
56be769255b8b54b767a8d35f67e92d5f2d17db5
|
a144d52144c41691de3baf988ba1ec0c15e7cc93
|
/functions.R
|
61fdda5c3182bb1ca8c752c343fafd16a7d81476
|
[
"MIT"
] |
permissive
|
saha-19/FreshAPI
|
dfcb644b54b5d37ec9e3f20c0e16556267458b58
|
eb8dc55d03313c94d65013428f7e2b5084e414f3
|
refs/heads/master
| 2022-04-23T07:19:21.697783
| 2020-04-29T18:03:12
| 2020-04-29T18:03:12
| 254,222,650
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 11,596
|
r
|
functions.R
|
predict.bacCounts <- function(bacteria, method, entry, info, product, platform){
  #
  # Predict bacterial counts for one sample, using the model registered in
  # `info` for the requested bacteria / ML method / product / platform.
  #
  # bacteria = bacteria type to predict (TVC, Ps, Bth or LAB)
  # method   = machine learning method (rf, knn, svmLinear, svmRadial,
  #            svmPoly, lm)
  # entry    = a single FTIR entry row
  # info     = data frame describing the available models; columns used:
  #            product, bacterialCounts, ML, platform, preprocessing,
  #            model (path to a saved model), rawSpectra (path to spectra)
  # product  = origin of the sample: Chicken Thigh Fillet (CTF) or
  #            Chicken Burger (CB)
  # platform = analytical platform used for measurement: FTIR or MSI
  #
  # Returns the matching model's prediction, or 0 when no model matches.
  #
  # Hint - function requires "FTIR_models.xlsx" file loaded as model.info
  #        variable (model.info <- read_excel("FTIR_models.xlsx"))
  #      - prospectr, readxl and jsonlite packages are required
  #
  data <- entry
  model.info <- info
  prediction <- 0
  # seq_len() (not 1:nrow) so an empty registry is a no-op instead of an
  # erroneous 1:0 iteration
  for (i in seq_len(nrow(model.info))) {
    if (model.info$product[i] == product &&
        model.info$bacterialCounts[i] == bacteria &&
        model.info$ML[i] == method &&
        model.info$platform[i] == platform) {
      # Apply the same pre-processing the model was trained with
      if (model.info$preprocessing[i] == "SG") {
        # Savitzky-Golay filter
        data <- preprocess.sg(data)
      } else if (model.info$preprocessing[i] == "nSG") {
        # normalisation + Savitzky-Golay filter (needs the training spectra)
        spectra <- readRDS(model.info$rawSpectra[i])
        data <- preprocess.nor_sg(spectra, data)
      }
      # "raw" pre-processing needs no transformation
      model <- readRDS(model.info$model[i])
      prediction <- stats::predict(model, data)
    }
  }
  # Return visibly: the original ended with `prediction <- prediction`,
  # an assignment, which returns its value invisibly.
  prediction
}
###################################################################################################################
###################################################################################################################
predict.bestTVC <- function(entry, info, product, platform){
  #
  # Predict TVC for one sample using the best-ranked (place == "1") model
  # registered in `info` for the given product and platform, and report
  # which ML algorithm produced the prediction.
  #
  # entry    = a single spectral entry row
  # info     = model registry data frame; columns used: product,
  #            bacterialCounts, place, platform, ML, preprocessing,
  #            model (path to a saved model), rawSpectra (path to spectra)
  # product  = Chicken Thigh Fillet (CTF) or Chicken Burger (CB)
  # platform = FTIR or MSI
  #
  # Returns a sentence naming the algorithm and the rounded prediction.
  #
  # Hint - function requires "models.xlsx" file loaded as model.info
  #        variable (model.info <- read_excel("models.xlsx"))
  #      - prospectr, readxl and jsonlite packages are required
  #
  # Map the registry's ML code to a human-readable algorithm name.
  # (Replaces three identical copy-pasted if/else chains.)
  ml_label <- function(ml) {
    switch(ml,
           knn = "k-Nearest Neighbors (k-NN) algorithm",
           rf = "Random Forest algorithm",
           svmLinear = "Support-Vector Machine (SVM) with linear kernel",
           svmRadial = "Support-Vector Machine (SVM) with radial kernel",
           svmPoly = "Support-Vector Machine (SVM) with polynomial kernel",
           "Linear Model")
  }
  data <- entry
  model.info <- info
  prediction <- 0
  model_name <- 0
  # seq_len() so an empty registry is a no-op instead of a 1:0 iteration
  for (i in seq_len(nrow(model.info))) {
    if (model.info$product[i] == product &&
        model.info$bacterialCounts[i] == "TVC" &&
        model.info$place[i] == "1" &&
        model.info$platform[i] == platform) {
      # BUG FIX: the original compared the whole model.info$platform column
      # (missing `[i]`) against "FTIR" inside `&&`, which is wrong for
      # registries with more than one row and an error under R >= 4.3.
      if (model.info$preprocessing[i] == "SG" && model.info$platform[i] == "FTIR") {
        data <- preprocess.sg(data)               # Savitzky-Golay filter
      } else if (model.info$preprocessing[i] == "nSG" && model.info$platform[i] == "FTIR") {
        spectra <- readRDS(model.info$rawSpectra[i])
        data <- preprocess.nor_sg(spectra, data)  # normalisation + SG filter
      }
      # "raw" pre-processing needs no transformation
      model <- readRDS(model.info$model[i])
      prediction <- stats::predict(model, data)
      model_name <- ml_label(model.info$ML[i])
    }
  }
  return(paste0("The most accurate prediction using ", model_name, ": ", round(prediction, digits = 3)))
}
###################################################################################################################
###################################################################################################################
preprocess.sg <- function(singleRow){
  #
  # Apply a Savitzky-Golay filter (window 11, 3rd-order polynomial,
  # 1st derivative) to a single-row spectrum.
  #
  # singleRow = a single row FTIR entry whose column names are numeric
  #             wavelengths
  #
  # Returns a one-row data frame of filtered values, 10 columns shorter
  # than the input (the filter consumes w - 1 = 10 points).
  #
  # Hint - prospectr package is required
  #
  singleRow <- singleRow[,order(as.numeric(colnames(singleRow)))] #order by wavelengths from least to greatest
  # Keep the first ncol - 10 wavelength names to relabel the filtered output.
  # NOTE(review): savitzkyGolay() trims (w-1)/2 points from EACH end of the
  # spectrum, while these names drop only the 10 highest wavelengths -- the
  # relabelled wavelengths may be offset by 5 positions. TODO confirm.
  columns <- colnames(singleRow[,-c((ncol(singleRow)-9):ncol(singleRow))]) #write columnnames which will be used after Savitzky-Golay smoothing
  singleRow <- prospectr::savitzkyGolay(singleRow, w=11, p=3, m=1) #apply SG
  singleRow <- as.data.frame(singleRow) #write received single row as a dataframe
  colnames(singleRow) <- columns #set data frame columns
  # Last expression (an assignment) is the return value.
  singleRow <- singleRow
}
###################################################################################################################
###################################################################################################################
preprocess.nor_sg <- function(spectra, singleRow){
  #
  # Normalize a single-row spectrum against a reference spectra set, then
  # apply a Savitzky-Golay filter (window 11, 3rd-order polynomial,
  # 1st derivative).
  #
  # spectra   = data frame with FTIR results the machine learning model was
  #             built on (used as the normalization reference population)
  # singleRow = a single row FTIR entry
  #
  # Returns a one-row data frame of normalized, filtered values.
  #
  # Hint - prospectr package is required
  #
  # Restrict both inputs to a fixed wavelength window so entries from
  # different instruments stay comparable.
  features <- c(1001:4000) #create a vector with wavelengths which are suitable for results which can be received from different machines
  #feature extraction in spectra dataset
  selected_spectra <- spectra[, c(as.character(features))] #select features
  selected_spectra <- selected_spectra[, order(as.numeric(colnames(selected_spectra)))] #order by wavelengths from least to greatest
  #feature extraction in the single row
  selected_singleRow <- singleRow[,c(as.character(features))] #select features
  selected_singleRow <- selected_singleRow[,order(as.numeric(colnames(selected_singleRow)))] #order by wavelengths from least to greatest
  # Append the new entry to the reference spectra so it is scaled with the
  # same column statistics.
  merged <- rbind.data.frame(selected_spectra, selected_singleRow) #merge a single row and spectra together
  # NOTE(review): scale() standardizes each wavelength COLUMN across samples;
  # classic SNV standardizes within each spectrum (row). The comment below
  # says SNV -- confirm which behavior is intended.
  snv <- scale(merged, center=TRUE, scale=TRUE) #normalise the data
  output <- snv[nrow(snv),] #extract the single row entry, which is the last row
  #apply SG
  # NOTE(review): as in preprocess.sg(), the kept names drop only the 10
  # highest wavelengths while the filter trims both ends -- TODO confirm.
  columns <- colnames(snv[,-c((ncol(snv)-9):ncol(snv))]) #write columnnames which will be used after Savitzky-Golay smoothing
  output <- prospectr::savitzkyGolay(output, w=11, p=3, m=1) #apply SG
  output <- matrix(output, nrow=1) # #write received single row as a dataframe
  output <- as.data.frame(output) #write matrix as a dataframe
  colnames(output) <- columns #set column names
  # Last expression (an assignment) is the return value.
  singleRow <- output
}
####################################################################################################################
####################################################################################################################
roundWavelengths <- function(df){
  #
  # Round the numeric column names (wavelengths) of `df` to whole numbers
  # and collapse columns whose wavelengths collide after rounding into a
  # single column holding their row-wise mean.
  #
  # df = data frame whose column names are all numeric wavelengths
  #
  # Returns a data frame with one column per distinct rounded wavelength,
  # in first-appearance order, keeping the original row names and order.
  #
  # BUG FIXES vs. the original implementation:
  #  * mean(a, b) passed the second value as mean()'s `trim` argument, so
  #    duplicate columns were never actually averaged; rowMeans() is used.
  #  * `i = i + 1` inside a for loop does not skip the next iteration in R,
  #    so runs of duplicated columns were mishandled.
  #  * rebuilding the frame via merge(by = "row.names") silently re-sorted
  #    the rows; the input row order is now preserved.
  #
  rounded <- round(as.numeric(colnames(df)))
  distinct <- unique(rounded)
  merged <- lapply(distinct, function(w) {
    cols <- which(rounded == w)
    if (length(cols) == 1L) {
      # Unique rounded wavelength: keep the column as-is (coerced numeric).
      as.numeric(df[[cols]])
    } else {
      # Several raw wavelengths round to the same value: average them
      # row-wise. as.data.frame keeps a 2-D shape even for one-row input.
      block <- as.data.frame(lapply(cols, function(j) as.numeric(df[[j]])))
      rowMeans(block)
    }
  })
  names(merged) <- as.character(distinct)
  result <- as.data.frame(merged, check.names = FALSE)
  rownames(result) <- rownames(df)
  result
}
###################################################################################################################
###################################################################################################################
CreateObj <- function(data1, data2){
  #
  # Combine two data sets that share row names into one data frame.
  #
  # data1 = the first data set to combine
  # data2 = the second data set to combine
  #
  # Hint - each data set has to have the same number of rows
  #
  # merge() matches the two frames on their row names and returns the
  # names as an extra first column ("Row.names").
  combined <- merge(data1, data2, by = "row.names")
  # Restore the names as proper row names, then drop the helper column.
  rownames(combined) <- combined[, 1]
  as.data.frame(combined[, -1])
}
|
23498440fd97fa06d8fe1c73c6d8a540fe046b7c
|
727eca003d70aaed8129d84c8c6cbf53023384f9
|
/pkg/mac/licence.r
|
f6e40aaf9a2374cce650fb9eecd8f4a14142ca1e
|
[
"LicenseRef-scancode-unknown-license-reference",
"PostgreSQL"
] |
permissive
|
allentc/pgadmin3-lts
|
833484e5ada99be4629369a984c13cfb59180600
|
d69b58b473ee501fd5be016ceea6f7c21f10336a
|
refs/heads/master
| 2023-05-26T16:42:23.572751
| 2023-05-22T02:40:57
| 2023-05-22T02:40:57
| 174,668,882
| 25
| 34
|
NOASSERTION
| 2023-05-22T02:40:58
| 2019-03-09T08:36:29
|
C++
|
UTF-8
|
R
| false
| false
| 2,020
|
r
|
licence.r
|
data 'LPic' (5000) {
$"0000 0001 0000 0000 0000"
};
resource 'STR#' (5000, "English buttons") {
{ /* array StringArray: 9 elements */
/* [1] */
"English",
/* [2] */
"Agree",
/* [3] */
"Disagree",
/* [4] */
"Print",
/* [5] */
"Save...",
/* [6] */
"IMPORTANT - Read this License Agreement carefully before clicking on "
"the \"Agree\" button. By clicking on the \"Agree\" button, you agree "
"to be bound by the terms of the License Agreement.",
/* [7] */
"Software License Agreement",
/* [8] */
"This text cannot be saved. This disk may be full or locked, or the file "
"may be locked.",
/* [9] */
"Unable to print. Make sure you've selected a printer."
}
};
data 'TEXT' (5000, "English") {
"pgAdmin III\n"
"\n"
"Copyright (C) 2002 - 2016, The pgAdmin Development Team\n"
"\n"
"Permission to use, copy, modify, and distribute this software and its documentation for any purpose, without fee, and without a written agreement is hereby granted, provided that the above copyright notice and this paragraph and the following two paragraphs appear in all copies.\n"
"\n"
"IN NO EVENT SHALL THE PGADMIN DEVELOPMENT TEAM BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF THE PGADMIN DEVELOPMENT TEAM HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n"
"\n"
"THE PGADMIN DEVELOPMENT TEAM SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS ON AN \"AS IS\" BASIS, AND THE PGADMIN DEVELOPMENT TEAM HAS NO OBLIGATIONS TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.\n"
};
|
8dbcbeafcf32ca7615172c6d4b369b1514e06ab7
|
fcb9c08efecacc63018c388fcb847e58aa07f087
|
/R/m2_vary.R
|
92d0c6572dd8c4663be80b423162ba5013729164
|
[
"MIT"
] |
permissive
|
yuliasidi/m2imp
|
52957a3580385d5f9a29e85579b4f124ae9d874e
|
857f43522906c43306d49bcd4649d2cf44901343
|
refs/heads/master
| 2020-07-13T05:51:33.835388
| 2020-01-14T21:14:16
| 2020-01-14T21:14:16
| 205,009,061
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,880
|
r
|
m2_vary.R
|
#' @title Variable margin overall point estimate and variance derivation
#' @description Calculates overall point estimates and its variance using a
#' vector of margins
#' @param lambda numeric, fraction of control treatment effect preservation
#' @param m1 numeric, statistical margin
#' @param pc numeric, estimated proportion of events in group 'c'
#' @param pt numeric, estimated proportion of events in group 't'
#' @param pd_var numeric, estimated variance for difference in propotions
#' between groups 'c' and 't'
#' @param nc numeric, number of observations in group 'c'
#' @param nt numeric, number of observations in group 't'
#' @param method chararcter with the followin two options: "wald", "fm"
#' @return list
#' @details DETAILS
#' @examples
#' lambda <- c(0.60, 0.63, 0.65)
#' m2_vary(lambda, m1 = 0.23, pc = 0.8, pt = 0.7, pd_var = 0.004, nc = 100, nt = 100, method = 'wald')
#' @seealso
#' \code{\link[stats]{cor}}
#' \code{\link[purrr]{map}},\code{\link[purrr]{set_names}}
#' \code{\link[bin2mi]{p_rmle}}
#' @rdname m2_vary
#' @export
#' @importFrom stats var
#' @importFrom purrr map_dbl set_names
#' @importFrom bin2mi p_rmle
m2_vary <- function(lambda, m1, pc, pt, pd_var, nc, nt, method = c('wald', 'fm')){
m2 <- (1-lambda)*m1
mean_lambda <- mean(m2)
var_lambda <- stats::var(m2)
num_k <- length(m2)
qbar <- pc - pt - mean_lambda
if (method == 'wald'){
ubar <- pd_var + var_lambda
}
if (method == 'fm'){
pc_rmle <- purrr::map_dbl(m2, bin2mi::p_rmle, nt = nt, nc = nc, pc = pc, pt = pt)
pt_rmle <- pc_rmle - m2
uk <- pc_rmle*(1 - pc_rmle)/nc + pt_rmle*(1 - pt_rmle)/nt
ubar <- mean(uk) + var_lambda
}
b <- var_lambda
t <- ubar + (1 + 1/num_k)*b
v <- floor((num_k - 1)*(1 + ubar/((1+1/num_k)*b))^2)
out <- list(qbar, ubar, b, t, v)%>%purrr::set_names("qbar", "ubar", 'b', 't', 'v')
return(out)
}
|
016f3bae723d64043087d7f89c5e0d88eab7ba6b
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/intubate/examples/kernlab.Rd.R
|
d133e465a195ca965cb85c0fab9397e4e6fc073c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,967
|
r
|
kernlab.Rd.R
|
library(intubate)
### Name: kernlab
### Title: Interfaces for kernlab package for data science pipelines.
### Aliases: ntbt_gausspr ntbt_kfa ntbt_kha ntbt_kkmeans ntbt_kpca ntbt_kqr
### ntbt_ksvm ntbt_lssvm ntbt_rvm ntbt_sigest ntbt_specc
### Keywords: intubate magrittr kernlab gausspr kfa kha kkmeans kpca kqr
### ksvm lssvm rvm sigest specc
### ** Examples
## Not run:
##D library(intubate)
##D library(magrittr)
##D library(kernlab)
##D
##D ## ntbt_gausspr: Gaussian processes for regression and classification
##D data(iris)
##D
##D ## Original function to interface
##D gausspr(Species ~ ., data = iris, var = 2)
##D
##D ## The interface puts data as first parameter
##D ntbt_gausspr(iris, Species ~ ., var = 2)
##D
##D ## so it can be used easily in a pipeline.
##D iris %>%
##D ntbt_gausspr(Species ~ ., var = 2)
##D
##D
##D ## ntbt_kfa: Kernel Feature Analysis
##D data(promotergene)
##D
##D ## Original function to interface
##D kfa(~ ., data = promotergene)
##D
##D ## The interface puts data as first parameter
##D ntbt_kfa(promotergene, ~ .)
##D
##D ## so it can be used easily in a pipeline.
##D promotergene %>%
##D ntbt_kfa(~ .)
##D
##D
##D ## ntbt_kha: Kernel Principal Components Analysis
##D data(iris)
##D test <- sample(1:150,70)
##D
##D ## Original function to interface
##D kpc <- kha(~ ., data = iris[-test, -5], kernel = "rbfdot", kpar = list(sigma=0.2),
##D features = 2, eta = 0.001, maxiter = 65)
##D pcv(kpc)
##D
##D ## The interface puts data as first parameter
##D kpc <- ntbt_kha(iris[-test, -5], ~ ., kernel = "rbfdot", kpar = list(sigma=0.2),
##D features = 2, eta = 0.001, maxiter = 65)
##D pcv(kpc)
##D
##D ## so it can be used easily in a pipeline.
##D iris[-test, -5] %>%
##D ntbt_kha(~ ., kernel = "rbfdot", kpar = list(sigma=0.2),
##D features = 2, eta = 0.001, maxiter = 65) %>%
##D pcv()
##D
##D
##D ## ntbt_kkmeans: Kernel k-means
##D ## Original function to interface
##D sc <- kkmeans(~ ., data = iris[-test, -5], centers = 3)
##D centers(sc)
##D
##D ## The interface puts data as first parameter
##D sc <- ntbt_kkmeans(iris[-test, -5], ~ ., centers = 3)
##D centers(sc)
##D
##D ## so it can be used easily in a pipeline.
##D iris[-test, -5] %>%
##D ntbt_kkmeans(~ ., centers = 3) %>%
##D centers()
##D
##D
##D ## ntbt_kpca: Kernel Principal Components Analysis
##D data(iris)
##D test <- sample(1:150,20)
##D
##D ## Original function to interface
##D kpc <- kpca(~ ., data = iris[-test, -5], kernel = "rbfdot",
##D kpar = list(sigma = 0.2), features = 2)
##D pcv(kpc)
##D
##D ## The interface puts data as first parameter
##D kpc <- ntbt_kpca(iris[-test, -5], ~ ., kernel = "rbfdot",
##D kpar = list(sigma = 0.2), features = 2)
##D pcv(kpc)
##D
##D ## so it can be used easily in a pipeline.
##D iris[-test, -5] %>%
##D ntbt_kpca(~ ., kernel = "rbfdot",
##D kpar = list(sigma = 0.2), features = 2) %>%
##D pcv()
##D
##D
##D ## ntbt_kqr: Kernel Quantile Regression
##D ## Not found example using formula interface, and I am
##D ## completely ignorant to construct one.
##D x <- sort(runif(300))
##D y <- sin(pi*x) + rnorm(300,0,sd=exp(sin(2*pi*x)))
##D
##D dkqr <- data.frame(x, y)
##D
##D ## Original function to interface
##D set.seed(1)
##D kqr(x, y, tau = 0.5, C = 0.15)
##D
##D ## The interface puts data as first parameter
##D set.seed(1)
##D ntbt_kqr(dkqr, x, y, tau = 0.5, C = 0.15)
##D
##D ## so it can be used easily in a pipeline.
##D set.seed(1)
##D dkqr %>%
##D ntbt_kqr(x, y, tau = 0.5, C = 0.15)
##D
##D
##D ## ntbt_ksvm: Support Vector Machines
##D data(spam)
##D index <- sample(1:dim(spam)[1])
##D spamtrain <- spam[index[1:floor(dim(spam)[1]/2)], ]
##D spamtest <- spam[index[((ceiling(dim(spam)[1]/2)) + 1):dim(spam)[1]], ]
##D
##D ## Original function to interface
##D set.seed(1)
##D ksvm(type ~ ., data = spamtrain, kernel = "rbfdot",
##D kpar = list(sigma = 0.05), C = 5, cross = 3)
##D
##D ## The interface puts data as first parameter
##D set.seed(1)
##D ntbt_ksvm(spamtrain, type ~ ., kernel = "rbfdot",
##D kpar = list(sigma = 0.05), C = 5, cross = 3)
##D
##D ## so it can be used easily in a pipeline.
##D set.seed(1)
##D spamtrain %>%
##D ntbt_ksvm(type ~ ., kernel = "rbfdot",
##D kpar = list(sigma = 0.05), C = 5, cross = 3)
##D
##D
##D ## ntbt_lssvm: Least Squares Support Vector Machine
##D data(iris)
##D
##D ## Original function to interface
##D set.seed(1)
##D lssvm(Species ~ ., data = iris)
##D
##D ## The interface puts data as first parameter
##D set.seed(1)
##D ntbt_lssvm(iris, Species ~ .)
##D
##D ## so it can be used easily in a pipeline.
##D set.seed(1)
##D iris %>%
##D ntbt_lssvm(Species ~ .)
##D
##D
##D ## ntbt_rvm: Relevance Vector Machine
##D ## Not found example using formula interface, and I am
##D ## completely ignorant to construct one.
##D x <- seq(-20,20,0.1)
##D y <- sin(x)/x + rnorm(401,sd=0.05)
##D
##D drvm <- data.frame(x, y)
##D
##D ## Original function to interface
##D set.seed(1)
##D rvm(x, y, tau = 0.5, C = 0.15)
##D
##D ## The interface puts data as first parameter
##D set.seed(1)
##D ntbt_rvm(drvm, x, y, tau = 0.5, C = 0.15)
##D
##D ## so it can be used easily in a pipeline.
##D set.seed(1)
##D drvm %>%
##D ntbt_rvm(x, y, tau = 0.5, C = 0.15)
##D
##D
##D ## ntbt_sigest: Hyperparameter estimation for the Gaussian Radial Basis kernel
##D data(promotergene)
##D
##D ## Original function to interface
##D set.seed(1)
##D sigest(Class ~ ., data = promotergene)
##D
##D ## The interface puts data as first parameter
##D set.seed(1)
##D ntbt_sigest(promotergene, Class ~ .)
##D
##D ## so it can be used easily in a pipeline.
##D set.seed(1)
##D promotergene %>%
##D ntbt_sigest(Class ~ .)
##D
##D ## ntbt_specc: Spectral Clustering
##D ## Not found example using formula interface, and I am
##D ## completely ignorant to construct one.
## End(Not run)
|
efc68794e0e97b9d42d28e21f2240d8562ac7bfa
|
1e4f0d9f2d5bd272dcdc22b908d5734499737043
|
/R/HRQoLplot.R
|
bd71f689c454862c088c6b1ae42c609cd542763a
|
[] |
no_license
|
idaejin/PROreg
|
f8aeadf6e4e5943507aded875ae5edfaaed65d75
|
739571ba62b3216b0ceff9ae4326d762bc1535da
|
refs/heads/master
| 2020-03-28T17:05:41.562507
| 2018-10-16T22:36:42
| 2018-10-16T22:36:42
| 148,756,546
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,017
|
r
|
HRQoLplot.R
|
HRQoLplot <- function(data,legend=FALSE,title="Short Form-36 Health Survey",dimlabel.cex=NULL,legend.cex=1,linewidth=3,title.cex=1,lty=1){
number <- 0
number.cex <- rep(1,8)
maxmin <- data.frame(
PF=c(20,0),
RP=c(4,0),
BP=c(9,0),
GH=c(20,0),
VT=c(20,0),
SF=c(8,0),
RE=c(3,0),
MH=c(13,0))
colnames(maxmin) <- c("PF (20)","RP (4)","BP (9)","GH (20)","VT (20)","SF (8)","RE (3)","MH (13)")
colnames(data) <- c("PF (20)","RP (4)","BP (9)","GH (20)","VT (20)","SF (8)","RE (3)","MH (13)")
dat <- rbind(maxmin,data)
radarchart(dat, axistype=number, seg=4, pty=32, plty=lty,plwd=linewidth, na.itp=FALSE,cglcol="black",
title=title,pcol=brewer.pal(8,"Set1"),vlcex=dimlabel.cex,cex.main=title.cex,axislabcol="black",
calcex=number.cex)
# Legend
if (legend==TRUE){
legend("topright", legend=c(rownames(dat[-c(1,2),])),text.col=brewer.pal(8,"Set1"),bty="n",cex=legend.cex,
lty=lty,lwd=legend.cex,col=brewer.pal(8,"Set1"))
}
}
|
c68c074ec9e9d9e779dc9c1388dd41951742ee6c
|
9d1e0abd4ea265db162001d36cbf2202c86effbd
|
/R/logpl.R
|
cb620452d903e0fa60c599bd8fcc728b420e21e7
|
[] |
no_license
|
cran/survcomp
|
018fb386d9497b09286b439eba26446ff0f86081
|
f9980b6f34782542731743799b5040f6b562fd7a
|
refs/heads/master
| 2021-01-10T19:44:06.874372
| 2009-02-08T00:00:00
| 2009-02-08T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,842
|
r
|
logpl.R
|
`logpl` <-
function(x, surv.time, surv.event, strata, beta, na.rm=FALSE, verbose=FALSE) {
##############
#internal function
##############
logpl1 <- function(x, surv.time, surv.event, beta, verbose=FALSE) {
x <- as.matrix(x)
nm <- dim(x)
n <- nm[1]
m <- nm[2]
r <- rank(surv.time)
beta <- as.matrix(beta)
ita <- x %*% beta
epita <- exp(ita)
d <- rep(0, n)
dono <- rep(0, n)
for(i in 1:n) {
d[i] <- sum(surv.event[r == r[i]])
dono[i] <- sum(epita[r >= r[i]])
}
risk <- d/dono
risk1 <- d/dono^{ 2}
culrisk1 <- culrisk <- rep(0, n)
for(i in 1:n) {
culrisk[i] <- sum(unique(risk[r <= r[i]]))
culrisk1[i] <- sum(unique(risk1[r <= r[i]]))
}
lik <- sum((ita - log(dono)) * surv.event)
res <- c(lik, sum(surv.event))
names(res) <- c("logpl", "event")
return(res)
}
##############
## remove NA values
x <- as.matrix(x)
if(missing(strata)) { strata <- rep(1, nrow(x)) }
beta <- as.matrix(beta)
cc.ix <- complete.cases(surv.time, surv.event, x, strata)
surv.time <- surv.time[cc.ix]
surv.event <- surv.event[cc.ix]
x <- x[cc.ix, ,drop=FALSE]
strata <- strata[cc.ix]
n <- sum(cc.ix)
if (!all(cc.ix) && !na.rm) { stop("NA values are present!") }
if(verbose) { cat(sprintf("%i cases are removed due to NA values\n",as.integer(sum(!cc.ix)))) }
ss <- unique(strata)
if(length(ss) < 2) {
res <- logpl1(surv.time=surv.time, surv.event=surv.event, beta=beta, x=x, verbose=verbose)
}
else {
res1 <- 0
res2 <- 0
for(i in 1:length(ss)) {
myx <- strata == ss[i]
rr <- logpl1(surv.time=surv.time[myx], surv.event=surv.event[myx], beta=beta, x=x[myx, ], verbose=verbose)
res1 <- res1 + rr[1]
res2 <- res2 + rr[2]
}
res <- c(res1, res2)
names(res) <- c("logpl", "event")
}
return(res)
}
|
d550e0c5c5a257c11984a59ea3675d8769fc7912
|
2fae5a43bd5f38d5674406bd636b998729db8271
|
/runfiles/archive/msw_decay_msm_v2.R
|
57aacb1a003ad0a01240dd3e230199dc39cf5569
|
[
"MIT"
] |
permissive
|
shoshievass/bridgeConditions
|
1be73e22ea87d223e99c1cf3c9e59ce1759539f4
|
24208ee063a58de73492d7e63fb6d8ddb414d760
|
refs/heads/master
| 2021-12-21T02:56:00.098804
| 2021-12-16T00:46:06
| 2021-12-16T00:46:06
| 117,144,750
| 1
| 0
|
MIT
| 2020-05-25T20:29:28
| 2018-01-11T19:39:11
|
R
|
UTF-8
|
R
| false
| false
| 4,854
|
r
|
msw_decay_msm_v2.R
|
library(tidyverse)
B <- 100
T_max <- 16
simulated_bridges <- data.frame(bridgeID = 1:B, age = runif(B, 0, 100))
bridge_dates <- expand.grid(bridgeID = 1:B, data_year = 1:T_max) %>% arrange(bridgeID)
simulated_bridges <- simulated_bridges %>%
right_join(bridge_dates)
T_b_df <- simulated_bridges %>%
mutate(rn = row_number()) %>%
group_by(bridgeID) %>%
summarize(
T_b = n(),
n_b = first(rn)
)
simulated_bridges <- simulated_bridges %>%
left_join(T_b_df)
simulated_bridges <- simulated_bridges %>%
group_by(bridgeID) %>%
mutate(
spending_ind = rbinom(T_max, 1, 0.1),
spending = rgamma(T_max, 15, 5) * spending_ind,
cum_spending = cumsum(spending),
tacit_work_ind = rbinom(T_max, 1, 0.05)
) %>%
ungroup() %>%
mutate(
time_laps = 1,
work_done = ifelse(spending_ind > 0, 1 ,0)
)
simulated_bridges <- simulated_bridges %>%
group_by(bridgeID, cum_spending) %>%
mutate(
periods_since_spending = cumsum(time_laps) - 1
) %>%
ungroup()
X_bridge <- model.matrix(~ scale(age), data = simulated_bridges)
library(rstan)
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores())
# expose_stan_functions("Models/msm_bridge_decay_v2.stan")
# test_triangular <- getLowerTriangularMatrix(5)
# test_discounting_mat <- getDiscountingMatrix(5, (0.95))
dgp_model <- stan_model("Models/msm_bridge_decay_v2.stan")
# w = 1e4 # for scaling spending
w <- 1
raw_data_list <- list(
N = nrow(simulated_bridges),
B = length(unique(simulated_bridges$bridgeID)),
T = length(unique(simulated_bridges$data_year)),
M = ncol(X_bridge),
H = 7,
bridge_ID = simulated_bridges$bridgeID,
period_ID = simulated_bridges$data_year,
deck_health = sample(4:7, nrow(simulated_bridges), replace = T), # intialize randomly
superstructure_health = sample(4:7, nrow(simulated_bridges), replace = T), # intialize randomly
substructure_health = sample(4:7, nrow(simulated_bridges), replace = T), # intialize randomly
deck_health_change = rep(0, nrow(simulated_bridges)),
superstructure_health_change = rep(0, nrow(simulated_bridges)),
substructure_health_change = rep(0, nrow(simulated_bridges)),
spending = (simulated_bridges$spending / w),
num_periods_since_spending = simulated_bridges$periods_since_spending,
work_done = simulated_bridges$work_done,
num_periods_lapsed = simulated_bridges$time_laps,
T_b = T_b_df$T_b,
N_b = T_b_df$n_b,
X = X_bridge,
run_estimation = 0
)
dgp_sample <- sampling(dgp_model, data = raw_data_list, iter = 12, chains = 1, seed = 467)
dgp_beta <- as.data.frame(dgp_sample, pars = "beta_deck")
dgp_beta <- t(dgp_beta[1, ])
dgp_beta
dgp_discount_scalar <- as.data.frame(dgp_sample, pars = "discount_scalar")
dgp_discount_scalar <- t(dgp_discount_scalar[1, ])
dgp_discount_scalar
dgp_sim_deck_health <- as.data.frame(dgp_sample, pars = "deck_health_sim")
dgp_sim_deck_health <- t(dgp_sim_deck_health[1, ])
summary(dgp_sim_deck_health)
dgp_sim_superstructure_health <- as.data.frame(dgp_sample, pars = "superstructure_health_sim")
dgp_sim_superstructure_health <- t(dgp_sim_superstructure_health[1, ])
summary(dgp_sim_superstructure_health)
dgp_sim_substructure_health <- as.data.frame(dgp_sample, pars = "substructure_health_sim")
dgp_sim_substructure_health <- t(dgp_sim_substructure_health[1, ])
summary(dgp_sim_substructure_health)
simulated_data_list <- list(
N = nrow(simulated_bridges),
B = length(unique(simulated_bridges$bridgeID)),
T = length(unique(simulated_bridges$data_year)),
M = ncol(X_bridge),
H = 7,
bridge_ID = simulated_bridges$bridgeID,
period_ID = simulated_bridges$data_year,
deck_health = as.vector(dgp_sim_deck_health),
superstructure_health = as.vector(dgp_sim_superstructure_health),
substructure_health = as.vector(dgp_sim_substructure_health),
spending = (simulated_bridges$spending / w),
num_periods_since_spending = simulated_bridges$periods_since_spending,
work_done = simulated_bridges$work_done,
num_periods_lapsed = simulated_bridges$time_laps,
T_b = T_b_df$T_b,
N_b = T_b_df$n_b,
X = X_bridge,
run_estimation = 1
)
system.time(model_fit_opt <- optimizing(dgp_model, data = simulated_data_list, verbose = T))
get_opt_est <- function(x, par) {
x$par[grepl(par, names(x$par))]
}
mle_beta <- get_opt_est(model_fit_opt, "\\bbeta_deck\\b")
plot(mle_beta, dgp_beta)
mle_discount_scalar <- get_opt_est(model_fit_opt, "\\bdiscount_scalar\\b")
plot(mle_discount_scalar, dgp_discount_scalar)
estimated_model <- sampling(dgp_model, data = simulated_data_list, iter = 500, chains = 2, cores = 2)
bayes_beta <- as.numeric(colMeans(as.data.frame(estimated_model, pars = "beta_deck")))
plot(bayes_beta,dgp_beta)
bayes_discount_scalar <- as.numeric(colMeans(as.data.frame(estimated_model, pars = "discount_scalar")))
print(estimated_model, pars = "discount_scalar")
|
6f4384278c07b0f3f658566b7268e24827b48ef0
|
83d35a0c687e56de320bbe025fe876df41ea3bf6
|
/inst/unitTests/findBAFvariance_test.R
|
eb9646218807632c6d2dfa38a450f5ceff6ef6f1
|
[] |
no_license
|
smgogarten/GWASTools
|
797f4cc0d90299195fea29ee1fc24c492267541a
|
720bfc6bede713dfcfbff1dd506f4c9f338caa9d
|
refs/heads/devel
| 2023-06-26T13:37:21.371466
| 2023-06-22T12:37:41
| 2023-06-22T12:37:41
| 100,623,140
| 11
| 8
| null | 2023-06-22T12:34:02
| 2017-08-17T16:18:11
|
R
|
UTF-8
|
R
| false
| false
| 1,373
|
r
|
findBAFvariance_test.R
|
test_findBAFvariance <- function() {
blfile <- system.file("extdata", "illumina_bl.nc", package="GWASdata")
blnc <- NcdfIntensityReader(blfile)
genofile <- system.file("extdata", "illumina_geno.nc", package="GWASdata")
genonc <- NcdfGenotypeReader(genofile)
nbins <- rep(8, 3) # chroms 21-26 in this file, so need bins for (21,22,23)
baf.res <- sdByScanChromWindow(blnc, genonc, nbins=nbins)
checkEquals(length(baf.res), 3)
checkEquals(dim(baf.res[[1]]), c(nscan(blnc), 7))
data(illumina_scan_annot)
sex <- illumina_scan_annot$sex
sd.res <- meanSdByChromWindow(baf.res, sex)
var.res <- findBAFvariance(sd.res, baf.res, sex, 2)
sd.res <- medianSdOverAutosomes(baf.res)
checkEquals(dim(sd.res), c(nscan(blnc), 2))
# default value for nbins
baf.res <- sdByScanChromWindow(blnc, genonc)
checkEquals(dim(baf.res[[1]]), c(nscan(blnc), 1))
sd.res <- medianSdOverAutosomes(baf.res)
checkEquals(dim(sd.res), c(nscan(blnc), 2))
# try LRR
lrr.res <- sdByScanChromWindow(blnc, var="LogRRatio", incl.hom=TRUE)
checkEquals(length(lrr.res), 3)
checkEquals(dim(lrr.res[[1]]), c(nscan(blnc), 1))
sd.res <- medianSdOverAutosomes(lrr.res)
checkEquals(dim(sd.res), c(nscan(blnc), 2))
# check error - default incl. values w/o genoData
checkException(sdByScanChromWindow(blnc, var="LogRRatio"))
close(blnc)
close(genonc)
}
|
a35462147368f70e384ad3295cf0310b2aaad95b
|
51d43d3ee56f17d715430a742ca1e9c985a076cb
|
/app/models/home.R
|
a0138175321e4c198a5ce004e4eac4f36982fd2d
|
[
"MIT"
] |
permissive
|
aleDsz/lambR
|
9f77ee447f3bcf898b2ef817e6067cfaff13b331
|
a2f41328ccba3b8899e922b280b55c2f0b292702
|
refs/heads/master
| 2021-07-06T14:26:44.342123
| 2020-08-01T20:01:08
| 2020-08-01T20:01:08
| 156,108,383
| 0
| 0
|
MIT
| 2020-08-02T00:47:28
| 2018-11-04T17:43:07
|
R
|
UTF-8
|
R
| false
| false
| 122
|
r
|
home.R
|
Home <- R6Class("HomeModel", inherit = Model,
public = list(
get_name = function () {
return ("Alexandre")
}
)
)
|
906e20ffa9ddbfb81fc5b252afa6bbfc55bd27cf
|
a7dc929cfb59fa8add87b63be0146b7b7707d3b2
|
/rprog/week2/assignment2/pollutantmean.R
|
e10088681a9961f5aab22a49d0e43ee501eb555a
|
[] |
no_license
|
theoneandoney/datasciencecoursera
|
3a15376fa427f15c202f93405ad5d504279d1b94
|
61377eec59c77d52906ffd028bd8464ce3c65a3e
|
refs/heads/master
| 2021-12-02T15:32:22.397811
| 2021-11-03T00:48:39
| 2021-11-03T00:48:39
| 38,469,696
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 350
|
r
|
pollutantmean.R
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
myfiles <- list.files(path = directory,
pattern = "*.csv", full.names = TRUE)
df <- data.frame()
for (i in id) {
df <- rbind(df, read.csv(myfiles[i]))
}
p <- df[,pollutant]
good <- complete.cases(p)
p2 <- p[good]
y <- mean(p2)
y
}
|
1b5c07f9dab6e06a266a44cfff19e5071d3fe14a
|
363335caa7750c527dde4055320e8972fd32bbe8
|
/ui.R
|
a5d7b2831b7cc71d17f943ef1fb38a9ac0be19d9
|
[] |
no_license
|
ayolaikaa/Proyek-Data-Science
|
3313dd72e30adb9711bd0d58255dfb0be91931fa
|
7d4b4dacbdf5745a73da9a1675dead8e2c7481d4
|
refs/heads/main
| 2023-02-18T10:35:47.317936
| 2021-01-22T11:11:08
| 2021-01-22T11:11:08
| 331,920,289
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 662
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for miles per gallon application
shinyUI(pageWithSidebar(
# Application title
ui<- headerPanel("Disaster"),
sidebarPanel(
selectInput("Pilihan", "Pilihan:",
list(
"Positive/Negative Word " = "1")),
),
mainPanel(
h3(textOutput("caption")),
plotOutput("plot"),
)
))
|
852b7c99aa559bd02f337ddee1babfb5fe697d87
|
27a47d29aab06ca0d90889e7c2d7b71535785678
|
/Week3/Code/DataWrangTidy.R
|
d4a1665708e2a9bae4634bd6d35e22d04d7023bb
|
[] |
no_license
|
amishabhojwani/CMEECourseWork
|
529ce63e16222795c8725357541ad8b11ecc2edf
|
25ffc77b5a613956b4752dd5f99b7a6bd0649e09
|
refs/heads/master
| 2023-03-12T00:24:11.609251
| 2021-01-22T15:46:16
| 2021-01-22T15:46:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,323
|
r
|
DataWrangTidy.R
|
################################################################
################## Wrangling the Pound Hill Dataset ############
################################################################
require(tidyverse)
############# Load the dataset ###############
# header = false because the raw data don't have real headers
MyData <- as.matrix(read.csv("../Data/PoundHillData.csv", header = FALSE))
# header = true because we do have metadata headers
MyMetaData <- read.csv("../Data/PoundHillMetaData.csv", header = TRUE, sep = ";")
############# Inspect the dataset ###############
head(MyData)
dim(MyData)
dplyr::glimpse(MyData)
#utils::View(MyData) #you can also do this
#utils::View(MyMetaData)
############# Transpose ###############
# To get those species into columns and treatments into rows
MyData <- t(MyData)
head(MyData)
colnames(MyData) #there's no column names, just "data"
dim(MyData)
############# Replace species absences with zeros ###############
MyData[MyData == ""] = 0
############# Convert raw matrix to data frame ###############
TempData <- as.data.frame(MyData[-1,],stringsAsFactors = F) #stringsAsFactors = F is important!
colnames(TempData) <- MyData[1,] # assign column names from original data
head(TempData)
rownames(TempData) <- NULL #get rid of row names
head(TempData)
############# Convert from wide to long format ###############
MyWrangledData <- TempData %>%
pivot_longer(5:45, names_to = "Species", values_to = "Count") %>% #could use gather also, make it long from wide
mutate(Cultivation = as.factor(Cultivation)) %>% #change variables to factors
mutate(Block = as.factor(Block)) %>%
mutate(Plot = as.factor(Plot)) %>%
mutate(Quadrat = as.factor(Quadrat)) %>%
mutate(Count = as.integer(Count)) %>% #only integer variable
mutate(Species = as.factor(Species))
dplyr::glimpse(MyWrangledData)
head(MyWrangledData)
dim(MyWrangledData)
############# Exploring the data ###############
require(tidyverse)
tibble::as_tibble(MyWrangledData) # convert to a tibble, which is like a data frame but more manipulable
dplyr::glimpse(MyWrangledData) #like str(), but nicer!
utils::View(MyWrangledData) #same as fix() or View()
dplyr::filter(MyWrangledData, Count>100) #like subset(), but nicer!
dplyr::slice(MyWrangledData, 10:15) # Look at an arbitrary set of data rows
|
a91693aedc1e09cdbd971736b2106bb821d4ea35
|
75d5e31cdfb355e03452ad51b58b0194a5adf527
|
/multiBatchScript.R
|
5ddbe9d1f152b0f7eb7a9859c8134b3066b360e8
|
[] |
no_license
|
samueldnj/hierProdAnalysis
|
1a520bde8e1d91e33a002a9a947278aec39add15
|
7fca3c8998679bf7d728b81b16848f329854d133
|
refs/heads/master
| 2021-03-27T13:54:36.601714
| 2019-03-03T09:08:25
| 2019-03-03T09:08:25
| 66,486,114
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,326
|
r
|
multiBatchScript.R
|
# --------------------------------------------------------------------------
# multiBatchScript.R
#
# Runs a list of batch files in turn.
#
# Author: Samuel DN Johnson
# Date: 27 March, 2017
#
# --------------------------------------------------------------------------
# Source the simulation framework
source("control.R")
# List batch file names, base control file names and experiment
# prefix names (for folder naming)
# vectors must length k.
batchFiles <- c("DoverAssess.bch","infoScenarios.bch","pubBase4MPs.bch")
baseCtlFiles <- c("simCtlFileDoverAssess.txt","simCtlFile.txt","simCtlFile.txt")
expPrefix <- c("DoverAssess","info_qREs_RP","pubBase")
plots <- c(FALSE,FALSE,FALSE)
# Now loop over the experiments
for( i in 1:length(batchFiles))
{
# Make batch design
makeBatch( batchCtlFile = batchFiles[i],
baseCtlFile = baseCtlFiles[i] )
# Run batch job in parallel
.runBatchJob( par = T, prefix = expPrefix[i] )
# Create stat tables
sims <- grep(pattern = "sim", x = list.files("./project/"), value = T)
nSims <- length(sims)
.statTables(1:nSims,expPrefix[i],par=T)
# Create plots if asked for
if(plots)
{
# Perf metric summaries of each scenario
dumpPerfMetrics( tabNameRoot = expPrefix[i], stockLabel = "Stock1",
vars = c("Umsy","BnT","Bmsy","Dep","q_1","q_2"),
varLabels = expression(U[MSY], B[T], B[MSY], B[T]/B[0], q[11], q[21]),
MPs = c("noJointPriors","qPriorOnly","UmsyPriorOnly","qUpriors" ),
MPlabels = expression("None", q, U[MSY], q/U[MSY] ) )
# Now get the folder order numbering so we can plot the BCsim
# and stockPerf
simNumTable <- makeSimNumTable()
# Now do rep by rep biomass plots
dumpBCsim( simPath = "./project",
prefix = batchName,
MPs = c("noJointPriors","qPriorOnly","UmsyPriorOnly","qUpriors" ),
MPlabels = expression("Single Stock","None", q, U[MSY], q/U[MSY] ),
simNumTable = simNumTable )
# Now do relative error distributions
dumpStockPerf( simPath = "./project",
prefix = batchName,
MPs = c("noJointPriors","qPriorOnly","UmsyPriorOnly","qUpriors" ),
MPlabels = c( noJointPriors = "None",
qPriorOnly = expression(q),
UmsyPriorOnly = expression(U[MSY]),
qUpriors = expression(q/U[MSY]) ),
simNumTable = simNumTable )
}
# Now copy the project folder to Landmark NAS
copyDest <- file.path("/Volumes/home/thesisStuff/cwMSexperiments/TMB",paste(expPrefix[i],Sys.Date(),sep = "_") )
dir.create( copyDest )
# Copy project folder contents recursively to copyDest
cat( "Copying project folder contents to ", copyDest, "\n", sep = "" )
x <- file.copy( from = file.path( getwd(),"project"), to = copyDest,
recursive = TRUE )
if(!x)
{
cat( "Error copying project folder contents to remote server,\n",
"using local dir instead" )
copyDest <- file.path("../",paste(expPrefix[i],Sys.Date(),sep = "_") )
dir.create( copyDest )
# Copy project folder contents recursively to copyDest
cat( "Copying project folder contents to ", copyDest, "\n", sep = "" )
x <- file.copy( from = file.path( getwd(),"project"), to = copyDest,
recursive = TRUE )
}
# Now remove the simulation folders and the contents of the batch sub-dir
# from the project folder
# sims <- grep(pattern = "sim", x = list.files("./project/"), value = T)
simsPath <- file.path( getwd(),"project",sims)
batchFldrContents <- list.files( file.path(getwd(), "project", "Batch") )
batchContentsPath <- file.path( getwd(), "project", "Batch", batchFldrContents )
# Copy out sims to dropbox, tidy up
cat("Removing simulations from ./project/ \n", sep="")
for(k in 1:length(simsPath))
system(command=paste("rm -d -R ",simsPath[k],sep=""))
cat("Removing batch files from ./project/batch folder\n", sep="")
for(k in 1:length(batchContentsPath))
system(command=paste("rm -d -R ",batchContentsPath[k],sep=""))
cat("Experiment and tidy-up complete, ready to start next experiment.\n")
}
|
fe0df62cf051881b9e82fd10f4c1d96d6ee96b76
|
4bcc55d78276d5ee25e0a523470878104705f8b5
|
/man/CleanDesignCtrl.Rd
|
6acc0b9f3f86b3697ee837831b59b75b046afe3c
|
[] |
no_license
|
kerwin12580/perturbLM
|
56dea7e3fcb6df56fcb90c7e71c12261487b82f7
|
28350fb33637f9d2d9ebe04acdb9eb0cd35da123
|
refs/heads/master
| 2022-10-17T00:06:46.453710
| 2020-05-30T21:55:16
| 2020-05-30T21:55:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 464
|
rd
|
CleanDesignCtrl.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/genotype_handling.R
\name{CleanDesignCtrl}
\alias{CleanDesignCtrl}
\title{Filters out cells that belong to control genotype and other genotypes}
\usage{
CleanDesignCtrl(design.mat, ctrl)
}
\arguments{
\item{design.mat}{Design matrix}
\item{ctrl}{Control genotype}
}
\value{
Filtered design matrix
}
\description{
Filters out cells that belong to control genotype and other genotypes
}
|
62f5af6cc15ab068559d990c0ae3836077c9cd8c
|
7240a4d06e5d8d7420af21ee8829e352f3b6bbd1
|
/man/getWeekOneSoh.Rd
|
2eb1a51d72419f3598b3384b7d8c4fcefb1f8094
|
[] |
no_license
|
Kanjali/stencilRfunctions
|
e55e0bb756a788f14a3736626f60eb4de6cfd1ab
|
c928c24f084878fca90f381f5a6629be7d5da571
|
refs/heads/master
| 2021-01-01T16:23:39.316053
| 2017-10-23T09:53:36
| 2017-10-23T09:53:36
| 97,823,824
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 628
|
rd
|
getWeekOneSoh.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dataPreprocessFunctions.R
\name{getWeekOneSoh}
\alias{getWeekOneSoh}
\title{Get week-1 soh using sales happened in that week and soh of next week}
\usage{
getWeekOneSoh(soh_period, sales_with_period)
}
\arguments{
\item{soh_period}{A data frame of stock-on-hand (soh) records whose period numbers have been edited for the given date ranges (by increasing the period numbers of the data).}
\item{sales_with_period}{is a dataframe sales data merge with period}
}
\value{
soh is dataframe i.e soh for week1
}
\description{
Get week-1 soh using sales happened in that week and soh of next week
}
|
42c0bc91d3df7882985b664a7d36c5fcda36d754
|
0a906cf8b1b7da2aea87de958e3662870df49727
|
/grattan/inst/testfiles/IncomeTax/libFuzzer_IncomeTax/IncomeTax_valgrind_files/1610051622-test.R
|
684176eaa9576dbb373c71c0a57870cfd4c7a514
|
[] |
no_license
|
akhikolla/updated-only-Issues
|
a85c887f0e1aae8a8dc358717d55b21678d04660
|
7d74489dfc7ddfec3955ae7891f15e920cad2e0c
|
refs/heads/master
| 2023-04-13T08:22:15.699449
| 2021-04-21T16:25:35
| 2021-04-21T16:25:35
| 360,232,775
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,208
|
r
|
1610051622-test.R
|
# Auto-generated libFuzzer/valgrind reproduction case for grattan::IncomeTax.
# `testlist` holds the exact double vectors (rates, thresholds, x) captured by
# the fuzzer; many values are denormals/NaN by design — do not "clean" them,
# the point is to replay the crashing/suspect input byte-for-byte.
testlist <- list(rates = 2.73737456983631e-312, thresholds = c(-1.26822486837899e-30, -2.24767481577173e-289, 7.19919233908128e-310, -2.12131688733295e-309, 3.65190389201212e-306, 3.916129996301e-306, -8.98798625795533e+307, -1.90877083252545e-287, -8.93447058037318e-157, -9.7771978033507e-292, 1.39348334607466e-309, 2.37192207663558e-103, NaN, NaN, NaN, NaN), x = c(5.4323092248711e-312, 2.332463439243e-12, -8.05583748338107e-287, -1.26836459123889e-30, 9.37339630957792e-312, -5.78534238436574e-34, -1.26836459270829e-30, -1.26836459122741e-30, 9.37339630957792e-312, 1.70257006040729e-313, -3.9759940224262e-34, 5.44329432060333e-312, -3.65223708759439e+304, NaN, NaN, NaN, -2.30331110979915e-156, -1.32083970121448e-284, 3.65207087186091e-306, NaN, NaN, NaN, -6.56920793574151e-287, -2.11965588233324e-289, -1.83593039382815e-307, -2.6064446869563e+304, -5.66324542991478e+303, 2.84809454421703e-306, 3.78987813636309e-312, 3.65365169083783e-306, 2.8480945455619e-306, -2.2982794060734e-185, -2.35343736826454e-185, -2.35343736640235e-185, 7036874417766.4, 2.80365634892121e-312, -9.7757963632732e-150, 2.73729184714066e-312))
# Invoke the fuzzed entry point with the captured argument list.
result <- do.call(grattan::IncomeTax,testlist)
# Print the structure of the result (or error output) for triage.
str(result)
|
741a7ea191bb747e5f98898b7d8c9f08352baeaf
|
f786c6b7ea4b65b280f666fde497df4940e7c02c
|
/man/get_county_map.Rd
|
a317447e26f22120e60271edf08d404eac8b5f5c
|
[] |
no_license
|
southwick-associates/sadash
|
affbeb6b3c418ce6f9af7b99d28867182df1f78c
|
26ed9b6440043e5246f6b5e8800b077131f4ac79
|
refs/heads/master
| 2022-01-25T17:01:31.374910
| 2022-01-05T20:45:58
| 2022-01-05T20:45:58
| 208,875,090
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 869
|
rd
|
get_county_map.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/visualize.R
\name{get_county_map}
\alias{get_county_map}
\title{Load county spatial data for selected state}
\usage{
get_county_map(state)
}
\arguments{
\item{state}{abbreviation of state to pull}
}
\description{
Load county spatial data for selected state
}
\examples{
\dontrun{
library(ggplot2)
county_map <- get_county_map("SC")
ggplot(county_map) +
geom_polygon(aes(long, lat, group = county))
}
}
\seealso{
Other functions to run dashboard visualization:
\code{\link{int_breaks}()},
\code{\link{join_county_map}()},
\code{\link{plot_bar}()},
\code{\link{plot_county}()},
\code{\link{plot_month}()},
\code{\link{plot_value2}()},
\code{\link{plotly_config}()},
\code{\link{run_visual_county}()},
\code{\link{ui_button_layout}()}
}
\concept{functions to run dashboard visualization}
|
d385928ba0b9acfa3a5cc5e1b5eca01a5ad11ce4
|
a500013b7a3733f72d747082e10801e98567097a
|
/archived_script/stats_comparing_taxa_in_pubs_and_cases.R
|
7f6c28d0c5ec0c720b9176e2c80095d79743578e
|
[] |
no_license
|
robcrystalornelas/impacts_systematic_review
|
4ff913c79e3f7b14d6ba79f6cc4f9e612fe68c29
|
9ed0f457f72bad9fb7de420bb7a9744dd9ada667
|
refs/heads/master
| 2022-03-18T14:40:31.648205
| 2019-11-20T00:19:12
| 2019-11-20T00:19:12
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,394
|
r
|
stats_comparing_taxa_in_pubs_and_cases.R
|
## READ IN DATA ####
# clean_raw_data.R is expected to define `raw_data` used throughout below.
source("~/Desktop/Impacts Systematic Review/scripts/impacts_systematic_review/clean_raw_data.R")
## LOAD PACKAGES ####
library(ggplot2)
library(dplyr)
library(ggthemes)
library(grid)
library(data.table)  # setDT() is used below; load it explicitly here rather than relying on clean_raw_data.R
# Expected proportions for UNIQUE species
# Proportions of each taxa using UNIQUE SPECIES: one row per (species, taxa) pair.
subset_species_and_taxa <- dplyr::select(raw_data, invasivespecies, invasivespeciestaxa)
unique_species_and_taxa <- unique(subset_species_and_taxa)
head(unique_species_and_taxa)
dim(unique_species_and_taxa)
counted_unique_species <- as.data.frame(dplyr::count(unique_species_and_taxa, invasivespeciestaxa))
dim(counted_unique_species)
head(counted_unique_species)
# Proportion of unique species per taxa = expected distribution for the chi-square tests.
setDT(counted_unique_species)[, Prop := n/sum(n)]
expected_taxa_prop <- counted_unique_species$Prop
expected_taxa_prop
# counts of taxa in all case studies
subset_species_and_case <- dplyr::select(raw_data, code, invasivespecies, invasivespeciestaxa)
counted_case_studies_taxa <- as.data.frame(dplyr::count(subset_species_and_case, invasivespeciestaxa))
head(counted_case_studies_taxa) # this shows all 2,000 + case studies
# counts of unique taxa across publications (unique per publication code)
subset_species_and_publications <- dplyr::select(raw_data, code, invasivespecies, invasivespeciestaxa)
head(subset_species_and_publications)
unique_species_and_publications <- unique(subset_species_and_publications)
head(unique_species_and_publications)
counted_species_and_publications <- as.data.frame(dplyr::count(unique_species_and_publications, invasivespeciestaxa))
head(counted_species_and_publications) # this shows all 2,000 + case studies
# comparing expected (based on unique species) to case studies
counted_case_studies_taxa_n <- counted_case_studies_taxa$n
chisq.test(x = counted_case_studies_taxa_n, p = expected_taxa_prop)
# comparing expected (based on unique species) to publications
counted_species_and_publications_n <- counted_species_and_publications$n
publications_chi <- chisq.test(x = counted_species_and_publications_n, p = expected_taxa_prop)
publications_chi$observed
publications_chi$expected
# comparing taxonomic trends in publications and case studies
# First get case study proportions
counted_case_studies_taxa
setDT(counted_case_studies_taxa)[, Prop := n/sum(n)]
counted_case_studies_taxa_prop <- counted_case_studies_taxa$Prop
counted_case_studies_taxa_prop
expected_taxa_prop
expected_taxa_count <- counted_unique_species$n
|
a00555b19eaee2d277a95973893440eaac5d63fe
|
27c3030d0f13190a657c85705953d43e0c6d9e11
|
/man/cAMARETTO_HTMLreport.Rd
|
9bd485c645da4b48ca18f512f3f5a202873bdbdf
|
[
"Apache-2.0"
] |
permissive
|
broadinstitute/CommunityAMARETTO
|
7f6ea2f4563e0dc1b2b1c4add76e57af88cb1be4
|
5d6e50ee04fecfda06b5c7419da48f3119c219c5
|
refs/heads/master
| 2021-07-09T15:31:43.250678
| 2020-04-02T16:26:10
| 2020-04-02T16:26:10
| 149,328,234
| 5
| 3
| null | 2019-04-02T01:17:14
| 2018-09-18T17:37:55
|
R
|
UTF-8
|
R
| false
| true
| 2,114
|
rd
|
cAMARETTO_HTMLreport.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cAMARETTO_HTMLreport.R
\name{cAMARETTO_HTMLreport}
\alias{cAMARETTO_HTMLreport}
\title{cAMARETTO_HTMLreport
Creates a HTMLreport for the community AMARETTO results}
\usage{
cAMARETTO_HTMLreport(
cAMARETTOresults = list(),
cAMARETTOnetworkM = list(),
cAMARETTOnetworkC = list(),
PhenotypeTablesList = NULL,
output_address = "./",
HTMLsAMARETTOlist = NULL,
CopyAMARETTOReport = TRUE,
hyper_geo_reference = NULL,
hyper_geo_reference_gp = NULL,
hyper_geo_reference_cp = NULL,
driverGSEA = TRUE,
NrCores = 2
)
}
\arguments{
\item{cAMARETTOresults}{The output of the Results function.}
\item{cAMARETTOnetworkM}{The output of the Module Network function.}
\item{cAMARETTOnetworkC}{The output of the Identify Communities function.}
\item{PhenotypeTablesList}{List of Phenotype Association Tables
for different AMARETTO runs.}
\item{output_address}{The output repository for the HTML report.}
\item{HTMLsAMARETTOlist}{A list with AMARETTO reports to link with the
Community AMARETTO report. If NULL, no links are added.}
\item{CopyAMARETTOReport}{Boolean to indicate if the AMARETTO reports
needs to be copied in the AMARETTO report directory.
In this way links are contained when moving the HTML directory.}
\item{hyper_geo_reference}{A reference gmt file to perform
the Hyper Geometric Test.}
\item{hyper_geo_reference_gp}{Hypergeometric test table
for genetic perturbation}
\item{hyper_geo_reference_cp}{Hypergeometric test table for
chemical perturbation}
\item{driverGSEA}{if TRUE, driver genes beside the target genes will also
be included for hypergeometric test.}
\item{NrCores}{Number of Cores to use during generation of the HTML report.}
}
\value{
A set of HTMLs, giving characteristics of the communities
}
\description{
cAMARETTO_HTMLreport
Creates a HTMLreport for the community AMARETTO results
}
\examples{
try(
cAMARETTO_HTMLreport(cAMARETTOresults,
cAMARETTOnetworkM,
cAMARETTOnetworkC,
HTMLsAMARETTOlist = HTMLsAMARETTOlist,
hyper_geo_reference = gmtfile,
output_address= "./")
)
}
|
0ca0a3b1de7a5f63ec2c57b2d4178e771f624273
|
95a50903de88ffb02f1aa1f27a53aa6439cb9ac3
|
/Assignment7/Suppl7/Data_Imputation.R
|
d55ac5dc62ec74e6cb7a2d2436e2dd5e54528470
|
[] |
no_license
|
maxthemagician/BioInformatics3
|
46b2dff420dab3c818c3fcd7233cebff5369095a
|
8468240ab99893a9947635e661537f84a2ec580a
|
refs/heads/master
| 2020-03-10T14:20:08.165461
| 2018-07-10T12:18:11
| 2018-07-10T12:18:11
| 129,423,764
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,107
|
r
|
Data_Imputation.R
|
# Impute missing expression values by drawing from a down-shifted normal
# distribution (mean at the 25% quantile, sd = 1/4 of the observed sd),
# then overlay histograms of the full, original, and imputed distributions.

# Load required packages, installing them first if missing.
# (A bare `if(!require(pkg)) install.packages(pkg)` installs the package but
# never loads it in the same run, so fread() below would fail on first use.)
for (pkg in c("data.table", "ggplot2")) {
  if (!require(pkg, character.only = TRUE)) {
    install.packages(pkg)
    library(pkg, character.only = TRUE)
  }
}

dat <- fread("ms_toy.txt")        # read data
dat <- as.matrix(dat)             # matrix form for simpler column handling
org_dat <- dat[, 1]               # work on a single column
c1 <- org_dat                     # untouched copy for the "observed" histogram

s <- sd(org_dat, na.rm = TRUE)    # standard deviation of observed values
s <- 1 / 4 * s                    # shrink spread of the imputation distribution
q <- quantile(org_dat, na.rm = TRUE)
nm <- as.numeric(q[2])            # new mean = 25% quantile (down-shifted)

# Replace missing entries with draws from the down-shifted distribution.
# Vectorized rnorm(k) draws the same values, in order, as k calls to
# rnorm(1) with the same RNG state, so results match the old element loop.
na_idx <- is.na(org_dat)
org_dat[na_idx] <- rnorm(sum(na_idx), mean = nm, sd = s)
filles <- org_dat[na_idx]         # the imputed values themselves

# Full distribution with imputed values (green), original values (blue),
# and imputed values alone (red).
hist(org_dat, col = "Green", xlab = "Expression Value", main = "Data Distribution")
hist(c1, col = "Blue", add = TRUE)
hist(filles, col = "Red", add = TRUE)
|
e0c4a6541e7544b21a9cf15e6ec18dd7444d33cd
|
d7d4f3aac9c493ac1837d529c4361ad4e6f4659a
|
/Day 21 (Elevation)/Day 21 (Elevation).R
|
d4583f54a75e4398ab3fb2d34ba7b104d8bc2b09
|
[] |
no_license
|
venkatnsn/30DayMapChallenge
|
2047718cae45c56657c096819f043d3c772c0ab3
|
2797a77ea0b5261367e2dbdaf3cd14619b75ee98
|
refs/heads/main
| 2023-09-03T00:50:31.709478
| 2021-11-23T05:26:48
| 2021-11-23T05:26:48
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 13,581
|
r
|
Day 21 (Elevation).R
|
# Libraries
library(osmdata)
library(sf)
library(tidyverse)
library(elevatr)
library(raster)
library(sysfonts)
library(showtext)
library(cowplot)
# Aesthetics setup --------------------------------------------------------
showtext_auto()       # render all text via showtext so Google fonts work in saved devices
sf_use_s2(FALSE)      # switch sf to planar (GEOS) geometry operations
# Shared 8-band palette, warm (low elevation) -> purple (high elevation).
colors1 <- c("#ffd902", "#FFAB28", "#FF751B", "#FF3E0D", "#cc0600", "#a20d46", "#5f0078", "#3f004f")
font_add_google("Fira Sans")
font1 <- "Fira Sans"
# Roads and Borders --------------------------------------------------------
# Query OpenStreetMap for a city's administrative border (at `adminlevel`,
# keeping only the polygon named `filtername`) and its primary/secondary/
# tertiary/residential streets, then clip the streets to the border.
# Returns a list: [[1]] border (sf multipolygons), [[2]] clipped streets (sf lines).
rbFun <- function(cityname, filtername, adminlevel){
  border <- opq(bbox=cityname) %>%
    add_osm_feature(key="admin_level", value=adminlevel) %>%
    osmdata_sf() %>%
    .$osm_multipolygons %>%
    dplyr::select(osm_id, name, geometry) %>%
    dplyr::filter(name==filtername)
  streets <- opq(bbox=cityname) %>%
    add_osm_feature(key="highway", value=c("primary", "secondary", "tertiary", "residential")) %>%
    osmdata_sf() %>%
    .$osm_lines %>%
    dplyr::select(osm_id, name, geometry)
  streetsInt <- st_intersection(streets, border)
  # Return the pair explicitly. (The original ended with `final <- list(...)`,
  # whose value is returned invisibly; the dead assignment added nothing.)
  list(border, streetsInt)
}
# Selected Cities ----------------------------------------------------------
# Admin levels differ per country, hence the varying third argument.
Milan <- rbFun("Milan, Italy", "Milano", "8")
Nyc <- rbFun("New York City", "New York", "5")
Cdmx <- rbFun("Mexico City, Mexico", "Ciudad de México", "4")
Paris <- rbFun("Paris, France", "Paris", "8")
BuenosAires <- rbFun("Buenos Aires, Argentina", "Buenos Aires", "8")
Berlin <- rbFun(c(13.099, 52.6674, 13.7892, 52.337), "Berlin", "4")
Mumbai <- rbFun("Mumbai, India", "Mumbai", "8")
# Custom London --------------------------------------------------------------
# London needs two admin polygons ("London" + "City of London") unioned into
# one border, so it is built by hand instead of through rbFun().
LondonBorder <- opq(bbox=c(-0.55, 51.2911, 0.25, 51.744)) %>%
  add_osm_feature(key="admin_level", value="6") %>%
  osmdata_sf() %>%
  .$osm_multipolygons %>%
  dplyr::select(osm_id, name, geometry) %>%
  dplyr::filter(name %in% c("London", "City of London")) %>%
  st_union()
LondonStreets <- opq(bbox=c(-0.55, 51.2911, 0.25, 51.744)) %>%
  add_osm_feature(key="highway", value=c("primary", "secondary", "tertiary", "residential")) %>%
  osmdata_sf() %>%
  .$osm_lines %>%
  dplyr::select(osm_id, name, geometry)
LondonStreentsint <- st_intersection(LondonStreets, LondonBorder)
# Same [[border, streets]] shape as rbFun() results so downstream code is uniform.
London <- list(LondonBorder, LondonStreentsint)
rm(LondonBorder, LondonStreentsint, LondonStreets)
# City Elevation -------------------------------------------------------------
# Download an elevation raster for the border's bounding box, polygonize it,
# clip to the border, and bin elevations with the `cutVec` breaks.
# Returns a list: [[1]] binned polygons (extra `cut` factor column),
#                 [[2]] raw clipped elevation polygons.
ElFun <- function(border, cutVec){
  Elev <- get_elev_raster(locations = border, z = 7, clip = "bbox")
  Poly <- rasterToPolygons(Elev) %>% st_as_sf()
  CutPoly <- st_intersection(Poly, st_make_valid(border)) %>% dplyr::filter(st_is_valid(.)==TRUE)
  # The intersection carries id/name columns when the border was a named
  # multipolygon (4 cols) but not for a bare unioned geometry (2 cols).
  colnames(CutPoly) <- if(ncol(CutPoly)==4){c("Elevation", "Id", "Name", "geometry")}else{c("Elevation", "geometry")}
  Final <- CutPoly %>% mutate(cut = cut(Elevation, cutVec)) %>% st_as_sf()
  # Return explicitly instead of leaving the value in a dead assignment
  # (the original `Return <- list(...)` returned it invisibly).
  list(Final, CutPoly)
}
# City Elevations ------------------------------------------------------------
# Break vectors are per-city (8 bands each) and were tuned by inspecting the
# summaries below; the extreme first/last breaks just catch outliers.
MilanEle <- ElFun(Milan[[1]], c(-100, 110, 115, 120, 125, 130, 135, 140, 2000))
NycEle <- ElFun(Nyc[[1]], c(-100, 0, 5, 10, 15, 20, 25, 30, 2000))
CdmxEle <- ElFun(Cdmx[[1]], c(-100, 2250, 2450, 2650, 2850, 3050, 3250, 3450, 5000))
ParisEle <- ElFun(Paris[[1]], c(-100, 30, 35, 40, 45, 50, 55, 60, 1000))
LondonEle <- ElFun(London[[1]], c(-100, 10, 20, 30, 40, 50, 60, 70, 5000))
MumbaiEle <- ElFun(Mumbai[[1]], c(-400, 0, 5, 10, 15, 20, 25, 30, 5000))
BuenosAiresEle <- ElFun(BuenosAires[[1]], c(-100, 0, 5, 10, 15, 20, 25, 30, 5000))
BerlinEle <- ElFun(Berlin[[1]], c(-100, 30, 35, 40, 45, 50, 55, 60, 5000))
## Use these to test for good breaks
#mosaic::favstats(~BerlinEle[[2]]$Elevation)
#table(BerlinEle[[1]]$cut)
# Road Elevations -------------------------------------------------------------
# For each elevation band (level of `Cuts`), union that band's polygons and
# intersect the roads with the unioned area, tagging results with the band
# label in a `level` column. Returns the bands bound into one sf data frame.
RoadEle <- function(ElevationFrame, Roads, Cuts){
  # Build one piece per band and bind once at the end, instead of growing a
  # data frame with rbind() inside a loop (quadratic copying).
  # `for` over a factor coerces to character, so coerce explicitly here to
  # keep the `level` column identical to the original loop's output.
  pieces <- lapply(as.character(unique(Cuts)), function(band){
    FilteredDf <- ElevationFrame %>% dplyr::filter(cut==band)
    Unioned <- st_union(FilteredDf) %>% as.data.frame() %>% st_as_sf()
    st_intersection(Roads, Unioned) %>% mutate(level=band)
  })
  # do.call(rbind, list()) is NULL, matching the original's NULL accumulator.
  do.call(rbind, pieces)
}
# City Road Elevations (Final Step) -------------------------------------------
# Tag each city's clipped streets ([[2]] from rbFun) with the elevation band
# they fall in ([[1]] from ElFun, whose `cut` column supplies the band labels).
MilanRoads <- RoadEle(MilanEle[[1]], Milan[[2]], MilanEle[[1]]$cut)
NycRoads <- RoadEle(NycEle[[1]], Nyc[[2]], NycEle[[1]]$cut)
CdmxRoads <- RoadEle(CdmxEle[[1]], Cdmx[[2]], CdmxEle[[1]]$cut)
ParisRoads <- RoadEle(ParisEle[[1]], Paris[[2]], ParisEle[[1]]$cut)
LondonRoads <- RoadEle(LondonEle[[1]], London[[2]], LondonEle[[1]]$cut)
MumbaiRoads <- RoadEle(MumbaiEle[[1]], Mumbai[[2]], MumbaiEle[[1]]$cut)
BuenosAiresRoads <- RoadEle(BuenosAiresEle[[1]], BuenosAires[[2]], BuenosAiresEle[[1]]$cut)
BerlinRoads <- RoadEle(BerlinEle[[1]], Berlin[[2]], BerlinEle[[1]]$cut)
# Plotter ----------------------------------------------------------------------
# Draw one city's street-elevation map with a manual color-band legend.
#   df       - sf data frame of roads with a `level` column (from RoadEle)
#   linesize - line width for geom_sf (tuned per city area)
#   cityname - title text, also interpolated into the subtitle
#   labels   - 8 human-readable band labels drawn over the legend keys
#   levels   - the 8 factor levels of `level`, in palette order (must match
#              the cut() breaks used for this city)
# Relies on globals `colors1` (palette) and `font1` (font family).
# The draw_label x/y values are hand-tuned for a 15x15 inch output.
plotter <- function(df, linesize, cityname, labels, levels){
  plot <- ggplot() +
    geom_sf(data=df, aes(color=level, fill=level), size=linesize) +
    scale_color_manual(values = colors1, guide=guide_legend(nrow = 1),
                       breaks=levels) +
    scale_fill_manual(values = colors1, guide=guide_legend(nrow = 1),
                      breaks = levels) +
    theme_void() +
    coord_sf() +
    theme(legend.position = c(0.5, 1.025),
          legend.direction = "horizontal",
          legend.key.width = unit(3.5, "cm"),
          legend.key.height = unit(1, "cm"),
          legend.text = element_blank(),
          legend.title = element_blank())
  # Wrap in cowplot::ggdraw so title, subtitle, credit line, and the 8 legend
  # labels can be positioned with absolute canvas coordinates.
  ggdraw(plot) +
    theme(plot.margin = margin(6.5, 0.5, 1, 0.5, "cm"),
          plot.background = element_rect(fill="#fdf9f5", color="#fdf9f5"),
          panel.background = element_rect(fill="#fdf9f5", color="#fdf9f5")) +
    draw_label(label=cityname, x=0.5, y=1.14, size=250, fontfamily = font1, fontface = "bold", color="#3f004f") +
    draw_label(label=paste("Map shows the elevation (in meters) of", cityname, "by street."), x=0.5, y=1.068, size=70, color="#3f004f", fontfamily = font1) +
    draw_label(label="Twitter: @BlakeRobMills | Source: OpenStreetMap.org | GitHub: BlakeRMills", x=0.5, y=-0.01, size=50, fontface="bold", color="#3f004f", fontfamily = font1) +
    # Legend band labels: dark text on the first two (light) swatches,
    # light text on the remaining (dark) swatches.
    draw_label(label=labels[1], x=0.133, y=1.025, size=55, fontface="bold", color="grey15", fontfamily = font1) +
    draw_label(label=labels[2], x=0.238, y=1.025, size=55, fontface="bold", color="grey15", fontfamily = font1) +
    draw_label(label=labels[3], x=0.343, y=1.025, size=55, fontface="bold", color="grey95", fontfamily = font1) +
    draw_label(label=labels[4], x=0.448, y=1.025, size=55, fontface="bold", color="grey95", fontfamily = font1) +
    draw_label(label=labels[5], x=0.553, y=1.025, size=55, fontface="bold", color="grey95", fontfamily = font1) +
    draw_label(label=labels[6], x=0.658, y=1.025, size=55, fontface="bold", color="grey95", fontfamily = font1) +
    draw_label(label=labels[7], x=0.763, y=1.025, size=55, fontface="bold", color="grey95", fontfamily = font1) +
    draw_label(label=labels[8], x=0.868, y=1.025, size=55, fontface="bold", color="grey95", fontfamily = font1)
}
# Per-city plots. The second label/level vectors must line up with the cut()
# breaks chosen for that city in the ElFun calls above.
# NOTE(review): ggsave() with no `plot =` saves the last *displayed* plot;
# since each plotter() result is assigned, consider passing it explicitly
# (e.g. ggsave(..., plot = MilanPlot)) — verify before relying on output.
MilanPlot <- plotter(MilanRoads, 1, "Milan", c("< 110", "110-115", "115-120", "120-125", "125-130", "130-135", "135-140", "> 140"),
                     c("(-100,110]", "(110,115]", "(115,120]", "(120,125]", "(125,130]", "(130,135]", "(135,140]", "(140,2e+03]"))
ggsave("~/Desktop/Milan.png", height = 15, width = 15)
NycPlot <- plotter(NycRoads, 0.35, "New York", c("< 0", "0-5", "5-10", "10-15", "15-20", "20-25", "25-30", "> 30"),
                   c("(-100,0]", "(0,5]", "(5,10]", "(10,15]", "(15,20]", "(20,25]", "(25,30]", "(30,2e+03]"))
ggsave("~/Desktop/NewYork.png", height = 15, width = 15)
CdmxPlot <- plotter(CdmxRoads, 0.25, "Mexico City", c("< 2250", "2250-2450", "2450-2650", "2650-2850", "2850-3050", "3050-3250", "3250-3450", "> 3450"),
                    c("(-100,2.25e+03]", "(2.25e+03,2.45e+03]", "(2.45e+03,2.65e+03]", "(2.65e+03,2.85e+03]", "(2.85e+03,3.05e+03]", "(3.05e+03,3.25e+03]", "(3.25e+03,3.45e+03]", "(3.45e+03,5e+03]"))
ggsave("~/Desktop/Cdmx.png", height = 15, width = 15)
LondonPlot <- plotter(LondonRoads, 0.4, "London", c("< 10", "10-20", "20-30", "30-40", "40-50", "50-60", "60-70", "> 70"),
                      c("(-100,10]", "(10,20]", "(20,30]", "(30,40]", "(40,50]", "(50,60]", "(60,70]", "(70,5e+03]"))
ggsave("~/Desktop/London.png", height = 15, width = 15)
BuenosAiresPlot <- plotter(BuenosAiresRoads, 0.9, "Buenos Aires", c("< 0", "0-5", "5-10", "10-15", "15-20", "20-25", "25-30", "> 30"),
                           c("(-100,0]", "(0,5]", "(5,10]", "(10,15]", "(15,20]", "(20,25]", "(25,30]", "(30,5e+03]"))
ggsave("~/Desktop/BuenosAires.png", height = 15, width = 15)
MumbaiPlot <- plotter(MumbaiRoads, 0.5, "Mumbai", c("< 0", "0-5", "5-10", "10-15", "15-20", "20-25", "25-30", "> 30"),
                      c("(-400,0]", "(0,5]", "(5,10]", "(10,15]", "(15,20]", "(20,25]", "(25,30]", "(30,5e+03]"))
ggsave("~/Desktop/Mumbai.png", height = 15, width = 15)
# Paris and Berlin were not working through plotter(); their plots are built
# inline below with slightly different margins/label y-positions.
## Paris
ParisLabs <- c("< 30", "30-35", "35-40", "40-45", "45-50", "50-55", "55-60", "> 60")
ParisPlot <- ggplot() +
  geom_sf(data=ParisRoads, aes(color=level, fill=level), size=1) +
  scale_color_manual(values = colors1, guide=guide_legend(nrow = 1),
                     breaks=c("(-100,30]", "(30,35]", "(35,40]", "(40,45]", "(45,50]", "(50,55]", "(55,60]", "(60,1e+03]")) +
  scale_fill_manual(values = colors1, guide=guide_legend(nrow = 1),
                    breaks=c("(-100,30]", "(30,35]", "(35,40]", "(40,45]", "(45,50]", "(50,55]", "(55,60]", "(60,1e+03]")) +
  theme_void() +
  coord_sf() +
  theme(legend.position = c(0.5, 1.05),
        legend.direction = "horizontal",
        legend.key.width = unit(3.5, "cm"),
        legend.key.height = unit(1, "cm"),
        legend.text = element_blank(),
        legend.title = element_blank())
# Absolute-coordinate annotations; note the lower y values vs plotter()
# because the Paris canvas is 12x15 rather than 15x15.
ParisPlot2 <- ggdraw(ParisPlot) +
  theme(plot.margin = margin(2, 0.5, -2, 0.5, "cm"),
        plot.background = element_rect(fill="#fdf9f5", color="#fdf9f5"),
        panel.background = element_rect(fill="#fdf9f5", color="#fdf9f5")) +
  draw_label(label="Paris", x=0.5, y=0.99, size=250, fontfamily = font1, fontface = "bold", color="#3f004f") +
  draw_label(label=paste("Map shows the elevation (in meters) of Paris by street."), x=0.5, y=0.91, size=70, color="#3f004f", fontfamily = font1) +
  draw_label(label="Twitter: @BlakeRobMills | Source: OpenStreetMap.org | GitHub: BlakeRMills", x=0.5, y=0.1, size=50, fontface="bold", color="#3f004f", fontfamily = font1) +
  draw_label(label=ParisLabs[1], x=0.133, y=0.861, size=55, fontface="bold", color="grey15", fontfamily = font1) +
  draw_label(label=ParisLabs[2], x=0.238, y=0.861, size=55, fontface="bold", color="grey15", fontfamily = font1) +
  draw_label(label=ParisLabs[3], x=0.343, y=0.861, size=55, fontface="bold", color="grey95", fontfamily = font1) +
  draw_label(label=ParisLabs[4], x=0.448, y=0.861, size=55, fontface="bold", color="grey95", fontfamily = font1) +
  draw_label(label=ParisLabs[5], x=0.553, y=0.861, size=55, fontface="bold", color="grey95", fontfamily = font1) +
  draw_label(label=ParisLabs[6], x=0.658, y=0.861, size=55, fontface="bold", color="grey95", fontfamily = font1) +
  draw_label(label=ParisLabs[7], x=0.763, y=0.861, size=55, fontface="bold", color="grey95", fontfamily = font1) +
  draw_label(label=ParisLabs[8], x=0.868, y=0.861, size=55, fontface="bold", color="grey95", fontfamily = font1)
ggsave("~/Desktop/Paris.png", height = 12, width = 15)
## Berlin (built inline; plotter() did not work for this city)
BerlinLabs <- c("< 30", "30-35", "35-40", "40-45", "45-50", "50-55", "55-60", "> 60")
BerlinPlot <- ggplot() +
  geom_sf(data=BerlinRoads, aes(color=level, fill=level), size=0.6) +
  scale_color_manual(values = colors1, guide=guide_legend(nrow = 1),
                     breaks=c("(-100,30]", "(30,35]", "(35,40]", "(40,45]", "(45,50]", "(50,55]", "(55,60]", "(60,5e+03]")) +
  scale_fill_manual(values = colors1, guide=guide_legend(nrow = 1),
                    breaks=c("(-100,30]", "(30,35]", "(35,40]", "(40,45]", "(45,50]", "(50,55]", "(55,60]", "(60,5e+03]")) +
  theme_void() +
  coord_sf() +
  theme(legend.position = c(0.5, 1.025),
        legend.direction = "horizontal",
        legend.key.width = unit(3.5, "cm"),
        legend.key.height = unit(1, "cm"),
        legend.text = element_blank(),
        legend.title = element_blank())
# Same absolute-coordinate annotation pattern as plotter(), 15x15 canvas.
BerlinPlot2 <- ggdraw(BerlinPlot) +
  theme(plot.margin = margin(6.5, 0.5, 1, 0.5, "cm"),
        plot.background = element_rect(fill="#fdf9f5", color="#fdf9f5"),
        panel.background = element_rect(fill="#fdf9f5", color="#fdf9f5")) +
  draw_label(label="Berlin", x=0.5, y=1.14, size=250, fontfamily = font1, fontface = "bold", color="#3f004f") +
  draw_label(label=paste("Map shows the elevation (in meters) of Berlin by street."), x=0.5, y=1.068, size=70, color="#3f004f", fontfamily = font1) +
  draw_label(label="Twitter: @BlakeRobMills | Source: OpenStreetMap.org | GitHub: BlakeRMills", x=0.5, y=-0.01, size=50, fontface="bold", color="#3f004f", fontfamily = font1) +
  draw_label(label=BerlinLabs[1], x=0.133, y=1.004, size=55, fontface="bold", color="grey15", fontfamily = font1) +
  draw_label(label=BerlinLabs[2], x=0.238, y=1.004, size=55, fontface="bold", color="grey15", fontfamily = font1) +
  draw_label(label=BerlinLabs[3], x=0.343, y=1.004, size=55, fontface="bold", color="grey95", fontfamily = font1) +
  draw_label(label=BerlinLabs[4], x=0.448, y=1.004, size=55, fontface="bold", color="grey95", fontfamily = font1) +
  draw_label(label=BerlinLabs[5], x=0.553, y=1.004, size=55, fontface="bold", color="grey95", fontfamily = font1) +
  draw_label(label=BerlinLabs[6], x=0.658, y=1.004, size=55, fontface="bold", color="grey95", fontfamily = font1) +
  draw_label(label=BerlinLabs[7], x=0.763, y=1.004, size=55, fontface="bold", color="grey95", fontfamily = font1) +
  draw_label(label=BerlinLabs[8], x=0.868, y=1.004, size=55, fontface="bold", color="grey95", fontfamily = font1)
ggsave("~/Desktop/Berlin.png", height = 15, width = 15)
|
6e9b23e1bb161c42e05b7756fb89ea5064d2674c
|
dac4a8f2b14dbb92dd07e9ca9642410ae407a2f2
|
/man/TransEntropy.Rd
|
dc96e4da946a9ea04001dd17a2f138d55249650a
|
[] |
no_license
|
dstgithub/GrpString
|
0710f0b5d1e8a90ee1e94e5a2f6facb19bc48c97
|
45b4da9cc59c71ddb8b53d7b6753665b7ff960fe
|
refs/heads/master
| 2021-01-12T03:26:45.555515
| 2017-11-15T21:40:25
| 2017-11-15T21:40:25
| 78,210,123
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,179
|
rd
|
TransEntropy.Rd
|
\name{TransEntropy}
\alias{TransEntropy}
\title{
Transition entropy of each string in a group
}
\description{
TransEntropy computes the transition entropy of each of the strings in a group.
}
\usage{
TransEntropy(strings.vec)
}
\arguments{
\item{strings.vec}{
String Vector.
}
}
\value{
Returns a number vector.
}
\details{
Entropy is calculated using the Shannon entropy formula: -sum(freqs * log2(freqs)). Here, freqs are transition frequencies, which are the values in the normalized transition matrix exported by function TransMx in this package. The formula is equivalent to the function entropy.empirical in the 'entropy' package when unit is set to log2.
}
\note{
Strings with less than 2 characters are not included for computation of entropy.
}
\references{
I. Hooge; G. Camps. (2013) Scan path entropy and arrow plots: capturing scanning behavior of multiple observers. Frontiers in Psychology.
}
\seealso{
\code{\link{TransEntro}},
\code{\link{TransMx}}
}
\examples{
# default values
stra.vec <- c("ABCDdefABCDa", "def123DC", "A", "123aABCD", "ACD13", "AC1ABC", "3123fe")
TransEntropy(stra.vec)
}
\keyword{programming}
|
ad6e8fe132f4f8afe6f5df9c553781a579553ca5
|
1e42b9829b85bc37d112ec5b8efa1682264297b2
|
/man/filter_trace_length.Rd
|
0092e0f8902ddc9798a6c4a450de89d015ab5246
|
[] |
no_license
|
strategist922/edeaR
|
ca83bf91f58e685bc9333f4db3bfea3d8c019343
|
ad96118cccfdc90a7bed94f5aef2ee0cfab3aac8
|
refs/heads/master
| 2021-07-05T04:30:35.286640
| 2017-09-27T12:25:04
| 2017-09-27T12:25:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,163
|
rd
|
filter_trace_length.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/filter_trace_length.R
\name{filter_trace_length}
\alias{filter_trace_length}
\alias{ifilter_trace_length}
\title{Filter: Trace length percentile}
\usage{
filter_trace_length(eventlog, lower_threshold = NULL,
upper_threshold = NULL, percentile_cut_off = NULL, reverse = F)
ifilter_trace_length(eventlog)
}
\arguments{
\item{eventlog}{The event log to be used. An object of class
\code{eventlog}.}
\item{lower_threshold}{The lower trace length threshold.
When \code{reverse} is FALSE, all traces with a shorter length are discarded.}
\item{upper_threshold}{The upper trace length threshold.
When \code{reverse} is FALSE, all traces with a longer length are discarded.}
\item{percentile_cut_off}{Alternatively to providing thresholds, a percentile cut off can be provided.
A percentile cut off value of 0.9 will return the 90\% shortest cases.
When \code{reverse} is set to TRUE, it will return the 10\% longest cases.}
\item{reverse}{A logical parameter depicting whether the selection should be reversed.}
}
\description{
Filters cases on length, using a percentile threshold.
}
|
a2dca350e1ecf7e5ddd27765ac851eaf15d597c3
|
25541dc86ec95929bce47611f8bc3f66f4ebe2d1
|
/misc/scripts/NICE checker.R
|
557d660531f887052c148dbfc46db76f5c92b5fe
|
[
"LicenseRef-scancode-public-domain",
"CC0-1.0"
] |
permissive
|
JoepvanderPlas/covid-19
|
04822770d5c6203b5b1668092a31053ed0fd99ad
|
02eae255b60121fcba7cd1077694bd6d2c04f046
|
refs/heads/master
| 2023-06-14T00:23:57.124675
| 2021-07-15T13:33:54
| 2021-07-15T13:33:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,486
|
r
|
NICE checker.R
|
#### NICE CHECKER ####
# Compare the four most recent daily NICE csv exports to spot day-over-day
# changes and per-date revisions in hospital/IC intake figures.
# `frollmean` comes from data.table and `last`/`mutate` from dplyr —
# presumably loaded by parse_nice-data.R; verify if running standalone.
source("workflow/parse_nice-data.R")
temp <- tail(list.files(path = "data-nice/data-nice-json/", pattern = "*.csv", full.names = TRUE), 4)
myfiles <- lapply(temp, read.csv)
# Extract each day's data frame directly with [[ ]] (read.csv already
# returns data frames, so the old as.data.frame(myfiles[i]) wrapper on a
# one-element sub-list was an indirect way to do the same thing).
dat.today <- myfiles[[4]]
dat.yesterday <- myfiles[[3]]
dat.twodaysago <- myfiles[[2]]
dat.threedaysago <- myfiles[[1]]
# Day-over-day change in proven intakes, plus current occupancy.
sum(dat.today$Hospital_Intake_Proven) - sum(dat.yesterday$Hospital_Intake_Proven)
last(dat.today$Hospital_Currently)
sum(dat.today$IC_Intake_Proven) - sum(dat.yesterday$IC_Intake_Proven)
last(dat.today$IC_Current)
# 7-day rolling means of hospital and IC intakes.
dat.today <- dat.today %>%
  mutate(Hospital_Intake_7d = round(frollmean(Hospital_Intake_Proven, 7), 0)) %>%
  mutate(IC_Intake_7d = round(frollmean(IC_Intake_Proven, 7), 0))
# Merge today's and yesterday's files per date to compute revision diffs.
keep_cols <- c("date", "Hospital_Intake_Proven", "Hospital_Intake_Suspected",
               "IC_Intake_Proven", "Hospital_Currently", "IC_Current")
df <- merge(dat.today[, keep_cols], dat.yesterday[, keep_cols], by = "date", all.x = TRUE)
df$diff.proven <- df$Hospital_Intake_Proven.x - df$Hospital_Intake_Proven.y
df$diff.suspec <- df$Hospital_Intake_Suspected.x - df$Hospital_Intake_Suspected.y
df$diff.proven.ic <- df$IC_Intake_Proven.x - df$IC_Intake_Proven.y
df$diff.current.hosp <- df$Hospital_Currently.x - df$Hospital_Currently.y
df$diff.current.ic <- df$IC_Current.x - df$IC_Current.y
#rm(myfiles,temp,dat.threedaysago,dat.twodaysago,dat.yesterday,dat.today,df,vaccine.data,dat)
|
20ca99cc063e39358950e8a9cff40606eb0b4d65
|
7f169a675234c3b5e7ac47ebbc132d2e4efd399b
|
/server.R
|
cd60582f9ead960b4b54960434706e4c2fecb07e
|
[] |
no_license
|
aliahameed/PaedFever
|
4c011544eb7ca5cf8e028e1e51b0f8f90c694894
|
5ae4d0210bf71c2bf9d3d7d17470618f6863a605
|
refs/heads/master
| 2021-01-22T02:39:50.389553
| 2014-07-24T08:26:56
| 2014-07-24T08:26:56
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,602
|
r
|
server.R
|
calculateAge <- function(years, months) {
  # Convert a years-plus-months age into a total number of months.
  months_from_years <- years * 12
  months_from_years + months
}
calculateTemp <- function(method, ctemp, ftemp, scale) {
  # Normalise a measured temperature to its rectal-equivalent Celsius value.
  #
  # method: measurement site ("orally", "under the arm"; anything else is
  #         treated as rectal, the reference site)
  # ctemp:  temperature in Celsius (used unless scale == "fhr")
  # ftemp:  temperature in Fahrenheit (used when scale == "fhr")
  # scale:  "fhr" selects the Fahrenheit input, any other value selects ctemp
  # Returns the rectal-equivalent temperature in degrees Celsius.

  # Convert to Celsius first. Use the exact 5/9 factor rather than the
  # rounded 0.56 so readings near the fever thresholds used by
  # getRecommendation() are not distorted.
  if (scale == 'fhr') {
    temp <- (ftemp - 32) * 5 / 9
  } else {
    temp <- ctemp
  }
  # Oral readings run ~0.3 C below rectal and axillary ~0.6 C below,
  # so adjust upward to the rectal reference.
  if (method == "orally") {
    temp <- temp + 0.3
  } else if (method == "under the arm") {
    temp <- temp + 0.6
  }
  temp
}
getRecommendation <- function(years, months, method, ctemp, ftemp, scale) {
  # Map the child's age and normalised body temperature onto an advice string.
  # Age is expressed in months, temperature as rectal-equivalent Celsius.
  age <- calculateAge(years, months)
  temp <- calculateTemp(method, ctemp, ftemp, scale)
  # Guard clauses, checked from lowest to highest severity cut-off.
  if (temp < 37.3) {
    return("No fever.")
  }
  if (temp < 38.4) {
    return("This is considered a \"low-grade\" fever. Low-grade fevers are useful in fighting off infection. There is no reason to treat a low-grade fever.")
  }
  if ((age < 2) && (temp > 38.3)) {
    return("MEDICAL EMERGENCY: A doctor should evaluate your infant right away, either during business hours, or in an emergency room after hours. Do not give any fever-reducing medications until a doctor has seen your child.")
  }
  if ((age < 4) && (temp > 38.3)) {
    return("Make an appointment with your doctor within the next several hours.")
  }
  if (temp > 39.7) {
    return("This is a high fever. Before calling your doctor, try to bring the temperature down with medication. Acetaminophen or ibuprofen can safely be administered to children. (Never give aspirin to children aged 12 and under.) A lukewarm bath and cool washcloth can be used as well as cool liquids to drink. If these measures don't bring the fever down, or if your child develops any other unusual symptoms (lethargy, irritability, stiff neck or pain in the back of the neck, light sensitivity, vomiting or headache), call your doctor without delay.")
  }
  "This is considered a \"common\" fever. This is generally not serious and can wait until morning to be evaluated. If your child develops any other unusual symptoms (lethargy, irritability, stiff neck or pain in the back of the neck, light sensitivity, vomiting or headache), call your doctor without delay"
}
# Shiny server: echo the user's inputs back to the UI and render the fever
# advice produced by getRecommendation() whenever an input changes.
shinyServer(
  function(input, output) {
    output$age <- renderText({paste(input$years, "year(s) and", input$months, "month(s)")})
    # NOTE(review): always echoes the Fahrenheit field, even when the
    # Celsius scale is selected - confirm this is intended.
    output$temperature <- renderText({input$ftemp})
    output$method <- renderText({input$method})
    output$recommendation <- renderText({getRecommendation(input$years, input$months, input$method, input$ctemp, input$ftemp, input$scale)})
  }
)
|
fde19b799ec3ca7921855d06db4b9bc0f99fadbd
|
3ff86edc5910eb98255d5c9cd644d5b6316d0af8
|
/2019/0508/viya-r-random-forest-with-imputation.R
|
a4a60f6d8b3d31d1f86cb756f0f48f6b5cb37803
|
[] |
no_license
|
bong-ju-kang/sas-school
|
f3e6671c6f57b7fb8c01f8b7464d9552e6385906
|
f898c17be8bf7d6e8a686a7cf1c8d33562d7c202
|
refs/heads/master
| 2023-06-23T19:30:23.549758
| 2023-06-07T07:56:44
| 2023-06-07T07:56:44
| 185,529,951
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 779
|
r
|
viya-r-random-forest-with-imputation.R
|
# Bong Ju Kang
# Random forest sample with missing imputation (SAS Viya "Open Source Code" node)
# 5/8/2019

# The dm_* objects (model formula, input column list, train/score frames and
# target name) are injected by Viya before this script runs - they are not
# defined here.
library(randomForest)

print(dm_model_formula)
print(dm_input)
head(dm_traindf)
str(dm_traindf)

# Fit the forest on the training partition.
dm_model <- randomForest(dm_model_formula, ntree=100, mtry=5, data=dm_traindf, importance=T)

# Prediction: class probabilities for the full input table.
predicted <- predict(dm_model, dm_inputdf,type='prob')
dm_scoreddf <- data.frame(predicted)
colnames(dm_scoreddf)

# Rename the probability columns to the SAS-required P_<target><level> form.
values <- levels(unique(dm_inputdf[,c(dm_dec_target)]))
namelist <- array()
for (i in 1:length(values)){
  namelist[i] <- paste0('P_', dm_dec_target, values[i])
}
colnames(dm_scoreddf) <- namelist

# Emit the report artifact (MSE vs number of trees) for Viya to pick up.
png("rpt_rf_mse_plot.png")
plot(dm_model, main='Random Forest MSE Plot')
dev.off()
|
9945cdf85e0d533bc6e00e3846ed80a639c0f748
|
80169cc686f0c0cd9a9288c863e9b894ed9ed467
|
/NCBR.RTools/man/add_FPKM_to_GR.Rd
|
e40c81c0e213c904cada51e8e45c9db24d584202
|
[] |
no_license
|
arunbodd/NIAID
|
a2c58c9484a4a7ec926123d9faac9a3604730715
|
9ab248b4ab96f177846330e0fa6414e18d9a49be
|
refs/heads/master
| 2020-04-23T00:10:39.661297
| 2019-02-14T22:51:51
| 2019-02-14T22:51:51
| 170,769,487
| 1
| 0
| null | 2019-02-14T22:55:27
| 2019-02-14T22:55:27
| null |
UTF-8
|
R
| false
| true
| 1,925
|
rd
|
add_FPKM_to_GR.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/add_FPKM_to_GR.R
\name{add_FPKM_to_GR}
\alias{add_FPKM_to_GR}
\title{Add FPKM values to a genomic ranges structure}
\usage{
add_FPKM_to_GR(GR, FPKM, FPKMgene, columns, FPKMsymbol = FALSE,
FPKMensembl = FALSE, outFile)
}
\arguments{
\item{GR}{a genomic ranges object containing gene names in metadata column 'name'}
\item{FPKM}{a data.frame containing FPKM data and gene names of the same format as those in GR$name}
\item{FPKMgene}{the name of the column of FPKM to be compared to GR$name for merging}
\item{columns}{a vector of column names from FPKM with FPKM values to be analyzed}
\item{FPKMsymbol}{the name of the column with gene symbols/names to be added to GR [default:""]}
\item{FPKMensembl}{the name of the column with ensembl gene IDs to be added to GR [default:""]}
\item{outFile}{the name of the file to be created with the results of this function, optional.}
}
\value{
a genomic ranges object with FPKM data
}
\description{
Combine a data.frame with FPKM values and a genomic ranges object
}
\details{
Requires: GenomicRanges
Returns a genomic ranges object with FPKM values (mean of replicates, if given)
Gene symbols or ensemblIDs can also be added to the object if requested and
available in the FPKM data.frame
NOTE: current version assumes that length of the genomic ranges object and the
number of rows in the data.frame are equal and that the gene sets are identical, just
in a different order.
}
\examples{
bed <- rtracklayer::import.bed("geneinfo.bed")
RSEM <- read.table("RSEM.genes.FPKM.all_samples.txt", header=T, stringsAsFactors=F)
out <- add_FPKM_to_GR(GR=bed, FPKM=RSEM, FPKMgene="gene_id",
columns=c("S35_FPKM","S36_FPKM"), FPKMsymbol="GeneName",outFile= "geneinfo_FPKM.txt")
}
\author{
Tovah Markowitz \email{tovah.markowitz@nih.gov}
}
\keyword{FPKM}
\keyword{GenomicRanges}
\keyword{RNAseq}
|
14e4fcf43050da8d9c297563cfeba54d3f7a1a42
|
05b698ebe661e7fde47992172f4d72130bbc738e
|
/R/noupclassify.R
|
71268704e64c93b49323d5877dd41fbe24a6af11
|
[] |
no_license
|
cran/upclass
|
b28d5caba03390f4d01caeb39e2408824a95c413
|
a85c1abbea766d7b536d2ac40157c6e80310756b
|
refs/heads/master
| 2018-12-29T06:57:42.908233
| 2013-11-26T00:00:00
| 2013-11-26T00:00:00
| 17,700,692
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 803
|
r
|
noupclassify.R
|
.packageName <- 'upclass'
noupclassify <-
function (Xtrain, cltrain, Xtest, cltest = NULL, modelscope = NULL, ...)
{
  # Fit every model in 'modelscope' without updating, keeping each fit and
  # recording the one with the highest BIC under the "Best" key.
  # When no scope is supplied, derive it from the data dimension.
  if (is.null(modelscope)) {
    n_dims <- if (is.matrix(Xtrain)) ncol(Xtrain) else 1
    modelscope <- modelvec(n_dims)
  }
  results <- list()
  results[["Best"]] <- list()
  top_bic <- -Inf
  for (model_name in modelscope) {
    fit <- noupclassifymodel(Xtrain, cltrain, Xtest, cltest, model_name, ...)
    results[[model_name]] <- fit
    # Track the best fit; fits with an undefined BIC are ignored.
    if (!is.na(fit$bic) && fit$bic > top_bic) {
      results[["Best"]] <- fit
      top_bic <- fit$bic
    }
  }
  class(results) <- "upclassfit"
  results
}
|
1cd050f43fecd551f382e802f23ec29a51f368f7
|
54f9314cf3a933b39ae1316e1d1e78a21f7b8b56
|
/man/get_poly_last_price.Rd
|
4fea337785567ee2875d3acc214624590e8a5079
|
[] |
no_license
|
tanho63/AlpacaforR
|
1b36bcd44a188bd73c908223708bfbdd78820196
|
d23df32fd337185c413dbeed8383bb0221a84034
|
refs/heads/master
| 2023-06-05T11:36:21.782384
| 2020-12-31T18:12:34
| 2020-12-31T18:12:34
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,036
|
rd
|
get_poly_last_price.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Polygon.R
\name{get_poly_last_price}
\alias{get_poly_last_price}
\title{Get Polygon Last Price (Deprecated)}
\usage{
get_poly_last_price(ticker = NULL)
}
\arguments{
\item{ticker}{Specify which symbol you want to call by inserting ticker as a string.}
}
\value{
A list object containing all information the API responds with.
}
\description{
Deprecated. See \code{\link[AlpacaforR]{polygon}}.
This function provides the last listed price from Polygon. A list is returned with values such as the last price, last size, last exchange, and last timestamp.
}
\examples{
# Getting the last listed price for AMZN:
get_poly_last_price("AMZN")
}
\seealso{
Other Polygon:
\code{\link{get_meta}()},
\code{\link{get_poly_agg_quote}()},
\code{\link{get_poly_historic_info}()},
\code{\link{get_poly_last_trade}()},
\code{\link{get_poly_ohlc}()},
\code{\link{get_poly_prev_dayclose}()},
\code{\link{get_poly_stock_splits}()},
\code{\link{polygon}()}
}
\concept{Polygon}
|
308bea0633a027fb186d60eac23afce416556089
|
0d7425c0b9c210e34ef9c1938df117adbf43c172
|
/5.LMMs.R
|
a9ab6ab0f278083458316ab31a1fa8d6614ae3dd
|
[
"MIT"
] |
permissive
|
DanPapageorgiou/ranging_seasonality
|
ed81e777775911daec12a32ae72473711379acfd
|
2a3aec308bb4a8449d1eeb16beed4aff8f387116
|
refs/heads/main
| 2023-06-10T18:31:01.087259
| 2021-07-01T13:27:54
| 2021-07-01T13:27:54
| 381,995,904
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,666
|
r
|
5.LMMs.R
|
# LMM analyses of ranging seasonality; reproduces Supplementary Tables 2-7.
# NOTE(review): each section expects its 'df' to be loaded first via the
# commented load() calls - confirm the data paths before running.
library(lme4)
library(lmerTest)
library(repmis)
#download data files from Github and set a working directory
#################Question 1 & 2#########################
#load("/Users/danpapag/Desktop/GitHub_code/ddf_hr_dtd.Rdata")
#' Reproduces Supplementary Table 2. Results of the LMM for the core home range size 50% and its response to seasonality.
#' Reference level of season type is set to dry.
m <- lmer(df$AREA_SQM ~ df$season_type + scale(df$days_tracked) + (1|df$groupss_))
anova(m)
summary(m)
#' Reproduces Supplementary Table 3. Results of the LMM for home range size 95% and its response to seasonality.
#' Reference level of season type is set to dry.
m <- lmer(df$AREA_95 ~ df$season_type + scale(df$days_tracked) + (1|df$groupss_))
anova(m)
summary(m)
#' Reproduces Supplementary Table 7. Results of the LMM, in which we added the number of individuals tracked in each group in
#' each season as a predictor for home range, alongside with the fixed and random effects of the LMM presented in
#' Supplementary Table 2.
#' We found that the number of individuals tracked in each group was not a significant predictor of home range size.
#' Reference level of season type is set to dry.
# Count tracked individuals per (season, group) and attach it to each row.
p <- data.frame(table(paste(df$season_, df$groupss_)))
df$ids_per_group_per_season <- p$Freq[match(paste(df$season_, df$groupss_), p$Var1)]
m <- lmer(df$AREA_SQM ~ df$season_type + scale(df$days_tracked) + (1|df$groupss_)
          + df$ids_per_group_per_season)
anova(m)
summary(m)
#' Reproduces Supplementary Table 4. Results of the LMM for daily travel distance and its response to seasonality.
#' Reference level of season type is set to dry.
m <- lmer(df$speed_km_day ~ df$season_type + scale(df$days_tracked) + (1|df$groupss_))
anova(m)
summary(m)
#################Question 3#########################
#load("/Users/danpapag/Desktop/GitHub_code/df_dhor.Rdata")
#' Reproduces Supplementary Table 5. Results of the LMM for day-to-day site fidelity
#' and its response to seasonality. Reference level of season type is set to dry seasons.
# Drop rows with missing fidelity before fitting.
df_<-df[!is.na(df$fidelity),]
m <- lmer(df_$fidelity ~ df_$season_type + scale(df_$days_tracked) + (1|df_$groupss_)) #+ (1|df_$season))
anova(m)
summary(m)
#################Question 4#########################
#load("/Users/danpapag/Desktop/GitHub_code/df_dshor.Rdata")
#' Reproduces Supplementary Table 6. Results of the LMM for seasonal range overlap
#' and its response to seasonality. Reference level of season type is set to the overlap between two dry seasons.
m <- lmer(df_years12$Overlap ~ df_years12$season_types + (1|df_years12$groupss_)) #+ (1|df_years12$year_season))
anova(m)
summary(m)
|
841c67a3bebdfa1fd2eebe5fb395b5537c4f0621
|
118fc6e0c460f378a7135d12b9bb497af3715053
|
/programs/config.R
|
a498859f54f2e97fc20f2d2dc8793c9d3e34648e
|
[
"BSD-3-Clause"
] |
permissive
|
labordynamicsinstitute/ldi-replication-dataprep
|
2724d38134214b0f4ff79d069b5c71282ebf1c21
|
7139fcdc83752665e5828b852e78d6d3136ceae0
|
refs/heads/master
| 2020-05-09T18:05:05.501115
| 2019-04-15T22:00:32
| 2019-04-15T22:00:32
| 181,326,410
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 258
|
r
|
config.R
|
# ###########################
# CONFIG: define and filenames for later reference
# ###########################
# The Google Doc links are private - they are stored in a separate repo and need to be manually put in place here.
# Any other config goes here.
|
31cc6a000285591c4a0e6aa893fbd9389f5f9d7e
|
5f684a2c4d0360faf50fe055c1147af80527c6cb
|
/2021/2021-week10/bechdel-test.R
|
867700a24e1b52f23b45f4d0625913b81cb6762c
|
[
"MIT"
] |
permissive
|
gkaramanis/tidytuesday
|
5e553f895e0a038e4ab4d484ee4ea0505eebd6d5
|
dbdada3c6cf022243f2c3058363e0ef3394bd618
|
refs/heads/master
| 2023-08-03T12:16:30.875503
| 2023-08-02T18:18:21
| 2023-08-02T18:18:21
| 174,157,655
| 630
| 117
|
MIT
| 2020-12-27T21:41:00
| 2019-03-06T14:11:15
|
R
|
UTF-8
|
R
| false
| false
| 3,539
|
r
|
bechdel-test.R
|
# TidyTuesday 2021 week 10: Bechdel test.
# Renders each year as its four digits laid out on a decade grid; the digit
# position(s) bracketing that year's median Bechdel rating are set in bold
# and coloured by the year's film count.
library(tidyverse)
library(colorspace)

bechdel <- readr::read_csv('https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2021/2021-03-09/raw_bechdel.csv')

# Font family variants used to encode "median digit" (bold) vs normal.
f1 = "JetBrains Mono"
f1l = "Regular"
f1b = "ExtraBold"

# One row per digit of each year: split e.g. "1976" into characters with
# digit positions 0-3, then flag the position(s) straddling the median
# rating and give bold digits a little extra horizontal margin.
bechdel_med <- bechdel %>%
  group_by(year) %>%
  summarise(median_rating = median(rating), n = n()) %>%
  mutate(
    char = strsplit(as.character(year), split = ""),
    char_n = list(c(0:3))
  ) %>%
  ungroup() %>%
  unnest(c(char, char_n)) %>%
  mutate(
    fam = paste0(f1, " ", if_else(char_n == floor(median_rating) | char_n == ceiling(median_rating), f1b, f1l)),
    decade = (year - 1) %/% 10 * 10,
    x = year - decade,
    margin_x = if_else(lag(str_detect(fam, "Bold"), default = FALSE), 0.04, 0)
  ) %>%
  group_by(year) %>%
  mutate(margin_x = cumsum(margin_x)) %>%
  ungroup()

ggplot(bechdel_med) +
  geom_text(aes(x = x + char_n/10 + margin_x, y = decade,
                label = char, family = fam,
                size = if_else(str_detect(fam, "Bold"), 4.5, 3.5),
                color = if_else(str_detect(fam, "Bold"), n, NULL)
                ),
            stat = "unique", hjust = 0) +
  # Legend: a hand-drawn "1976" example explaining the bold-digit encoding.
  annotate("text", x = c(1, 1.15, 1.35, 1.55), y = 1850, label = c("1", "9", "7", "6"), size = c(6, 6, 7.5, 6), family = c("JetBrains Mono Regular", "JetBrains Mono Regular", "JetBrains Mono ExtraBold", "JetBrains Mono Regular"), color = c("grey20", "grey20", 2, "grey20")) +
  annotate("text", x = c(1, 1.15, 1.35, 1.55), y = 1862, label = c("0", "1", "2", "3"), size = 3, family = "JetBrains Mono Regular", color = "grey20") +
  annotate("text", x = 1.95, y = 1860.5, label = "Bold position indicates the median of Bechdel test rating\nfor the year (two bold numbers mean a median between them)", size = 2.5, family = "JetBrains Mono Regular", color = "grey10", lineheight = 0.9, hjust = 0, vjust = 1) +
  annotate("segment", x = c(1, 1.15, 1.35, 1.55), y = 1859,
           xend = c(1, 1.15, 1.35, 1.55), yend = 1855,
           color = darken("#5E92B3", 0.2), size = 0.2, arrow = arrow(length = unit(0.005, "npc"))) +
  # Title
  annotate("text", x = 10.5, y = 1850, label = "Bechdel Test", size = 11, family = "Graphik Semibold", hjust = 1) +
  annotate("text", x = 10.5, y = 1859, label = "Median test rating by year for 8 839 films\nbetween 1888 and 2021", size = 3.2, family = "Graphik", hjust = 1, vjust = 1, lineheight = 1) +
  # Scales, theme, etc (y reversed so early decades sit at the top).
  scale_y_reverse() +
  scale_size_identity() +
  scale_color_distiller(palette = "Reds", na.value = "grey20", direction = 1, guide = guide_colorbar(title = "Color indicates total number of films for the year", title.position = "top", title.vjust = 1)) +
  # Caption
  labs(
    caption = "Source: Bechdeltest.com · Graphic: Georgios Karamanis"
  ) +
  theme_void() +
  theme(
    legend.position = c(0.35, 0.95),
    legend.direction = "horizontal",
    legend.key.height = unit(0.25, "line"),
    legend.key.width = unit(3.35, "line"),
    legend.title = element_text(family = "JetBrains Mono Regular", size = 7, color = "grey10", hjust = 0),
    legend.text = element_text(family = "JetBrains Mono Regular", size = 7, color = "grey10"),
    plot.background = element_rect(fill = lighten("#5E92B3", 0.3), color = NA),
    plot.caption = element_text(family = "Graphik Light", hjust = 0.92, size = 7.5, margin = margin(0, 0, 10, 0)),
    plot.margin = margin(10, 0, 0, 10)
  )

# Timestamped export so successive runs do not overwrite each other.
ggsave(here::here("temp", paste0("bechdel-test-", format(Sys.time(), "%Y%m%d_%H%M%S"), ".png")), dpi = 320, height = 6, width = 8)
|
6467050b25df5a0eb05c2ffe2c78263ee7da87bc
|
5502b31c584e3ecc778f8dab98f71fbd207d7377
|
/EDA.R
|
e86210b2e35b22063aeaeba66fa9dbc469056ddc
|
[] |
no_license
|
EST-Team-Adam/Exploratory-Data-Analysis
|
386a89f70119d73f347fa8a1d660d6d917057836
|
2aea93eff731ed0e6239c761f5bb55d81c5776ec
|
refs/heads/EDA
| 2021-01-12T20:59:18.693429
| 2016-10-03T16:05:01
| 2016-10-03T16:05:01
| 65,398,695
| 1
| 0
| null | 2016-08-31T15:04:36
| 2016-08-10T16:32:50
|
R
|
UTF-8
|
R
| false
| false
| 1,886
|
r
|
EDA.R
|
EDA <- function(df,w) {
  # This function runs a basic analysis of a given dataframe where Date is
  # first column.
  # analysis[[1]] : Price and Returns plots
  # analysis[[2]] : Summaries
  # analysis[[3]] : Boxplots
  # analysis[[4]] and following : Scatterplots
  #
  # df : data.frame whose first column is a date and remaining columns are
  #      price series; w : window size used to bucket rows for the boxplots.
  # NOTE(review): loess.as() is not base R (fANCOVA) - confirm it is loaded.
  ## MEMORY PRE-ALLOCATION & DATE ##
  df_ret <- as.data.frame(matrix(nrow=dim(df)[1]-1,ncol=dim(df)[2]-1,NA))
  colnames(df_ret) <- names(df[,2:dim(df)[2]])
  analysis <- rep(list(rep(list(NA),dim(df)[2]-1)),dim(df)[2]+2)
  # Hard-coded date axis spanning 2005-01-01 .. 2016-06-14, one point per row.
  date<-seq(as.Date("2005-01-01"), as.Date("2016-06-14"),length = dim(df)[1])
  ## RETURNS ##
  # Simple returns: diff(price) / lagged price, one column per series.
  for (i in 2:dim(df)[2]) {
    df_ret[,i-1] <- diff(df[,i])/df[,i][-length(df[,i])]
  }
  ## PLOTS ##
  # NOTE(review): the price/returns plots and summaries depend only on i but
  # are recomputed for every j - redundant work; confirm before flattening.
  # NOTE(review): type="line" is not a valid plot type; R truncates it to
  # "l" with a warning on every call.
  for (i in 2:dim(df)[2]) {
    for (j in 2:dim(df)[2]) {
      par(mfrow=c(2,1))
      plot(date,df[,i],main=names(df)[i],xlab="Observation", ylab="Price",type="line")
      legend("topleft",lty=c(1,1), lwd=c(2.5,2.5),col=c("black","blue","red"),legend= c("Price","LOESS","Spline"))
      # Overlay smoothing-spline (red) and LOESS (blue) trend lines.
      lowpass.spline <- smooth.spline(df[,i], spar = NULL)
      lowpass.loess <- loess.as(1:length(date), df[,i], degree = 1, criterion = c("aicc", "gcv")[2], user.span = NULL)
      lines(date,predict(lowpass.spline, 1:length(date), lwd = 2)$y,type="line",col="red")
      lines(date,predict(lowpass.loess, 1:length(date)),type="line",col="blue")
      plot(date[1:length(date)-1],df_ret[,i-1],main=names(df_ret)[i-1],xlab="Observation", ylab="Returns",type="line")
      analysis[[1]][[i-1]] <- recordPlot()
      analysis[[2]][[i-1]] <- summary(df[,i])
      par(mfrow=c(1,1))
      # NOTE(review): buckets use ceiling(nrow/4) with each=w - likely meant
      # ceiling(nrow/w); with w != 4 the grouping vector may be too short.
      boxplot(df[,j]~rep(1:ceiling(dim(df)[1]/4), each=w)[1:dim(df)[1]],main=names(df_ret)[j-1], xlab="Observation", ylab="Price")
      analysis[[3]][[j-1]] <- recordPlot()
      # Pairwise scatterplot of series i against series j.
      plot(df[,i]~df[,j],xlab=names(df)[i], ylab=names(df)[j])
      analysis[[i+2]][[j-1]] <- recordPlot()
    }
  }
  return(analysis)
}
|
8b08deb5bb9d5c973f75bde8064245da2474e74c
|
2c8fcec7afb7574d5e361ae53a613efa408903ab
|
/PD_Geno_Numericalisation_and_ChromSeparation.R
|
edbcce01c5af32103552ef060167e27c612ddd3d
|
[] |
no_license
|
BioinformaticsUWL2020/BigData
|
a5947fab00426337442290153c3f6628bdedeee1
|
cc8435554117b3d1de9361c4b6e7b71d0acab64a
|
refs/heads/master
| 2021-05-21T17:18:19.564453
| 2020-06-19T16:30:40
| 2020-06-19T16:30:40
| 252,732,500
| 2
| 3
| null | 2020-04-23T20:15:30
| 2020-04-03T12:56:30
| null |
UTF-8
|
R
| false
| false
| 8,059
|
r
|
PD_Geno_Numericalisation_and_ChromSeparation.R
|
### Numericalisation and Separation of PD Genotypes
### 17.06.2020

#### VERY IMPORTANT ####
# Run PD_HapMap_and_Pheno_Construction.R first

#lib import
## GAPIT REQUIREMENTS
library('MASS') # required for ginv
library(multtest)
library(gplots)
library(compiler) #required for cmpfun
library("scatterplot3d")
source("http://www.zzlab.net/GAPIT/emma.txt")
source("http://www.zzlab.net/GAPIT/gapit_functions.txt")
## GAPIT REQUIREMENTS
library(plyr)
library(hablar)

top_level_path <- 'C:/Users/zacha/Documents/BigData/' # Change to match your file structure
ext1_path <- 'PD_Geno_by_Chrom/'
ext2_path <- 'HapMap_and_Pheno_Files/'

setwd(paste0(top_level_path, ext2_path))

# Genotype Numericalisation: read the phenotype table and the HapMap-format
# genotype table produced by the construction script.
myY <- read.table('pd_pheno.txt', head = TRUE)
myG <- read.table('pd_hapmap.txt', head = FALSE)

setwd(top_level_path)

# GAPIT encodes genotypes as 0/1/2; GT holds sample ids, GD the genotype
# matrix (samples x SNPs) and GI the SNP/chromosome/position map.
x <- GAPIT.HapMap(myG)

numeric_colnames <- as.vector(t(x$GT))
numeric_genotype <- as.data.frame(t(x$GD))
names(numeric_genotype) <- numeric_colnames
rs_chrom_pos <- x$GI

# Recode every sample column from the GAPIT 0/1/2 coding to -1/0/1.
# A single loop over all sample columns replaces the previous per-column
# copy-paste (which had drifted out of sync, e.g. a stray "-0" in one
# mapvalues() call) and generalises to any number of samples.
for (sample_col in names(numeric_genotype)) {
  recoded <- factor(numeric_genotype[[sample_col]], levels = c(0, 1, 2))
  recoded <- mapvalues(recoded, from = c(0, 1, 2), to = c(-1, 0, 1))
  numeric_genotype[[sample_col]] <- as.numeric(as.character(recoded))
}

# Combine the SNP map with the recoded genotypes and normalise column types
# in place (previously mutate(as.character(...)) added unintended duplicate
# columns to the output files instead of converting the originals).
numeric_pd_genotype <- cbind(rs_chrom_pos, numeric_genotype)
numeric_pd_genotype$SNP <- as.character(numeric_pd_genotype$SNP)
numeric_pd_genotype$Chromosome <- as.numeric(as.character(numeric_pd_genotype$Chromosome))
numeric_pd_genotype$Position <- as.numeric(as.character(numeric_pd_genotype$Position))

# Separate data by chromosome (1-22, X = 23, Y = 24, MT = 25) - one loop
# instead of 25 copy-pasted filter() calls.
chrom_sep_genotypes <- lapply(1:25, function(chrom) {
  keep <- !is.na(numeric_pd_genotype$Chromosome) &
    numeric_pd_genotype$Chromosome == chrom
  numeric_pd_genotype[keep, ]
})
names(chrom_sep_genotypes) <- as.character(c(1:22, 'X', 'Y', 'MT'))

if (!(dir.exists('PD_Geno_by_Chrom'))) {
  dir.create('PD_Geno_by_Chrom')
}

# Where the .txt files will be written (one tab-separated file per
# chromosome, numbered 1-25 as before).
setwd(paste0(top_level_path, ext1_path))

for (i in seq_along(chrom_sep_genotypes)) {
  write.table(
    chrom_sep_genotypes[[i]],
    paste0('PD_num_gen_chrom_', i, '.txt'),
    quote = FALSE,
    sep = '\t',
    row.names = FALSE
  )
}
|
de5651de92e5100fa56235025a0a0ea86c98e1d5
|
42f1f581913296f395277166b209f3a1b0feac53
|
/cachematrix.R
|
0cdd1ca051a55601acd132ab60d87742110d2abf
|
[] |
no_license
|
srvshubh/ProgrammingAssignment2
|
d67553c99a0c51a6782bf28a5da6f5c2e47c7a19
|
60040a795515e0be5525096ea875f4629ff45cdd
|
refs/heads/master
| 2023-05-12T00:53:39.687439
| 2021-06-01T09:06:59
| 2021-06-01T09:06:59
| 255,364,594
| 0
| 0
| null | 2020-04-13T15:21:59
| 2020-04-13T15:21:57
| null |
UTF-8
|
R
| false
| false
| 881
|
r
|
cachematrix.R
|
## These function computes the inverse of the special "matrix". If the inverse has already been calculated, it would retrieve the inverse from the cache
## Below function creates a special "matrix" object that can cache its inverse
makeCacheMatrix <- function(x = matrix()) {
  # Wrap a matrix in a list of accessors that can also cache its inverse.
  # The cache lives in this function's environment via the closures below.
  cached_inverse <- NULL
  set <- function(y) {
    x <<- y
    cached_inverse <<- NULL  # a new matrix invalidates any cached inverse
  }
  get <- function() {
    x
  }
  setInverse <- function(solveMatrix) {
    cached_inverse <<- solveMatrix
  }
  getInverse <- function() {
    cached_inverse
  }
  list(set = set, get = get, setInverse = setInverse, getInverse = getInverse)
}
## Below function computes the inverse of the special "matrix" returned by makeCacheMatrix above
cacheSolve <- function(x, ...) {
## Return a matrix that is the inverse of 'x'
inv <- x$getInverse()
if(!is.null(inv)){
message("getting cached data")
return(inv)
}
data <- x$get()
inv <- solve(data)
x$setInverse(inv)
inv
}
|
f417d9bf47c446516867135a26256692af94587f
|
355d56c77d3926fb9cdbd76314c7723635431c20
|
/manual.R
|
fe271b5ba836b7dd1e70f4089d2470a00af59d93
|
[] |
no_license
|
davydovpv/R.guide
|
ca916d08f5687eaaa3c6322903f639da908b97e3
|
8eb299e24c51dc4c870fb1c908a59e47baca2df8
|
refs/heads/master
| 2021-03-12T01:08:44.953428
| 2017-02-25T22:41:11
| 2017-02-25T22:41:11
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 52,217
|
r
|
manual.R
|
1. Установка на 14.04
# Добавить реп
sudo sh -c 'echo "deb http://cran.rstudio.com/bin/linux/ubuntu trusty/" >> /etc/apt/sources.list'
gpg --keyserver keyserver.ubuntu.com --recv-key E084DAB9
gpg -a --export E084DAB9 | sudo apt-key add -
# поставить
sudo apt-get update
sudo apt-get install r-base
# установка r-studio
# sudo apt-get install libjpeg62 libcurl4-openssl-dev libxml2-dev libmariadb-client-lgpl-dev
apt-get update && apt-get install -y \
libcurl4-openssl-dev \
libxml2-dev \
libmariadb-client-lgpl-dev \
libssl-dev
wget https://download1.rstudio.org/rstudio-0.99.489-amd64.deb
sudo dpkg -i rstudio-0.99.489-amd64.deb
2. Установка пакетов
install.packages('xts') # xts - пакет для работы с временными рядами
install.packages('xts', dependencies=TRUE)
install.packages('quantmod', repos='http://r-forge.r-project.org') #с указанием репа
# для пакетов: в меню r-studio tools->check for packets updates
3. Базовый синтаксис
# присваивание значений
a = 5
a <- 5
assign('a',5)
# стравнение значений (возвращает логические данные)
a == b
a >= b
# сложение, вычитание
a + b
a - b
# справка по функции
help($функция) # в Rstudio - выделить и нажать F11
# тело функции
body($функция)
# список параметров функции
formals($функция)
# код функции и его корректировка
fix($функция)
# округлить
round(x, digits = 0)
floor()
# округлить в сторону 0 (отбросить данные после точки)
trunc()
## задание собственной функции
# подгрузка библиотек внутри функции
require()
# подключение файла со своим набором функций
source('123.R')
my.fun1 <- function(x) {
1 / (x + 1) / sqrt(x)
}
# или так
my.fun1 <- function(x) {
z <- 1 / (x + 1) / sqrt(x)
return(z)
}
## работа с ошибками внутри функции
# генерация ошибок
warning()
error()
# остановка по условию
stop()
# предупреждения как ошибки
option(warn = 2)
# подавление ошибок
suppressWarnings(ParentFunction())
# поиск ошибок
x <- tryCatch('OK', # или warning('got a warning!'), или stop('an error occured!')
warning = function(w) {
return(paste('Warning:', conditionMessage(w)));
},
error = function(e) {
return(paste('Error:', conditionMessage(e)));
},
finally = {
print('This is try-catch test. check the output.')
})
print(x)
# оператор вывода
print('text')
# вывод в одну строку
cat(i)
# вывод времени затраченного на выполнение операции
system.time(expr, gcFirst = TRUE)
# обнулить окружение
new.env()
3.1 Работа с векторами
## задать вектор (структурированный набор значений)
a <- c(1, 2, 3, 4, 5, 6:10)
b <- c(1, 'a', T, F)
# или так
u <- seq(from = -2, to = 3, by = 0.5) # from/to - диапазон, by - шаг сетки
u <- seq(-2, 3, length = 11) # length - длина вектора, шаг расчитывается автоматически
# задать вектор из повторяющихся значений
rep(1:3, 4)
rep(1:3, c(1,2,3))
b[1] # 1-й элемент вектора
a[2:4] # элементы с 2 по 4-й
length(v) # длина вектора
# с векторами можно работать практически всеми стандартными функциями
# при сложении/вычитании вектора должны быть кратны друг другу
## функции для работы с векторами
mean(x) # вычисляет среднее
sum(x)
sum(x, na.rm = TRUE) # позволяет исключить из вектора нечисловые значения
sd(x) # вычисляет СКО (standart deviation)
## генерация случайных чисел
# set.seed(10) - стандартизация последовательностей
open.cost <- runif($длина, $min, $max) # равномерный закон растпред-я
rnorm(5, mean=2, sd=1) # нормальное распред-е (mean - среднее, sd - дисперсия)
# кумулятивная сумма
cumsum()
# количество положительных элементов
sum(x>0)
# объединение строк
rbind($имя_строки1 = $данные1,
$имя_строки2 = $данные2)
# объединение столбцов
cbind($имя_столбца1 = $данные1,
$имя_столбца2 = $данные2)
# разница элементов вектора
x <- diff(c)
# последний элемент вектора
last(x)
tail(x, 1)
3.2 Листы
# лист может содержать в себе элементы любого вида
x <- list(1:3, 'test', TRUE, list('f', 'F'))
3.3 Даты
# В памяти хранятся относительные значения дат (от 01.01.1970)
# простой метод задать дату
as.Date('2015-05-13')
# или с указанием формата
as.Date('3/15/2015', format = '%m/%d/%Y')
# относительное представление даты
a <- as.Date('2015-05-13')
as.numeric(a)
# точное задание времени с часовым поясом
as.POSIXct('2013-09-12 12:05:05', tz = 'Europe/Moscow') # в памяти хранит 1 число - кол-во секунд от относительной даты
POSIXlt # в памяти хранится лист со значениями секунд, минут, часов, дней, месяцев, лет от относительной даты
as.POSIXct('12.09.2013 12:05:05', format = '%d.%m.%Y %H:%M:%S', tz = 'UTC')
ISOdate(2014, 11, 28, 19, 11, 37, tz = 'GMT')
# задать временную последовательность
seq(as.POSIXct('2014-10-10', by = month, length = 4)) # зависит от часового пояса
seq(ISOdate(2014, 10, 10), ISOdate(2015, 10, 10), 'days') # GMT время
Sys.time() # выводит системную дату и время
Sys.timezone() # выводит системный часовой пояс
Sys.setenv(TZ = 'UTC') # задает часовой пояс в окружении
3.4 Атрибуты
attr(a, 'Myname') = 'test'
attr(a, 'Myname') # выводит значение атрибута
attributes(a) # выводит значения всех атрибутов переменной
names(x) <- c('a', 'b' ,'c') # пронаименовать вектор
3.5 Матрицы
# по-умолчанию элементы матрицы заполняются по строкам
# задать матрицу:
b <- matrix(1:12, 3, 4)
b[2,2] # обращение к конкретному элементу матрицы (строка/столбец, нумерация с 1)
# Включить заполнение по строкам:
a <- matrix(data = 1:12, nrow = 3, ncol = 4, byrow = TRUE)
# умножение матрицы на число/вектор
a * b
# матричное умножение
a %*% b
# транспонирование матрицы
t(a)
# Многомерные массивы
a=array(c(1:24), dim = c(1,2,3))
3.6 Фреймы
# Фрейм - матрица с именованными столбцами
# Создать:
n <- c(1, 2, 3)
a <- c('a','b','c')
b <- c(TRUE, FALSE, TRUE)
df <- data.frame(n, a, b) # функция создания фрейма
names(df) <- c('N', 'String', 'Flag') # переименовать столбцы
df <- data.frame(N = n, String = a, b = Flag) # создать фрейм с нужными названиями
str(df) # строковое представление фрейма
nrow(df) # кол-во строк фрейма
ncol(df) # кол-во столбцов фрейма
df[2,2] # обращение к конкретному элементу фрейма
df[2] # выделить столбец
df[, 2:3] # выделить несколько столбцов
df[2, ] # выделить строку
df[df$Flag == TRUE] # выделить строки столбца Flag, в которых значения TRUE
# удалить столбцы с NA
file <- file[, colSums(is.na(file)) == 0]
# удалить первую строку
temp.frame <- temp.frame[-1, ]
# сортировка по ключевому значению (по убыванию)
file2 <- file2[order(-file2$profit),]
3.7 Файлы
# Задать текущий каталог
setwd('~/work/R')
getwd() # вывести рабочий каталог
# Функция записи в файл
write.table()
write.table(b, file = 'file.csv', sep = '\t') # запишет b в file.csv и разделит элементы табуляцией
# Функция чтения из файла
a <- read.table('file.csv')
# Функция для чтения .csv файлов
read.csv()
a <- read.csv(unz('1.zip', 'folder1/folder2/file.csv')) # прочитать файл из архива
f <- tempfile() # создать temp file
download.file('http://site.com/file.zip', f) # скачать архив
data <- read.csv(unzip(f))
unlink(f) # удалить temp
3.8 Ветвление & циклы
# простое ветвление
if ($условие) {
$действие
}
# альтернативное ветвление
if ($условие) {
$действие1
} else {
$действие2
}
# несколько альтернатив
if ($условие1) {
$действие1
} else if ($условие2) {
$действие2
}
# цикл
for (i in 1:5) {
print(i)
}
# цикл с предусловием
k = 5
while (k > 0) {
cat(k, ' ')
k=k-1
}
# бесконечный цикл
repeat {
$действие1
if ($условие_окончания) break
$действие2
}
## вместо явных циклов
# применить для каждого элемента в:
sapply(v, sqrt) # для векторов
lapply(v, sqrt) # для листов
apply(v, 1, min) # для матриц (найти min в строке)
apply(v, 1:2, min) # то же, но поэлементно
ifelse($условие, $true_действие, $false_действие) # проверяет на соответствие все элементы вектора
seq_len(5) # возвращает последовательность от 1 до n
seq_along(a) # возвращает индексы всех элеменотов вектора 'a', то же, что и seq_len(length(a))
3.9 Графики
## график функции
plot(x, type = 'o')
# график зависимости x от Y + сетка
plot(y, x, type = 'o')
grid()
# добавить еще один график к существующему
lines(y, y2)
# нарисовать горизонтальную линию
abline(h = 20, col = 'red', lwd = 2)
# нарисовать вертикальную линию
abline(v = 20, col = 'red', lwd = 2)
## гистаграмма
hist(x,
breaks = x1, freq = F, xlab = '', ylim = c(0, 0.5),
ylab = 'плотность вероятности', main = 'Гистограмма')
# огибающая
lines(density(b3), col = '$', lwd = $)
# график рядов из пакета zoo
plot.zoo(data[, c('equity', 'equity2')],
type = '1', plot.type = 'single', screens = c(1,1), col = c('blue', 'dark red'))
4. Пакеты для работы с временными рядами
4.1 quantmod
# Позволяет получать кодировки с Yahoo Finance, Google Finance, локальной MySQL, .rdata, .rda, .csv и пр.
# установка
install.packages('quantmod', dependencies = TRUE)
library(quantmod)
# загрузка котировок
getSymbols('MSFT', src = 'yahoo')
# график котировок
chartSeries(MSFT)
# график за период
chartSeries(MSFT['2014-01/2014-04'])
# преобразовать котировки в недельные
to.weekly()
# max значения
hi(x)
# min знаения
lo(x)
# свечной график
candleChart(MSFT['2013/2014'], theme = 'black', up.col = 'green', dn.col = 'red')
# барный график
barChart()
# добавить SMA к графику
addSMA(n = 20, on = 1, col = 'blue')
# добавить линию
addLines(h = 10, on = 1, col = 'green')
# удалить линию с графика
dropTA('BBands')
# цены закрытия/открытия/hi/low/etc..
# столбцы фрейма должны содержать стандартные названия: Open”, “High”, “Low”, “Close”, “Volume”, or “Adjusted”
# при вызове данных функцией getSymbols(), названия фрейма прописываются правильно из-коробки
Op(x)
Hi(x)
Lo(x)
Cl(x)
Vo(x)
Ad(x)
4.2 rusquant
# Заточен под наши сервера. Позволяет загружать статистику по малоинтервальным данным - часы, минуты, секунды, тики.
# По умолчанию загружается дневной график. Пакет поддерживается Финамом.
# Не позволяет загружать большие объёмы данных. Грузить большие данные лучше в нерабочее время.
## установка
install.packages('rusquant', repos = 'http://r-forge.r-project.org', dependencies = TRUE)
library(rusquant)
# дополнительные варианты установки
install.packages('rusquant', contriburl = 'http://r-forge.r-project.org/bin/windows/contrib/latest/')
# или из исходников:
install.packages('rusquant', repos = 'http://R-Forge.R-project.org', type = 'source')
# или скачать архив со страницы https://r-forge.r-project.org/R/?group_id=1175
# и поставить из архива (Install from: Package Archive File).
# загрузить данные
getSymbols('SBER', from = '2015-01-01', to = Sys.Date(), src = 'Finam')
# загрузить данные с указанием периода
# доступны периоды: 1min, 5min. 10min, 15min, 30min, hour, day, week, month
getSymbols('SBER', from = Sys.Date() - 1, src = 'Finam', period = '5min')
# построить график
chartSeries(SBER, theme = 'black')
# обавить exp-ю скользяшку (EMA)
addEMA(n = 20, col = 'blue')
# загрузить тиковые данные
getSymbols('SBER', from = Sys.Date() - 1, src = 'Finam', period = 'tick')
# тиковые данные можно преобразовать в лююые другие
SBER_15min <- to.period(SBER, 'mins', k = 15)
### Данные можно получать с сайта mfd.ru и oanda.ru
# загрузка списка доступных тикеров
data('tickers')
# поиск по тикерам
tickers[grep('EUR')]
## загрузка данных по тикерам
# временные периоды такие же, как на Финаме
getSymbols('EUR', from = Sys.Date() - 1, src = 'mfd', period = 'hour', auto.assign = FALSE)
# упорядочить данные
order(-s) # по убыванию
order(s) # по возрастанию
4.4 xts
# упорядочить по дате
order.by = dt
5. Простейшее тестирование стратегий
5.1 Пересечение цены с SMA (crossMA)
library(rusquant)
# для стандартизации имени функции данных
s <- get(getSymbols('EUR/USD', src = 'oanda', from = '2015-12-01', to = '2015-12-10'))
# переименовываем столбец (стобей данных только один - с oanda грузятся только цены закрытия)
names(s) <- c('Close')
# добавляем к данным еще один столбец со значениями SMA для 20 периодов
s$sma20 <- SMA(Cl(s), 20)
# необходимо определить, какую позицию открывать, исходя из пересечения SMA графиком цен
# выбранную позицию записываем в доп. столбец
s$position <- ifelse(Cl(s) > s$sma20, 1, -1)
# убрать из вектора строки-не числа
# s <- s[!is.na(s$position),]
s <- na.omit(s)
# вычисление equity
# лот 0.1, 1пункт = $1, для EURUSD 1пункт=0.0001
s$ret <- lag(s$position) * (s$Close - lag(s$Close)) * 10000
# в первой строке - нулевая прибыль
s[1, 'ret'] <- 0
# кумулятивная сумма
s$equity <- cumsum(s$ret)
plot(s$equity)
5.2 Пересечение двух MA (в примере - EMA)
data <- getSymbols('GAZP',
from = '2015-01-01', to = '2015-12-01', src = 'Finam', period = '15min', auto.assign=FALSE)
head(data)
names(data) <- c('Open' , 'High' , 'Low' , 'Close' , 'Volume')
# вычисляем && записываем в вектор значения нужных EMA
data$ema20 <- EMA(Cl(data), n = 20)
data$ema80 <- EMA(Cl(data), n = 80)
# входим в позицию
data$position <- ifelse(data$ema20 >= data$ema80, 1, -1)
# удаление нечисловых значений
data <- na.omit(data)
# вычисление && график equity
data$ret <- lag(data$position) * (data$Close - lag(data$Close))
data$ret[1, 'ret'] = 0
data$equity <- cumsum(data$ret)
plot(data$equity)
5.3 Простейшая оптимизация
# создать функцию
# EMA-crossover backtest: long while the fast EMA is at or above the slow
# EMA, short otherwise.
#
# data      - OHLC xts series with a 'Close' column (as from getSymbols()).
# ema1      - period of the fast EMA.
# ema2      - period of the slow EMA.
# plot.show - if TRUE, plot the equity curve as a side effect.
#
# Returns the final equity value (points of the traded instrument).
test.strategy <- function(data, ema1, ema2, plot.show = TRUE) {
    data$ema1 <- EMA(Cl(data), n = ema1)
    data$ema2 <- EMA(Cl(data), n = ema2)
    # BUG FIX: the position rule must use the EMAs computed from the ema1/ema2
    # arguments; the original compared hard-coded data$ema20/data$ema80 columns,
    # so the function's parameters were silently ignored.
    data$position <- ifelse(data$ema1 >= data$ema2, 1, -1)
    data <- na.omit(data)
    # Per-bar result: previous bar's position times the price change.
    data$ret <- lag(data$position) * (data$Close - lag(data$Close))
    data$ret[1, 'ret'] <- 0     # first bar has no prior position
    data$equity <- cumsum(data$ret)
    if (plot.show) {
        plot(data$equity)
    }
    return(data$equity[[nrow(data)]])
}
# задать цикл оптимизации
smax <- -99999
ema1.opt <- 0
ema2.opt <- 0
for (ema1 in seq(1, 99, 1)) {
for (ema2 in seq(ema1 + 1, 100, 1)) {
s <- test.strategy(data, ema1, ema2, FALSE)
if (s > smax) {
smax <- s
ema1.opt <- ema1
ema2.opt <- ema2
}
}
if (ema1 %% 10 == 0) {
cat(ema1, ' ', sep='')
}
}
# все пары чисел n1 и n2
g <- as.matrix(expand.grid(n1, n2))
eq <- apply(g,
1,
function(r) {
test1(data, r[1], r[2], FALSE)
})
# номер строки, для которой эквити максимальна:
opt.ind <- which.max(eq)
# параметры в этой строке:
g.opt <- g[opt.ind, ]
#
test1(data, g.opt[1], g.opt[2], TRUE)
5.4 Учёт накладных расходов
# на примере стратегии пересечения цены со скользящей средней
library(rusquant)
data <- getSymbols('EURUSD',
from = Sys.Date() - 3,
to = Sys.Date(),
src = 'Finam', period = '15min', auto.assign = FALSE)
head(data, 3)
names(data) <- c('Open', 'High', 'Low', 'Close', 'Volume')
# можно и так переименовать: colnames(data) <- gsub('.*\\.', '', colnames(data))
# копия данных
data2 <- data
head(data2, 3)
# вычеслить EMA
data2$ma1 <- EMA(Cl(data), 50)
# параметр нормирования по пунктам
pip <- 0.0001
# 'мёртвая зона' вокруг EMA
delta <- 3 * pip
# алгоритм стратегии
data2$pos <- ifelse(data2$Close > data2$ma1 + delta,
                    1,
                    ifelse(data2$Close < data2$ma1 - delta, -1, 0))
data2 <- na.omit(data2) # убрали строки с NA
# результаты сделок
data2$ret <- lag(data2$pos) * (data2$Close - lag(data2$Close)) / pip
data2[1, 'ret'] <- 0 # в первой строке нулевая прибыль
# equity
data2$equity <- cumsum(data2$ret)
n <- nrow(data2) # количество строк
s <- data2$equity[[n]] # конечная сумма без учёта накладных расходов
# расчет накладных расходов
# количество сделок (входов)
y <- ifelse((data2$pos != lag(data2$pos)), abs(data2$pos), 0) # определение перехода
y[1] <- 0 # вместо NA.
# k - число сделок
k <- sum(y, na.rm = TRUE) + 1
spread <- 2 # спред 2 пункта .
commiss <- 1 # комиссия на круг (в пересчёте на пункты)
slip <- 2 # проскальзывание 1 пункт при входе и 1 пункт при выходе
# все накладные расходы (в пунктах)
overheads <- k * (spread + commiss + slip)
# итоговая прибыль/убыток
s2 <- s - overheads
## всё это в виде функции
# EMA 'dead zone' strategy with overhead accounting: long above EMA+delta,
# short below EMA-delta, flat inside the zone.
#
# data  - OHLC xts series with a 'Close' column.
# n     - EMA period.
# pip   - point size used to normalize returns (default 0.0001 for EURUSD).
# delta - half-width of the dead zone around the EMA, in points.
#
# Returns c(s, s2, k): gross profit in points, profit net of overheads,
# and the number of trades.
test3 <- function(data, n, pip = 0.0001, delta = 3) {
    data$ma1 <- EMA(Cl(data), n)
    delta2 <- delta * pip            # dead-zone half-width in price units
    # BUG FIX: the original compared against a nonexistent data$ma column;
    # the EMA was stored in data$ma1.
    data$pos <- ifelse(data$Close > data$ma1 + delta2,
                       1,
                       ifelse(data$Close < data$ma1 - delta2,
                              -1,
                              0))
    data <- na.omit(data)
    # Per-bar result in points: previous position times the price change.
    data$ret <- lag(data$pos) * (data$Close - lag(data$Close)) / pip
    data[1, 'ret'] <- 0
    data$equity <- cumsum(data$ret)
    m <- nrow(data)
    s <- data$equity[[m]]            # gross result, points
    # Count position changes; each change is an entry into a new trade.
    y <- ifelse(data$pos != lag(data$pos),
                abs(data$pos),
                0)
    y[1] <- 0
    k <- sum(y, na.rm = TRUE) + 1    # number of trades
    spread <- 2                      # spread, points
    commiss <- 1                     # round-turn commission, points
    slip <- 2                        # slippage (1 point in + 1 point out)
    overheads <- k * (spread + commiss + slip)
    s2 <- s - overheads              # profit net of overheads
    return(c(s, s2, k))
}
5.4 Оптимизация функций с двумя параметрами (опт. двух параметров одновременно)
# функцию test3 на период EMA и 'мертвой зоны'
n <- seq(10, 300, by = 5)
delta <- seq(-10, 20, by = 1)
# матрица тестовых значений
g <- as.matrix(expand.grid(n, delta))
# тест
y <- apply(g,
1,
function(r) {
test3(data, n=r[1], delta=r[2])
})
# выделяем строку с equity после учёта комиссий
s <- y[2, ]
# Ищем максимальный элемент :
k.opt <- which.max(s)
g.opt <- g[k.opt, ]
n.opt <- g.opt[[1]]
delta.opt <- g.opt[[2]]
n.opt; delta.opt # вывод данных
test3(data, n.opt, delta = delta.opt) # проверка
6. Графики функций с 2-мя переменными
6.1 Изометрические графики
y <- x <- seq(-10, 10, length = 30)
# Radially symmetric sinc surface: 10 * sin(r) / r with r = sqrt(x^2 + y^2).
# Vectorized over x and y.
# BUG FIX: the original returned NaN (0/0) at the origin, leaving a hole in
# perspective plots; the removable singularity is patched with its limit, 10.
f <- function(x, y) {
  r <- sqrt(x^2 + y^2)
  z <- 10 * sin(r) / r
  z[r == 0] <- 10   # lim_{r -> 0} 10*sin(r)/r = 10
  z
}
# построение матрицы со значениями параметров и функции
z = outer(x, y, f) # Получили матрицу 30 x 3 0 .
# расцветка графика
col <- rainbow(30, start = 0.05, end = 0.75)
# функция рисования графика
persp(x, y, z, col = col, theta = 55, phi = 25)
## параметры полотна графика (здесь разбиваем полотно на матрицу графиков)
par(
mfrow = c(2, 3), # будем выводить графики в матрице 2x3
oma = c(0, 0, 0, 0), # поля для всего листа графиков
mar = c(0, 0, 0, 0) # поля сверху или справа от графика
)
## графики
# 1-й
persp(x, y, z, col = 'lightgreen', theta = 50, phi = 30) # 1-й
# 2-й
col <- rainbow(30, start = 0 , end = 0.95)
persp(x, y, z, col = col , theta = 50 , phi = 30)
# 3-й
col <- heat.colors(30)
persp(x, y, z, col = col, theta = 50, phi = 30)
# 4-й
col <- terrain.colors(30)
persp(x, y, z, col = col, theta = 50, phi = 30)
# 5-й
col <- topo.colors(30)
persp(x, y, z, col = col, theta = 50, phi = 30)
# 6-й
col <- cm.colors(30)
persp(x, y, z, col = col, theta = 50, phi = 30)
### пакет rgl
library(rgl)
persp3d(x, y, z1, col = rainbow(length(x)))
6.1 3D-графики
### пакет plot3D
library(plot3D)
persp3D(x, y, z1, contour = TRUE)
## 3D-гистаграмма
x2 <- y2 <- seq(-4, 4, by = 0.5)
# Saddle surface for the 3-D histogram demo: z = 25 - x2^2 + y2^2.
f2 <- function(x2, y2) {
  25 - x2^2 + y2^2
}
z2 <- outer(x2, y2, f2)
par(mfrow = c(1, 1), oma = c(2, 2, 2, 2))
hist3D(x2, y2, z2, border = 'black')
## 2D график (для функции с 2-мя переменными)
library(plot3D)
image2D(z2, x2, y2)
image2D(z, n1, n2, contour = TRUE, xlab = 'n1', ylab = 'n2')
# сохранить настройки текущих графиков
old.par <- par()
# два графика рядом
image2D(z.train, n1, n2, contour = TRUE, xlab = 'n1', ylab = 'n2', main = 'На обучающих данных')
image2D(z.test, n1, n2, contour = TRUE, xlab = 'n1', ylab = 'n2', main = 'На тестовых данных')
### пакет rgl
library(rgl)
rgl.open()
bg3d('white')
plot3d(g[, 1], z3, g[, 2],
labels = labels, type = 's',
size = 2, lwd = 2,
col = rainbow(length(g[, 2])))
identify3d(g[, 1], z3, g[, 2], labels = labels)
# щёлкаем правой кнопкой мыши по любой точке
# средняя кнопка - выход из режима
rgl.close()
### пакет car
library(car)
rgl.open()
scatter3d(g[ ,1], z3, g[ ,2],
labels = labels, neg.res.col = 'grey',
pos.res.col = rainbow(1000), point.col = 'black')
### пакет Plotly
install.packages('plotly')
7. Учёт сделок
## при работе с временными рядами
# Number the trades: each entry/reversal in a -1/0/1 position series starts
# a new ID; flat bars get 0.
# NOTE(review): these four variants assume xts/zoo series, where diff()
# keeps the original length with a leading NA that x[1] <- ... overwrites --
# on plain vectors diff() shortens by one, so use the vector versions below.
TradeID <- function(state) {
  x <- diff(state)
  x[1] <- state[1]
  cumsum(as.numeric(as.logical(abs(x)) & abs(state))) * abs(state)
}
# Entries: +1/-1 on bars where a new position (or a reversal) is opened, else 0.
TradeEntries <- function(state) {
  x <- diff(state)
  x[1] <- state[1]
  as.numeric(as.logical(x) * state)
}
# Exits: 1 on the bar where the previous position is closed or reversed, else 0.
TradeExits <- function(state) {
  x <- as.numeric(as.logical(diff(state) * lag(state)))
  x[1] <- 0
  x
}
# Position change: signed difference of the position between adjacent bars.
TradeCh <- function(state) {
  x <- diff(state)
  x[1] <- state[1]
  return(x)
}
## при работе с векторами
# нумерация трейдов
# Number the trades in a plain -1/0/1 position vector: every entry
# (including a reversal) starts a new running ID; flat bars get 0.
TradeID <- function(state) {
  change <- c(state[1], diff(state))                  # per-bar position change
  new.trade <- as.logical(abs(change)) & abs(state)   # bar opens a position
  cumsum(as.numeric(new.trade)) * abs(state)
}
# учёт входов
# Entry markers for a plain -1/0/1 position vector: +1/-1 on bars where a
# new position (or a reversal) is opened, 0 elsewhere.
TradeEntries <- function(state) {
  change <- c(state[1], diff(state))
  as.numeric(as.logical(change) * state)
}
# учёт выходов
# Exit markers for a plain -1/0/1 position vector: 1 on the bar where the
# previous position is closed or reversed, 0 elsewhere.
TradeExits <- function(state) {
  closed <- as.logical(diff(state) * state[-length(state)])
  as.numeric(c(FALSE, closed))
}
# смена позиции
# Signed position change per bar; the first bar is measured against flat (0),
# so prepending 0 and differencing reproduces c(state[1], diff(state)).
TradeCh <- function(state) {
  diff(c(0L, state))
}
## проверка ветора 'state' на правильность значений
if (!all(state %in% c(-1,0,1))) {
stop('state must be a numeric vector with -1,0,1')
}
## пересечение линий
# для векторов
# Upward-crossing detector for plain vectors: TRUE at bars where x1 crosses
# above x2. The first possible transition is suppressed (no prior history),
# and downward crossings are discarded.
Cross <- function(x1, x2) {
  step <- diff(x1 > x2)
  step[1] <- 0          # suppress a signal on the very first transition
  step[step < 0] <- 0   # keep only upward crossings
  as.logical(c(0, step))
}
# Upward-crossing detector for xts/zoo series: returns a 0/1 series with 1
# where x1 crosses above x2.
# NOTE(review): relies on xts semantics -- diff() on a series keeps the
# original length with a leading NA, which x[1] <- 0 then overwrites. On
# plain vectors diff() shortens by one, so use the vector version above.
Cross <- function(x1,x2) {
  x <- diff(x1>x2)
  x[1] <- 0
  x[x<0] <- 0
  return(sign(x))
}
8. Доходность
## для работы с рядами
library(TTR)
# SR (simple return) для элеменов ряда
data$SR <- lag(data$state) * ROC(data$x, type = 'discrete')
data$SR[1] <- 0
# вычислить итоговую прибыль
data$equity <- cumprod(data$SR + 1) - 1
profit <- data$x[[1]] * as.numeric(last(data$equity))
# LR (log return)
data$R.log <- lag(data$state) * ROC(data$x, type = 'continuous')
data$R.log[1] <- 0
# вычислить итоговую прибыль
data$equity <- cumsum(data$R.log)
profit <- data$x[[1]] * (exp(as.numeric(last(data$equity))) - 1)
## можно использовать вместо ROC() функцию из библиотеки quantmod
library(quantmod)
R <- Delt(x, type = 'arithmetic') # simple return
R <- Delt(x, type = 'log') # log return
## библиотека PerformanceAnalytics
library(PerformanceAnalytics)
# кумулятивная сумма equity = прибыль
profit <- Return.cumulative(data$diff, geometric = FALSE)
# simple return и прибыль
data$SR <- Return.calculate(data$x, method = 'discrete')
data$SR[1] <- 1
profit <- Return.cumulative(data$SR, geometric = TRUE) * s1
# log return
data$LR <- Return.calculate(data$x, method = 'log')
data$LR[1] <- 0
profit <- (exp(Return.cumulative(data$LR, geometric = FALSE)) - 1) * s1
## торговля с реинвестированием прибыли
# количество актива
w <- s0 / data$x[[1]]
# через абсолютные доходности
# количество актива
data$w <- data$state[[1]] * s0 / data$x[[1]]
data$delta <- 0
data$equity.abs <- s0
for (i in 2:nrow(data)) {
data$delta[i] <- data$w[[i - 1]] * (data$x[[i]] - data$x[[i - 1]])
data$equity.abs[i] <- data$equity.abs[[i - 1]] + data$delta[[i]]
data$w[i] <- data$state[[i]] * data$equity.abs[[i]] / data$x[[i]]
}
profit.abs <- sum(data$delta)
# через относительные доходности
data$R <- data$state * ROC(data$x, type = 'discrete')
data$R[1] <- 0
# вычислить итоговую прибыль
data$equity <- s0 * cumprod(data$R + 1)
profit <- as.numeric(last(data$equity) - s0)
9. Источники котировок
# статистика в .cvs формате
finance.yahoo.com/q/hp?s=AAPL
www.google.com/finance/historical?q=MSFT
www.dukascopy.com/swiss/english/marketwatch/historical/
www.finam.ru/profile/forex/eur-usd
mfd.ru/export/
## возможна выгрузка .cvs данных из trade-программ через меню экспорт:
# MetaTrader:
# Сервис -> Архив котировок (F2) -> выбрать нужную валютную пару, таймрм -> Экспорт
# Ninja Trader:
# Tools -> Historical Data Manager -> Export -> выбрать нужный фин.инструмент;
# тики или минуты; диапазон дат -> Export
# выгрузка .csv из буфера обмена в R
data1 <- read.table(pipe('pbpaste'))
# для Win
data1 <- read.table('clipboard')
# проверка типа данных
class(data1)
[1] 'data.frame'
# есди данные с Yahoo, то строки инвертированны, вернуть обратно:
data1 <- data1[rev(rownames(data1)), , drop = FALSE]
# выгрузка данных из файла (данные из MetaTrader)
data <- read.table(
file = 'D:/EURUSD1.csv', # имя файла
sep = ',', # разделитель - запятая
header = F, # заголовки столбцов отсутствуют
as.is = T # не преобразовывать строки в факторы
)
head(data, 3)
tm <- strptime(
paste(data[, 1], sprintf('%05s', data[, 2])),
format = '%Y.%m.%d %H:%M', tz = 'EET'
)
head(tm)
#
# замена ',' на '.'
temp.frame$var1 <- as.numeric(gsub('\\,','.',file[[var1]]))
# преобразование таблицы в ряд
library(xts)
Sys.setenv(TZ = 'UTC') # время - UTC
data2 <- xts(x = data[, c(3:6)], order.by = tm, tzone = 'UTC')
colnames(data2) <- c('Open', 'High', 'Low', 'Close')
# преобразование периодичности
data3 <- to.period(data2,
period = 'hours',
indexAt = 'startof')
# изменить временное окно
win <- '2014-12-01/2014-12-31'
data4 <- data3[win]
## REST API Yahoo finance
# список доступных тикеров
http://finance.yahoo.com/webservice/v1/symbols/allcurrencies/quote
# в JSON формате
http://finance.yahoo.com/webservice/v1/symbols/allcurrencies/quote?format=json
# загрузка дневных данных
ticker <- 'EUR=X' # обратная валютная пара EUR/USD .
start.date <- '2014-10-01'
end.date <- Sys.Date()
url <- paste0(
'http://ichart.finance.yahoo.com/table.csv?s=',
ticker,
# начальная дата
'&a=', as.numeric(substr(start.date, 6, 7)) - 1, # месяц
'&b=', substr(start.date, 9, 10), # день
'&c=', substr(start.date, 1, 4), # год
# конечная дата
'&d=', as.numeric(substr(end.date, 6, 7)) - 1,
'&e=', substr(end.date, 9, 10),
'&f=', substr(end.date, 1, 4),
'&g=d&ignore=.csv'
)
data <- read.csv(url, as.is = TRUE)
data2 <- xts::as.xts(data[, -1], order.by = as.POSIXct(data$Date))
quantmod::chartSeries(data2, theme = 'white')
## загрузка внутридневных котировк
# список доступных тикеров
http://in.finance.yahoo.com/lookup
ticker <- 'EURUSD=X'
url <- paste0(
'http://chartapi.finance.yahoo.com/instrument/1.0/',
ticker, '/chartdata;type=quote;range=1d/csv'
)
# получаем первые 17 строк и удаляем первую строку
metadata <- readLines(paste(url, collapse = ''), 17)[-1]
# создаём список пар 'параметр, значение'
metadata <- strsplit(metadata, ':')
# заменяем '-' в названиях на '_'
names(metadata) <- sub('-', '_', sapply(metadata, '[', 1))
metadata <- lapply(metadata,
function(x) {
strsplit(x[-1], ',')[[1]]
})
# преобразуем смещение относительно GMT в число
metadata$gmtoffset <- as.numeric(metadata$gmtoffset)
data <- as.xts(
read.zoo(
paste0(url, collapse = ''),
sep = ',',
header = FALSE, skip = 17,
FUN = function(i) {
.POSIXct(as.numeric(i))
}
)
)
# заголовки столбцов
colnames(data) <- metadata$values[-1]
# сохраняем метаданные как атрибуты
xtsAttributes(data) <- metadata[c('ticker','Company_Name', 'Exchange_Name', 'unit', 'timezone', 'gmtoffset')]
10. Логика с памятью
# заменить NA на предыдущий элемент
library(zoo)
na.locf(x)
# функция для удаления лишних сигналов
# Remove redundant repeated signals (AmiBroker ExRem analogue): keep a
# signal value only on the bar just before it changes; all other bars
# become NA.
# NOTE(review): depends on zoo::na.locf and xts-style lag()/$-column
# assignment on a series -- confirm against callers before reusing on
# plain vectors, where lag() behaves differently.
exrem <- function(x) {
x$a <- na.locf(x)                 # carry the last observation forward
x$a <- ifelse(is.na(x$a) | is.nan(x$a) | is.infinite(x$a),
0, x$a)                           # sanitize remaining NA/NaN/Inf to 0
ind <- which(x$a != lag(x$a))     # bars where the signal value changes
x$y <- rep(NA, length(x$a))
x$y[ind - 1] <- x$a[ind]          # place the new value one bar earlier
return(x$y)
}
10.1 Торговая система: Выход стохастика из зон пере-купленности/проданности
# границы зоны стохастика
stlo <- 0.25
sthi <- 0.75
# точки пересечения стохастика с границами зоны
data$sigUp <- Cross(data4$slowD, stlo, FALSE)
data$sigDn <- Cross(sthi, data4$slowD, FALSE)
# точки позиций
data$sig <- data$sigUp - data4$sigDn
data.temp <- data[data4$sig != 0]
# убираем лишнее и добавляем очишенные данные в ряд
data.temp$sig.clean <- exrem(data.temp$sig)
# убираем лишние столбцы
data.temp$sig <- NULL
data.temp <- na.omit(data.temp)
data$sig <- NULL
#
data.temp2 <- merge.xts(data, data.temp$sig.clean)
colnames(data.temp2)[ncol(data.temp2)] <- 'sig'
data.temp2 <- na.locf(data.temp2, 'sig', 'state')
data.temp2$sig[is.na(data.temp2$sig)] <- 0
data.temp2$state[is.na(data.temp2$state)] <- 0
pip <- 0.0001
data.temp2$ret <- lag(data.temp2$state) * (data.temp2$Close - lag(data.temp2$Close)) / pip
data.temp2$ret[1] <- 0
data.temp2$equity <- cumsum(data.temp2$ret)
## визуализация
chartSeries(
data.temp2[, c('Open', 'High', 'Low', 'Close')],
name = 'EUR/USD H1', theme = 'white'
)
addTA(data.temp2$slowD, col = 'blue')
addLines(h = c(stlo, sthi), on = 2, col = 'red')
addTA(data.temp2$state, yrange = c(-1.2, 1.2))
addTA(data.temp2$equity, col = 'dark green')
#
zoomChart('2014-11-20::')
zooom() # интерактивно
zoomChart() # вернуть исходный масштаб
### доп функции
## Функция задержки для векторов и матриц
# Shift a vector, or the rows of a matrix, by `nlag` positions.
# Positive nlag moves values toward higher indices (leading NAs appear);
# negative nlag moves them toward lower indices (trailing NAs); nlag = 0
# returns the input unchanged. Assumes |nlag| < length/nrow of `m`.
mylag <- function(m, nlag = 1) {
  if (nlag == 0) {
    return(m)
  }
  if (is.null(dim(m))) {
    # Vector case.
    n <- length(m)
    if (nlag > 0) {
      m[(nlag + 1):n] <- m[seq_len(n - nlag)]
      m[seq_len(nlag)] <- NA
    } else {
      m[seq_len(n + nlag)] <- m[(1 - nlag):n]
      m[(n + nlag + 1):n] <- NA
    }
  } else {
    # Matrix case: shift whole rows, keeping dimensions.
    n <- nrow(m)
    if (nlag > 0) {
      m[(nlag + 1):n, ] <- m[seq_len(n - nlag), ]
      m[seq_len(nlag), ] <- NA
    } else {
      m[seq_len(n + nlag), ] <- m[(1 - nlag):n, ]
      m[(n + nlag + 1):n, ] <- NA
    }
  }
  m
}
# Заменяем NA на предыдущий элемент:
# Last-observation-carried-forward for plain vectors: every NA is replaced
# by the most recent preceding non-NA value; a leading NA stays NA.
na.prev <- function(x) {
  observed <- !is.na(x)
  observed[1] <- TRUE   # anchor so cummax always has a starting index
  # Index of the last observed position at or before each element.
  last.seen <- cummax(seq_along(x) * observed)
  x[last.seen]
}
10.2 Обучение торговой системы
# среднее
mean(x)
# медиана
median(x)
## квантиль (вероятность, что переменная будет равна значению или находиться левее)
# для диапазона
quantile(x, seq(0,1, by=0.2))
# фиксированный
quantile(x, 0.5)
11. Эффективность ТС
library(PerformanceAnalytics)
11.1 Коэффициенты
# на вход подавать SR ряд
## SharpeRatio {PerformanceAnalytics}
SharpeRatio.annualized(returns, scale = 1)
## SortinoRatio
SortinoRatio(returns)
# CalmarRatio
CalmarRatio(returns, scale = 1)
# SterlingRatio
SterlingRatio(returns, scale = 1)
11.2 Drawdowns
# на вход подавать SR
# таблица по n-худшим просадкам (по-умолчанию geometric = TRUE)
table.Drawdowns(R, top = 1000, digits = 4)
# поиск просадок
findDrawdowns(R, geometric = TRUE, ...)
13. Параллельные вычисления
## Задача оптимизации параллельных процессов:
# Integer Mapping
# Output Aggregation
# Load Balancing
13.0 foreach
# позволяет строить циклы аналогично *apply функциям + параллелизовать
## базовая конструкция
foreach(i = 1:10) %do% {
что то
}
# вложенные циклы
foreach(i = 1:3, .combine = 'c') %:%
foreach(j = 1:3, .combine = 'c') %do% {
i*j
}
# то же, но с условием
foreach(a = rnorm(25), .combine = 'c') %:%
when(a >= 0) %do% {
sqrt(a)
}
# итераторы
require(iterators)
foreach(i = icount(2000), .combine = '+') %do% {}
foreach(time = iter(jan2010$DEP_TIME[1:2000], chunksize = 500),
.combine = '+') %do% {}
jan.matrix <- matrix(jan2010$DEP_TIME[1:2000], ncol = 500)
ans <- foreach(times = iter(jan.matrix,by = 'row'), .combine = '+') %do% {
count.hours(times)
}
# жесткое разделение задач по ядрам
foreach(byAirline = isplit(jan2010$DEP_TIME, jan2010$UNIQUE_CARRIER),
.combine = cbind) %do% {}
# в случае параллельной работы (с пакетами doParallel или doFuture)
foreach(row.num = 1:nrow(my.matrix),
.export = c('x', 'y'),
.packages = c('z')) %dopar% {
return(Vectorize(fib)(my.matrix[row.num,]))
}
13.1 parallel
# Пакет основан на сборке multicore и snow пакетов. Включён в Rbase
### multicore
# базовые функции
mcparallel(task1()) # запланировать задачу
mccollect(list(task1, task2)) # выполнить и собрать данные
mclapply(data, task(), mc.cores = 4)
mclapply(1:4, time.it, mc.cores = 2, mc.preschedule = FALSE) # авто-балансировка нагрузки
# распределение задач по ядрам
nr <- nrow(jan2010)
ncores <- 4
chunks <- split(1:nr, rep(1:ncores, each = nr / ncores))
pvec() # mc функция для работы с векторами
### SNOW
# регистрация SOCK кластера
parallel_cluster <-
detectCores() %>%
makeCluster(., type = 'PSOCK')
# регистрация mcore кластера
parallel_cluster <-
detectCores() %>%
makeCluster(., type = 'FORK')
# остановка кластера
stopCluster(parallel_cluster)
parallel_cluster <- c()
# проверка остановки кластера
if(!is.null(parallel_cluster)) {
stopCluster(parallel_cluster)
parallel_cluster <- c()
}
# визуализация тайминга кластера
tm <- snow.time(clusterApply(cl, 1:6, do.kmeans.nclusters))
plot(tm)
# балансировка нагрузки
clusterApplyLB()
# распределение задач по кластерам
clusterSplit(cl, jan2010$DEP_TIME)
13.2 doParallel
# Обертка для foreach и parallel библиотек (добавляет %dopar% в foreach цепочки)
#
library('foreach')
library('doParallel')
# регистрация mc worker'ов
# по-умолчанию запускается doParallelMC для Unix и doParallelSNOW для Win
workers <- 4
registerDoParallel(cores = workers)
stopImplicitCluster()
# можно собрать PSOCK-кластер и зарегистрировать его
cl <- makePSOCKcluster(2)
registerDoParallel(cl)
stopCluster(cl)
# определить число и тип worker'ов
getDoParWorkers()
getDoParName()
# определить версию пакета
getDoParVersion()
# базовые конструкции
# Returns a list
foreach(i = 1:4) %dopar% {
j <- i + 1
sqrt(j)
}
# Returns a vector
foreach(i = 1:4, .combine = c) %dopar% {
j <- i + 1
sqrt(j)
}
# Returns a matrix
foreach(i = 1:4, .combine = rbind) %dopar% {
j <- i + 1
matrix(c(i, j, sqrt(j)), nrow = 1)
}
# Returns a data frame
foreach(i = 1:4, .combine = rbind) %dopar% {
j <- i + 1
data.frame(i = i, j = j, sqrt.j = sqrt(j))
}
# использование mcore опций
mcoptions <- list(preschedule = FALSE, set.seed = TRUE, .inorder = FALSE)
foreach(i = 1:3, .options.multicore = mcoptions) %dopar% {
sqrt(i)
}
# жесткое разнесение задач по процессам
registerDoParallel(4)
mat.log <- foreach(col = iter(stocks[,-c(1,2)], by = 'col'), .combine = 'cbind') %dopar% {
log.returns(col)
}
stopImplicitCluster()
13.3 doFuture
# по-сути то же, что и doParallel, но добавляет %dopar% в future-цепочки
# плюс, добавляет автоматический экспорт текущего env в кластеры (что облегчает работу)
library('doFuture')
# регистрация mc worker'ов
workers <- 4
registerDoFuture(cores = workers)
plan(multiprocess)
# регистрация кластеров
registerDoFuture()
cl <- makeCluster(4)
plan(cluster, workers = cl)
# регистрация snow-кластера
registerDoFuture()
cl <- makeCluster(4)
plan(cluster, workers = cl)
# регистрация MPI-кластера
registerDoFuture()
cl <- makeCluster(4, type = 'MPI')
plan(cluster, workers = cl)
14. Асинхронные вычисления
14.1 future
# позволяет писать выражения с отложенным выполнением
## базовая конструкция
# с помощью присвоения
plan(eager)
a %<-% slow_sum(x[1:50])
# или a %<-% slow_sum(x[1:50] %plan% eager
y <- a
# в функциональном стиле
plan(eager)
f <- future(slow_sum(x[1:50]))
y <- value(f)
# доступные сценарии
eager # sequentially
lazy # only if needed
multiprocess # in parallel
cluster # on a set of machines
# сценарии могут быть вложеными
plan(list(cluster, multiprocess))
a %<-% {
c %<-% slow_sum(x[1:25])
d %<-% slow_sum(x[26:50])
c + d
}
|
3b7b785578a92fcd2204d99e6470ea069ca7906e
|
44a3fad6338a63ac5417b1e52e47420c0e013f45
|
/R/Madogram.R
|
f1b8c3360ba158a085502673c9b110884872fa83
|
[] |
no_license
|
cran/ExtremalDep
|
4faac60ce0040262a98410edc6488ddf939ad9bd
|
18238416ddb6567610c4457dc332316272dbd16e
|
refs/heads/master
| 2023-03-06T18:03:59.304908
| 2023-02-26T14:40:02
| 2023-02-26T14:40:02
| 236,595,530
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,494
|
r
|
Madogram.R
|
#######################################################
### Authors: Giulia Marcon and Simone Padoan ###
### Emails: giulia.marcon@phd.unibocconi.it, ###
### simone.padoan@unibocconi.it ###
### Institution: Department of Decision Sciences, ###
### University Bocconi of Milan ###
### File name: Madogram.r ###
### Description: ###
### This file enables to compute the multivariate ###
### madogram as proposed in Marcon et al. (2014) ###
### Last change: 14/07/2015 ###
#######################################################
# The routine estimates the Pickands function using
# the multivariate madogram
# Multivariate case
# Multivariate madogram estimator of the Pickands dependence function A(w)
# (Marcon et al.). `w` is a point (or matrix of points, one per row) in the
# d-dimensional unit simplex; `data` is an n x d sample of componentwise
# maxima; `margin` selects how margins are transformed to uniform:
#   "emp"  - empirical ranks (default when several choices are passed),
#   "est"  - GEV margins fitted via fGEV()/pgev() (external dependencies),
#   "exp"/"frechet"/"gumbel" - known parametric margins.
# Returns the estimated A(w), one value per row of `w`.
madogram <- function (w, data, margin = c("emp", "est", "exp", "frechet", "gumbel"))
{
  # Inner helper: the multivariate madogram nu(w) =
  # E[ max_i F_i(X_i)^{1/w_i} - mean_i F_i(X_i)^{1/w_i} ].
  lmadogram <- function(w, data, margin) {
    sumdata <- dim(data)
    d <- sumdata[2]
    if (!is.matrix(w))
      w <- matrix(w, ncol = d)
    sumw <- dim(w)
    if (sumw[2] != d)
      stop("`x' must be a vector/matrix with `d' elements/columns")
    # Default to empirical margins when no single choice was made.
    if( length(margin)>1 )
      margin = "emp"
    ans <- numeric(sumw[1])
    if (margin == "emp") {
      # Rank transform each column to (0, 1], NA-aware.
      data_emp <- apply(data, 2, rank, na.last = "keep")
      nasm <- apply(data_emp, 2, function(x) sum(!is.na(x)))
      data_emp <- data_emp/rep(nasm , each = nrow(data_emp))
      Fdata <- data_emp
    }
    if(margin=="est"){
      # Fit a GEV to each margin and transform with its fitted CDF.
      # NOTE(review): fGEV()/pgev() come from outside this file.
      par <- NULL
      Fdata <- data
      for(i in 1:d){
        par[[i]] <- fGEV(data[,i])
        Fdata[,i] <- pgev(data[,i], loc=par[[i]]$param[1],
                          scale=par[[i]]$param[2], shape=par[[i]]$param[3])
      }
    }
    if (margin == "exp") {
      Fdata <- apply(data, 2, pexp)
    }
    if (margin == "frechet") {
      Fdata <- apply(data, 2, pfrechet)
    }
    if (margin == "gumbel") {
      Fdata <- apply(data, 2, pgumbel)
    }
    # For the j-th weight vector, raise column i of the uniform data
    # to the power 1/w[j, i].
    powerw <- function(j, xx, w, d)
      sapply(c(1:d), function(i, x, w) x[, i]^(1/w[, i]), xx, t(w[j, ]))
    u <- lapply(c(1:sumw[1]), powerw, Fdata, w, d)
    # Row-wise max and mean, then average their difference over the sample.
    ma <- sapply(c(1:sumw[1]), function(i, u) apply(u[[i]], 1, max), u)
    me <- sapply(c(1:sumw[1]), function(i, u) rowMeans(u[[i]]), u)
    mm <- ma - me
    ans <- colMeans(mm)
    return(ans)
  }
  ans <- lmadogram(w, data, margin)
  sumdata <- dim(data)
  d <- sumdata[2]
  if (!is.matrix(w))
    w <- matrix(w, ncol = d)
  # Convert the madogram nu(w) to the Pickands function:
  # A(w) = (c(w) + nu(w)) / (1 - c(w) - nu(w)), c(w) = mean_i w_i/(1+w_i).
  W <- w/(1 + w)
  cst <- rowMeans(W)
  A <- ((cst + ans)/(1 - cst - ans))
  return(A)
}
|
ab64d64f4737f13b104a528dae1eaaa68f064799
|
f0cfbb85ed0b901bc7b4a9b50e7425195a65f665
|
/R/topology.R
|
f54b281cc794b9ec13cb0e675491a235eef3bc0f
|
[] |
no_license
|
mlansford/crgnet
|
5dd5336ef2f81f0546b818ab8c2f28e365ace1f3
|
94e3504f4545c325600f992bf96d2c3fe3630ce8
|
refs/heads/master
| 2023-02-06T01:24:02.423666
| 2020-12-23T17:49:04
| 2020-12-23T17:49:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 347
|
r
|
topology.R
|
# Consensus network topology over a list of fitted models.
#
# Each element of `fits` carries a $parents matrix (0-based parent indices,
# one row per gene) and a $trajectories list (one entry per experiment).
# For every fit, each gene's 1-based parent indices are tallied into a
# genes x experiments count matrix; the counts are then divided by the
# number of fits, giving the fraction of fits supporting each edge.
topology <- function(fits){
  n.genes <- nrow(fits[[1]]$parents)
  n.exps <- length(fits[[1]]$trajectories)
  counts <- matrix(0, nrow = n.genes, ncol = n.exps)
  for (fit in fits) {
    parents1 <- t(fit$parents + 1)   # shift to 1-based; genes become columns
    for (g in seq_len(ncol(parents1))) {
      idx <- parents1[, g]
      counts[g, idx] <- counts[g, idx] + 1
    }
  }
  counts / length(fits)
}
|
dedf5f53a5f0916ec0f56dfe4221e5f7172661ea
|
8e514435f9fa089581850f19bde56c575810b1fe
|
/results/r.R
|
2c11f4cd04e43e5ec148cd7886813c427bc43d16
|
[] |
no_license
|
kevin-winter/Dataset-Anonymization
|
1a2b624c1f5765466a90e81ca262a008c172b390
|
de8025443922fa9afcc8a866594802bdd09df11a
|
refs/heads/master
| 2021-01-23T14:12:28.430340
| 2017-11-04T14:52:06
| 2017-11-04T14:52:06
| 93,245,795
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 423
|
r
|
r.R
|
# Compare anonymization algorithms across several quality scores and draw
# a scatterplot matrix grouped by algorithm.
library(readr)
library(car)  # BUG FIX: scatterplotMatrix() is provided by 'car', which was never loaded
x <- read_csv("~/Documents/workspace/_DATA SCIENCE/Dataset Anonymization/results/out_all.csv")
# Drop the naive-Bayes variants from the comparison.
x <- x[!(x$algorithm %in% c("bnb", "gnb")), ]
# Relative decision-tree accuracy: model trained on sampled (anonymized)
# data vs model trained on the original data.
x$dt <- x$dt_accuracy_sampled / x$dt_accuracy_original
scores <- c("accuracy", "cramers_v", "pearson", "iva", "dt")
xs <- x[scores]
scatterplotMatrix(xs, diagonal = "boxplot",
                  var.labels = c("acc", "mcv", "pcd", "iva", "cad"),
                  smoother = FALSE, groups = x$algorithm, legend.plot = FALSE)
|
9f4183998847af25feb0275f89f4ffcf2e5c1427
|
c90dac176024e17fc2f984c2e5bf3390ee08235b
|
/Chapter 2/2.4.9.r
|
fb2d0858fd6150c0841b251eaf501f33ff33f100
|
[] |
no_license
|
HariharasudhanAS/ISLR-Exercises
|
4317851a5c6fafe0f63f4f0be3b6363ea5bb9593
|
0b6066ce81c19cefeb582703f3b3a7f3148d5af3
|
refs/heads/master
| 2020-03-31T00:26:17.718212
| 2018-12-11T11:05:53
| 2018-12-11T11:05:53
| 151,739,548
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 695
|
r
|
2.4.9.r
|
# ISLR exercise 2.4.9: exploratory analysis of the Auto data set.
# Load the data, treating '?' as NA.
Auto=read.table("/home/orange/Desktop/Auto.data",header=T,na.strings="?")
fix(Auto)   # opens an interactive spreadsheet editor (blocks until closed)
Auto=na.omit(Auto)
dim(Auto)
Auto=Auto[,-9]   # drop column 9 ('name'), leaving only numeric columns
names(Auto)
attach(Auto)
range(Auto)
#fix(Auto)
# Per-column range, standard deviation and mean.
apply(Auto,2,range)
apply(Auto,2,sd)
apply(Auto,2,mean)
# Repeat the summaries after removing observations 10 through 85.
Auto=Auto[-seq(10,85),]
apply(Auto,2,range)
apply(Auto,2,sd)
apply(Auto,2,mean)
dim(Auto)
# Reload the full data set for the graphical summaries below.
Auto=read.table("/home/orange/Desktop/Auto.data",header=T,na.strings="?")
Auto=na.omit(Auto)
Auto=Auto[,-9]
dim(Auto)
# Write the pairwise plots and correlation chart to a PDF.
pdf("/home/orange/Documents/ISLR with Applications in R/2.4.9.pdf")
pairs(Auto)
cor(Auto, method = "pearson", use = "complete.obs")
#install.packages("PerformanceAnalytics")
library("PerformanceAnalytics")
chart.Correlation(Auto, histogram=TRUE)
dev.off()
|
35cabc788d73dbc0b9b3782a1391a05e1eafbaf6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/stabs/examples/fitfuns.Rd.R
|
dbef8d6ffb6f6914f25082ee48bd937dbd665e3c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,227
|
r
|
fitfuns.Rd.R
|
library(stabs)
### Name: Fitting Functions
### Title: Fit Functions for Stability Selection
### Aliases: fitfun fitfuns glmnet.lasso glmnet.lasso_maxCoef lars.lasso
### lars.stepwise
### Keywords: models nonlinear nonparametric
### ** Examples
if (require("TH.data")) {
## make data set available
data("bodyfat", package = "TH.data")
} else {
## simulate some data if TH.data not available.
## Note that results are non-sense with this data.
bodyfat <- matrix(rnorm(720), nrow = 72, ncol = 10)
}
if (require("lars")) {
## selected variables
lars.lasso(bodyfat[, -2], bodyfat[,2], q = 3)$selected
lars.stepwise(bodyfat[, -2], bodyfat[,2], q = 3)$selected
}
if (require("glmnet")) {
glmnet.lasso(bodyfat[, -2], bodyfat[,2], q = 3)$selected
## selection path
glmnet.lasso(bodyfat[, -2], bodyfat[,2], q = 3)$path
## Using the anticonservative glmnet.lasso (see args.fitfun):
stab.glmnet <- stabsel(x = bodyfat[, -2], y = bodyfat[,2],
fitfun = glmnet.lasso,
args.fitfun = list(type = "anticonservative"),
cutoff = 0.75, PFER = 1)
}
|
215ffaf6b27577bf9ade4f67daaf5f725bea9a47
|
8d0eb323a84a89fbb81045c492c8c7ed72c7cc84
|
/RNA-seq/step4-DEG-0-install.R
|
ed39a3a3482a1f8fcbc3f0b9efdde76d8f4dbbaa
|
[] |
no_license
|
reedliu/NGS-codes
|
e23157e1bf4a244b9e1ec0fc5b356fe562ea026d
|
d31dd42874789e9f0bf6979e47a7cd804f9bd5f4
|
refs/heads/master
| 2020-04-12T01:05:37.111875
| 2019-11-10T13:58:51
| 2019-11-10T13:58:51
| 162,221,667
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,334
|
r
|
step4-DEG-0-install.R
|
##############################
## install cran
##############################
if(length(getOption("CRAN"))==0) options(CRAN="https://mirrors.tuna.tsinghua.edu.cn/CRAN/")
cran_packages <- c('tidyverse',
'ggpubr',
'ggstatsplot')
for (pkg in cran_packages){
if (! require(pkg,character.only=T) ) {
install.packages(pkg,ask = F,update = F)
require(pkg,character.only=T)
}
}
##############################
## install bioconductor
##############################
# first prepare BioManager on CRAN
if(length(getOption("CRAN"))==0) options(CRAN="https://mirrors.tuna.tsinghua.edu.cn/CRAN/")
if(!require("BiocManager")) install.packages("BiocManager",update = F,ask = F)
if(length(getOption("BioC_mirror"))==0) options(BioC_mirror="https://mirrors.ustc.edu.cn/bioc/")
# use BiocManager to install
Biocductor_packages <- c('org.Hs.eg.db',
'hgu133a.db',
'CLL',
'hgu95av2.db',
'survminer',
'survival',
'hugene10sttranscriptcluster',
'limma')
for (pkg in Biocductor_packages){
if (! require(pkg,character.only=T) ) {
BiocManager::install(pkg,ask = F,update = F)
require(pkg,character.only=T)
}
}
|
302a2943d0adf00037aef0bb4953ed44bd39ce16
|
95e3fe90769a784bae00b7baa17be7521faea529
|
/cBioHeatMapClusters.R
|
c63a25b6e537d5cb09b2013558f3873a1168d42a
|
[] |
no_license
|
zhangyupisa/work
|
9207c75e0c930f1789f25ae0a22b72b188f002fe
|
f7ce63db8a140ceb9ec43f8d8f5ef1f699166038
|
refs/heads/master
| 2020-09-22T02:54:54.358498
| 2018-02-16T21:48:02
| 2018-02-16T21:48:02
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,899
|
r
|
cBioHeatMapClusters.R
|
library(tidyverse)
library(RColorBrewer)
library(reshape2)
library(ISLR)
library(gplots)
library("Biobase")
nan.na <- function (x) {
x[is.nan(x)] <- NA
return(x)
}
dist_cor <- function(x) {
as.dist(1 - cor(t(x), method = "pearson"))
}
clus_wd2 <- function(x) {
hclust(x, method = "ward.D2")
}
clus_wd <- function(x) {
hclust(x, method = "ward.D")
}
file.path <- ("~/Desktop/Clay/Mass Spec Results/WebData/brca_tcga_pub/brca_tcga_pub/")
patientdata <- data.frame(read.csv(paste(file.path, "data_clinical.csv", sep=""), header = TRUE, sep = ",", stringsAsFactors = FALSE))
expressiondata <- read.csv(paste(file.path, "data_expression_median.csv", sep=""), sep = ",", stringsAsFactors = FALSE, check.names = FALSE)
expressiondata[expressiondata == "null"] <- NA
colnames(patientdata) <- lapply(patientdata[5,], as.character)
patientdata <- patientdata[-(1:5),]
#patientdata <- patientdata[1:100,]
patientdata <- subset(patientdata, select = c(PATIENT_ID, Tumor, `AJCC Stage`))
expressiondata <- subset(expressiondata, select = -Entrez_Gene_Id)
expressiondata <- expressiondata %>% drop_na()
#x <- expressiondata[complete.cases(expressiondata), ]
expressionDF <- melt(expressiondata, id.vars = "Hugo_Symbol")
colnames(expressionDF) <- c("GENE", "PATIENT_ID", "EXPRESSION_LEVEL")
#Merging Data frames
patientDF <- merge(x = expressionDF, y = patientdata, by = "PATIENT_ID") # all = TRUE)
patientDF <- dcast(patientDF, PATIENT_ID+Tumor+`AJCC Stage`~ GENE, value.var = "EXPRESSION_LEVEL")
patientDF <- arrange(patientDF, `AJCC Stage`, Tumor)
patientDF <- patientDF %>%
unite(ID, PATIENT_ID, Tumor, 'AJCC Stage', sep = "_", remove = TRUE)
patientDF[,2:ncol(patientDF)] <- sapply(patientDF[,2:ncol(patientDF)],as.numeric)
sapply(patientDF, class) #to check classes
patientDF <- patientDF %>% remove_rownames %>% column_to_rownames(var="ID")
#patientDF <- t(patientDF)
redblackgreen <- colorRampPalette(c("green", "black", "red"))(n = 100)
#class_labels <- ifelse(all_var$mol.biol == "NEG", "grey80", "grey20")
#x <- patientDF %>% drop_na()
#row.has.na <- apply(expressiondata, 1, function(x){any(is.na(x))})
#row.has.nan <- apply(expressiondata, 1, function(x){any(is.nan(x))})
#row.has.inf <- apply(expressiondata, 1, function(x){any(is.infinite(x))})
#row.has.null <- apply(expressiondata, 1, function(x){any(is.null(x))})
#sum(row.has.na)
#final.filtered <- final[!row.has.na,]
test <- as.matrix(patientDF)
heatmap.2(test, distfun = dist_cor, hclustfun = clus_wd2,
# clustering
#distfun = dist,
#hclust = clus,
# scaling (genes are in rows)
scale = "row",
# color
col = redblackgreen,
# labels
labRow = "",
#ColSideColors = class_labels,
# tweaking
trace = "none",
density.info = "none")
set <- expressiondata[, ALL$mol.biol %in% c("BCR/ABL", "ALL1/AF4")]
heatmapdata = t(scale(t(expressiondata),center=TRUE,scale=FALSE))
X = t(scale(t(ncidat),center=TRUE,scale=FALSE))
heatmapdata <- t(expressiondata)
heatmapdata <- t(scale(heatmapdata, center = TRUE, scale = TRUE))
colnames(expressionDF) <- c("GENE", "PATIENT_ID", "EXPRESSION_LEVEL")
#Merging Data frames
patientDF <- merge(x = expressionDF, y = patientdata, by = "PATIENT_ID") # all = TRUE)
patientDF <- arrange(patientDF, PATIENT_ID, `AJCC Stage`, Tumor, GENE)
######Hierarchial Clustering Analysis
zzz <- patientDF
zzz <- zzz[,-(1:2)]
colnames(zzz) <- NULL
#ncidat1 = t(NCI60$data)
ncidat <- t(zzz)
#colnames(ncidat1) = NCI60$labs
#unique(colnames(ncidat1))
#dim(ncidat)
#colnames(ncidat) <- lapply(ncidat[1,], as.character)
colnames(ncidat) <- expressiondata$Hugo_Symbol
#ncidat <- ncidat[-1,]
#ncidat <- ncidat[,-1]
X = t(scale(t(ncidat),center=TRUE,scale=FALSE))
sv = t(X)
#is.na(X) <- do.call(cbind,lapply(X, is.infinite))
#length(which(!is.finite(as.matrix(X))))
w <- which(is.na(as.matrix(sv)))
#sv <- sv[which(!is.finite(as.matrix(sv)))] <- 0
sv[w] <- 0
sv = svd(sv)
U = sv$u
V = sv$v
D = sv$d
aa<- grep("grey",colors())
bb<- grep("green",colors())
cc<- grep("red",colors())
gcol2<- colors()[c(aa[1:30],bb[1:20],rep(cc,2))]
## use the genes that drive the first PC1. This is the first major patter in the data
k=1
ord1<- order(abs(V[,k]),decreasing=TRUE)
x1<- as.matrix(X[ord1[1:nrow(X)],])
heatmap(x1,col=gcol2)
expression$tumor_grade <- patientDF$GRADE[match(expression$variable, patientDF$PATIENT_ID)]
expression$stage <- patientDF$TUMOR_STAGE[match(expression$variable, patientDF$PATIENT_ID)]
#expression$value <- nan.na(ips.hits$value)
x <- subset(expression, select = -variable)
y <- dcast(x, Hugo_Symbol ~ stage, mean)
z <- dcast(x, Hugo_Symbol ~ stage, median)
#write.csv(y, "test.csv")
dy <- dist(as.matrix(y)) # find distance matrix
dz <- dist(as.matrix(z))
hcy <- hclust(dy) # apply hirarchical clustering
hcz <- hclust(dz)
plot(hcy)
file.path <- ("~/Desktop/")
gene <- data.frame(read.csv(paste(file.path, "TotalUniqueiPSGENEandPID.csv", sep=""), header = TRUE, sep = ",", stringsAsFactors = FALSE))
gene <- as.factor(gene$Gene)
split.names <- str_split_fixed(proteinDF$Composite.Element.REF,"[|]", 2) #special character, so needed [ ]
proteinDF <- data.frame(proteinDF, split.names)
proteinDF <- subset(proteinDF, select = -c(Composite.Element.REF, `X2`))
names(proteinDF)[names(proteinDF) == 'X1'] <- 'Gene'
ips.targets <- proteinDF[proteinDF$Gene %in% gene,]
ips.targets <- melt(ips.targets)
ips.targets <- subset(ips.targets, select = -variable)
ips.targets$value <- nan.na(ips.targets$value)
ips.targets <- ips.targets %>%
group_by(Gene) %>%
mutate(mean = mean(value, na.rm = TRUE))
ips.hits <- rnaDF[rnaDF$Hugo_Symbol %in% gene,]
ips.hits <- subset(ips.hits, select = -Entrez_Gene_Id)
ips.hits <- melt(ips.hits)
ips.hits <- subset(ips.hits, select = -variable)
ips.hits$value <- nan.na(ips.hits$value)
ggplot(ips.hits) +
geom_point(aes(x=Hugo_Symbol, y=value)) +
ggtitle("Median Z-Scores (mRNA) vs Gene by Tumor Grade") +
xlab("Median Z-score") +
ylab("Gene") +
theme(axis.text.x = element_text(angle = 90))
ips.hits <- ips.hits %>%
group_by(Hugo_Symbol) %>%
mutate(mean = mean(value, na.rm = TRUE))
######Hierarchial Clustering Analysis
install.packages("ISLR")
library(ISLR)
zzz <- expressiondata
zzz <- zzz[,-(1:2)]
colnames(zzz) <- NULL
#ncidat1 = t(NCI60$data)
ncidat <- t(zzz)
#colnames(ncidat1) = NCI60$labs
#unique(colnames(ncidat1))
#dim(ncidat)
#colnames(ncidat) <- lapply(ncidat[1,], as.character)
colnames(ncidat) <- expressiondata$Hugo_Symbol
#ncidat <- ncidat[-1,]
#ncidat <- ncidat[,-1]
X = t(scale(t(ncidat),center=TRUE,scale=FALSE))
sv = t(X)
#is.na(X) <- do.call(cbind,lapply(X, is.infinite))
#length(which(!is.finite(as.matrix(X))))
w <- which(is.na(as.matrix(sv)))
#sv <- sv[which(!is.finite(as.matrix(sv)))] <- 0
sv[w] <- 0
sv = svd(sv)
U = sv$u
V = sv$v
D = sv$d
aa<- grep("grey",colors())
bb<- grep("green",colors())
cc<- grep("red",colors())
gcol2<- colors()[c(aa[1:30],bb[1:20],rep(cc,2))]
## use the genes that drive the first PC1. This is the first major patter in the data
k=1
ord1<- order(abs(V[,k]),decreasing=TRUE)
x1<- as.matrix(X[ord1[1:nrow(X)],])
heatmap(x1,col=gcol2)
expression$tumor_grade <- patientDF$GRADE[match(expression$variable, patientDF$PATIENT_ID)]
expression$stage <- patientDF$TUMOR_STAGE[match(expression$variable, patientDF$PATIENT_ID)]
#expression$value <- nan.na(ips.hits$value)
x <- subset(expression, select = -variable)
y <- dcast(x, Hugo_Symbol ~ stage, mean)
z <- dcast(x, Hugo_Symbol ~ stage, median)
#write.csv(y, "test.csv")
dy <- dist(as.matrix(y)) # find distance matrix
dz <- dist(as.matrix(z))
hcy <- hclust(dy) # apply hirarchical clustering
hcz <- hclust(dz)
plot(hcy)
install.packages("factoextra")
install.packages("cluster")
install.packages("magrittr")
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.