blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ceead9354294119f94899e4f300519c075a92177 | bebe94f1d0b3a30f12ad309a6629737ae706fb19 | /R/gg.R | 6451bf08f735c38d34b2c3cdfe882a9312e27875 | [] | no_license | BHGC/bhgc.wx | d19956db738f841cda62ef3e176c7a3c54ab1709 | a4b28cfabf4ec2107799dda868601c7d3cdbdfb3 | refs/heads/master | 2022-03-24T21:49:31.627577 | 2020-10-11T04:20:45 | 2020-10-11T04:20:45 | 156,008,704 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,055 | r | gg.R | ggplot_datetime_labels <- function(t, tz = timezone(), flavor = getOption("flavor", "wide")) {
   ## Build x-axis tick labels for a vector of datetimes `t`.
   ## "wide": HH:MM on every tick plus a "Mon Jan 01" date line once per day;
   ## "narrow": hour only plus an abbreviated weekday line once per day.
   flavor <- match.arg(flavor, choices = c("wide", "narrow"))
   ## message(sprintf("ggplot_datetime_labels(): tz=%s", tz))
   ## The date annotation is attached only to ticks whose hour is closest to
   ## noon, so each day is labelled once, near its middle.
   hours <- strftime(t, format = "%H", tz = tz)
   uhours <- sort(unique(hours))
   near_noon <- uhours[which.min(abs(as.integer(uhours) - 12L))]
   if (flavor == "wide") {
      times <- strftime(t, format = "%H:%M", tz = tz)
      dates <- strftime(t, format = "%a %b %d", tz = tz)
      dates[hours != near_noon] <- NA_character_
      ## The last annotated day additionally carries the timezone abbreviation.
      last <- rev(which(!is.na(dates)))[1]
      dates[last] <- strftime(t[last], format = "%a %b %d (%Z)", tz = tz)
      labels <- ifelse(is.na(dates), times, paste0(times, "\n", dates))
   } else if (flavor == "narrow") {
      times <- strftime(t, format = "%H", tz = tz)
      days <- strftime(t, format = "%a", tz = tz)
      days[hours != near_noon] <- NA_character_
      dates <- strftime(t, format = "%b %d", tz = tz)
      dates[hours != near_noon] <- NA_character_
      last <- rev(which(!is.na(dates)))[1]
      dates[last] <- strftime(t[last], format = "%a %b %d (%Z)", tz = tz)
      ## NOTE(review): in this branch `dates` only supplies the NA mask below;
      ## its formatted values (including the "(%Z)" value stored at `last`)
      ## never reach the output -- only `days` does.  Confirm whether the
      ## narrow flavor was meant to show the date/timezone as well.
      labels <- ifelse(is.na(dates), times, paste0(times, "\n", days))
   }
   labels
}
## Scatter plot of NOAA wind-direction forecasts over time.  Points are
## colored by direction band (green best, yellow marginal, red unfavorable).
## `days` is either a single count of days starting today, or a start/end
## Date pair; `windows_size` (pixels) selects the narrow or wide x-axis
## label flavor consumed by ggplot_datetime_labels() via options(flavor=).
#' @importFrom lubridate as_datetime ceiling_date floor_date
#' @import ggplot2
ggplot_noaa_wind_direction <- function(values, x_limits = date_range(values), days = NULL, windows_size = Inf) {
   ## To please R CMD check
   start <- wind_direction <- NULL
   if (is.null(windows_size)) windows_size <- 1024
   ## https://clrs.cc/
   color_map <- c(black = "#111111", gray = "#AAAAAA", green = "#2ECC40", yellow = "#FFDC00", red = "#FF4136")
   ## Direction bands: <=135 red, (135,179] yellow, (179,270] green,
   ## (270,300] yellow, >300 red.  NOTE(review): the `180-1` break makes the
   ## green band start just below 180 -- confirm the off-by-one is intended.
   bins <- cut(values$wind_direction, breaks = c(-Inf, 135, 180-1, 270, 300, Inf))
   cols <- color_map[c("red", "yellow", "green", "yellow", "red")[bins]]
   if (!is.null(days)) {
      stopifnot(all(is.finite(days)), all(days > 0))
      if (length(days) == 1L) {
         days <- seq(from = Sys.Date(), by = 1L, length.out = as.integer(days))
      } else {
         stopifnot(length(days) == 2L)
      }
      tz <- timezone()
      ## Expand x limits to whole local days.  The `days[.] + 1L` shift is
      ## presumably a date/UTC-offset adjustment -- TODO confirm.
      x_limits[1] <- floor_date(as_datetime(days[1] + 1L, tz = tz), unit = "days")
      x_limits[2] <- ceiling_date(as_datetime(days[2] + 1L, tz = tz), unit = "days")
   }
   x_breaks <- seq(from = x_limits[1], to = x_limits[2], by = "12 hours")
   ndays <- length(x_breaks) / 2
   ## Choose the label flavor from horizontal pixels available per day; the
   ## option is read later by ggplot_datetime_labels().
   flavor <- if (8/ndays * windows_size[1] < 1000) "narrow" else "wide"
   options(flavor = flavor)
   gg <- ggplot(values, aes(start, wind_direction)) + geom_point(color = cols, size = 2.0)
   ## Compass labels on both y axes (N appears at 0 and 360).
   wind_dirs <- c(N = 0, E = 90, S = 180, W = 270, N = 360)
   gg <- gg + scale_y_continuous(limits = c(0, 360), breaks = wind_dirs, labels = names(wind_dirs), minor_breaks = seq(0, 360, by = 30), sec.axis = sec_axis(~., breaks = as.integer(wind_dirs)))
   gg <- gg + labs(y = "Wind direction")
   gg <- gg + scale_x_datetime(limits = x_limits, breaks = x_breaks, labels = ggplot_datetime_labels, position = "top")
   gg <- gg + theme(axis.title.x = element_blank())
   # rect <- data.frame(xmin = -Inf, xmax = +Inf, ymin = 180, ymax = 270)
   # gg <- gg + geom_rect(data=rect, aes(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax), color = "green", alpha = 0.3, inherit.aes = FALSE)
   gg
}
## Time-series plot of NOAA surface wind (black dots), gusts (red crosses)
## and precipitation potential (blue bars scaled to the y range).
## `days` is either a single count of days starting today, or a start/end
## Date pair; `windows_size` (pixels) selects the narrow or wide x-axis
## label flavor consumed by ggplot_datetime_labels() via options(flavor=).
#' @importFrom lubridate as_datetime ceiling_date floor_date
#' @import ggplot2
ggplot_noaa_surface_wind <- function(values, x_limits = date_range(values), days = NULL, windows_size = Inf) {
   ## To please R CMD check
   start <- surface_wind <- gust <- NULL
   if (is.null(windows_size)) windows_size <- 1024
   if (!is.null(days)) {
      stopifnot(all(is.finite(days)), all(days > 0))
      if (length(days) == 1L) {
         days <- seq(from = Sys.Date(), by = 1L, length.out = as.integer(days))
      } else {
         stopifnot(length(days) == 2L)
      }
      tz <- timezone()
      x_limits[1] <- floor_date(as_datetime(days[1] + 1L, tz = tz), unit = "days")
      ## BUG FIX: the unit string was "days " (stray trailing blank);
      ## normalized to "days", consistent with ggplot_noaa_wind_direction().
      x_limits[2] <- ceiling_date(as_datetime(days[2] + 1L, tz = tz), unit = "days")
   }
   x_breaks <- seq(from = x_limits[1], to = x_limits[2], by = "12 hours")
   y_limits <- c(0, 25)
   ndays <- length(x_breaks) / 2
   ## Choose the label flavor from horizontal pixels available per day; the
   ## option is read later by ggplot_datetime_labels().
   flavor <- if (8/ndays * windows_size[1] < 1000) "narrow" else "wide"
   options(flavor = flavor)
   gg <- ggplot(values)
   ## Secondary axis converts mph to m/s (1 mph = 0.44704 m/s).
   gg <- gg + scale_y_continuous(limits = y_limits, minor_breaks = seq(0, 20, by = 1), sec.axis = sec_axis(~ 0.44704 * .))
   gg <- gg + labs(y = "Wind speed (mph <-> m/s)")
   gg <- gg + scale_x_datetime(limits = x_limits, breaks = x_breaks, labels = ggplot_datetime_labels) ## , date_minor_breaks = "6 hours"
   ## Precipitation potential (0-100) as a fraction of the y range; zero
   ## values become NA so no bar is drawn for dry periods.
   rain <- values$precipitation_potential/100
   rain[rain == 0] <- NA_real_
   ## gg <- gg + geom_point(aes(start, diff(y_limits)*rain, color = rain), size = 2.0) + scale_colour_gradient(low = "white", high = "blue")
   gg <- gg + geom_bar(stat = "identity", aes(start, diff(y_limits)*rain), fill = "blue", alpha = 0.25, size = 2.0)
   gg <- gg + geom_point(aes(start, surface_wind), size = 2.0)
   gg <- gg + geom_point(aes(start, gust), size = 2.0, shape = 4L, color = "red")
   gg <- gg + theme(axis.title.x = element_blank())
   gg
}
|
e4cd10422c0bd4ad19a0e1b08c25c4cfd9b304db | fa2218ab1b6ba64096c29fd1c565873de8ddf8ab | /man/se_lines.Rd | ddb3bd07c139ffa6de39a71865980c1a60ba5098 | [] | no_license | ivyleavedtoadflax/teaplots | 7cce00c16cfe1a518457ce40b14ceb6bdb3c0f7f | 06031aa671ddc47b4bb2535c30ed73525be83fa5 | refs/heads/master | 2020-06-20T00:07:23.328450 | 2016-11-27T15:10:34 | 2016-11-27T15:13:11 | 74,895,312 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 500 | rd | se_lines.Rd | % Generated by roxygen2 (4.1.0): do not edit by hand
% Please edit documentation in R/se_lines.R
\name{se_lines}
\alias{se_lines}
\title{se_lines}
\usage{
se_lines(x, SE, y, bar_width = 0.1)
}
\arguments{
\item{x}{A numeric vector of x coordinates.}

\item{SE}{A numeric vector of standard errors, one per point.}

\item{y}{A numeric vector of y coordinates.}

\item{bar_width}{Width of the whisker caps on the error bars.}
}
\value{
Called for its side effect of drawing standard-error bars on the current
plot; no useful return value.
}
\description{
\code{se_lines} adds standard-error confidence intervals to a scatterplot.
}
\details{
For each point (\code{x}, \code{y}) a vertical error bar of half-width
\code{SE} is drawn, with horizontal caps of width \code{bar_width}.
}
\examples{
x <- 1:10
y <- rnorm(10)
SE <- rep(0.5, 10)
plot(x, y)
se_lines(x, SE, y)
}
|
8e388602b62d88762bf3daa774c1a38c8c764a27 | 12c20cd032553e7fd8e23238ee28e8d3e8d32be1 | /misc/erythropoiesis/marjorieDemos/tms.R | c1ec3ef784382f1035bbd39f56923620b62276af | [
"MIT"
] | permissive | PriceLab/TrenaMultiScore | 5b33bdc4c8a00870c8a76523ed4542f76e338717 | c6d91402d83534fee27d157a933a0c2687a7b3d3 | refs/heads/master | 2022-09-03T03:52:27.013503 | 2022-08-19T21:00:22 | 2022-08-19T21:00:22 | 233,648,550 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,368 | r | tms.R | library(TrenaMultiScore)
library(TrenaProjectErythropoiesis)
library(RUnit)
library(factoextra)
library(RSQLite)
library(ghdb)
library(httr)
#------------------------------------------------------------------------------------------------------------------------
# Lazy, session-cached setup: each block runs once per R session so the
# script can be re-sourced cheaply during interactive work.
if(!exists("ghdb"))
   ghdb <- GeneHancerDB()
if(!exists("tbl.atac"))
   tbl.atac <- get(load("~/github/TrenaProjectErythropoiesis/misc/multiScore/brandAtacSeqCuration/tbl.atac.fp.RData"))
#------------------------------------------------------------------------------------------------------------------------
if(!exists("tmse")) {
   message(sprintf("--- creating instance of TrenaMultiScore"))
   tpe <- TrenaProjectErythropoiesis()
   tmse <- TrenaMultiScore(tpe, "TAL1");
   }
# from marjorie (email 29 aug 2020) use HSC/MPP for Day0 and CMP for Day2
if(!exists("tbl.atac.corces")){
   tbl.corces.full <- get(load("~/github/TrenaProjectErythropoiesis/prep/import/buenrostro/GSE74912-atacSeq.RData"))
   # Column groups for the three cell types of interest (HSC+MPP = day 0,
   # CMP = day 2).
   hsc.cols <- grep("\\.HSC", colnames(tbl.corces.full), ignore.case=TRUE)
   mpp.cols <- grep("MPP", colnames(tbl.corces.full), ignore.case=TRUE)
   cmp.cols <- grep("CMP", colnames(tbl.corces.full), ignore.case=TRUE)
   tbl.corces <- tbl.corces.full[, c(1:3, hsc.cols, mpp.cols, cmp.cols)]
   dim(tbl.corces)
   colnames(tbl.corces) <- c("chrom", "start", "end", "day0.1", "day0.2", "day0.3", "day0.4", "day2.1", "day2.2")
   tbl.atac.corces <- tbl.corces
   # NOTE(review): three successive definitions of the "quiet at day 0,
   # open at day 2" filter; only the LAST assignment takes effect -- the
   # first two are dead code retained as provenance.
   tbl.atac.corces.day0.quiet <- subset(tbl.atac.corces, day0.1 == 0 & day0.2 == 0 & day0.3==0 & day0.4 == 0 & day2.1 > 0)
   tbl.atac.corces.day0.quiet <- subset(tbl.atac.corces, day0.1 == 0 & day0.2 == 0 & (day2.1 > 0 | day2.2 > 0))
   tbl.atac.corces.day0.quiet <- subset(tbl.atac.corces, (day0.1 + day0.2 + day0.3 + day0.4) < 3 & (day2.1 + day2.2 > 6))
   }
#------------------------------------------------------------------------------------------------------------------------
# genes with zero-to-high expression between day 0 and day 2, and open chromatin at day 2
#
# Exploratory (demo-style) function: intermediate values are printed or just
# evaluated for interactive inspection, and several statements assume a live
# igv session and a local GO-enrichment service.  Returns (implicitly) the
# head of the GO-enrichment table for the filtered genes.
findCandidates <- function()
{
   expected <- c("brandLabDifferentiationTimeCourse-16173x28", "brandLabDifferentiationTimeCourse-27171x28")
   checkTrue(all(expected %in% getExpressionMatrixNames(tpe)))
   mtx <- getExpressionMatrix(tpe, expected[1])
   # goi.0: genes silent (below cutoff.1) in both day-0 columns;
   # goi.2: genes expressed (above cutoff.2) in both day-2 columns.
   cutoff.1 <- 0.5
   cutoff.2 <- 1
   goi.0 <- names(which(sapply(rownames(mtx), function(geneName) all(mtx[geneName, 1:2] < cutoff.1))))
   print(length(goi.0))
   goi.2 <- names(which(sapply(rownames(mtx), function(geneName) all(mtx[geneName, 3:4] > cutoff.2))))
   print(length(goi.2))
   length(intersect(goi.0, goi.2))
   goi <-intersect(goi.0, goi.2)
   length(goi)
   # NOTE(review): the CD34+ tissue restriction is immediately overridden;
   # only tissues == "all" takes effect.
   tissues <- "Common myeloid progenitor CD34+"
   tissues <- "all"
   tbls.gh <- lapply(goi, function(gene) retrieveEnhancersFromDatabase(ghdb, gene, tissues=tissues))
   # Keep only strong GeneHancer regions (combined score > 500) per gene.
   tbls.ghp <- lapply(goi, function(gene){
      tbl.gh <- retrieveEnhancersFromDatabase(ghdb, gene, tissues=tissues)
      if(nrow(tbl.gh) == 0)
         return(data.frame())
      subset(tbl.gh, combinedscore > 500)
      })
   length(tbls.ghp)
   names(tbls.ghp) <- goi
   tbls.ghp <- tbls.ghp[names(which(lapply(tbls.ghp, nrow) > 0))]
   length(tbls.ghp)
   # displayTrack(igv, DataFrameQuantitativeTrack("GH.CCL1", tbls.gh[["CCL1"]][, c("chrom", "start", "end", "combinedscore")], color="random", autoscale=FALSE, min=0,max=50))
   # Return the day0-quiet ATAC regions overlapping a gene's top GeneHancer
   # region (only the first GeneHancer row is tested).
   ghAndATAC <- function(tbl.gh){
      tbl.ov <- as.data.frame(findOverlaps(GRanges(tbl.gh[1, c("chrom", "start", "end")]),
                                           GRanges(tbl.atac.corces.day0.quiet[, c("chrom", "start", "end")]),
                                           type="any"))
      if(nrow(tbl.ov) > 0)
         return(tbl.atac.corces.day0.quiet[tbl.ov$subjectHits,])
      return(data.frame())
      } # ghAndATAC
   tbls.ghp.atacPattern <- lapply(tbls.ghp, ghAndATAC)
   tbls.ghp.atacPattern <- tbls.ghp.atacPattern[names(which(lapply(tbls.ghp.atacPattern, nrow) > 0))]
   length(tbls.ghp.atacPattern)
   # Interactive visual check of one example gene (CCL1) in igv: one track
   # per ATAC replicate column (4-9 = HSC.r1/r2, MPP.r1/r2, CMP.r1/r2).
   tbl.ccl1 <- ghAndATAC(tbls.gh[["CCL1"]])
   displayTrack(igv, DataFrameQuantitativeTrack("CCL1-atac-HSC.r1", tbl.ccl1[, c(1:3, 4)], color="random", autoscale=FALSE, min=0, max=20))
   displayTrack(igv, DataFrameQuantitativeTrack("CCL1-atac-HSC.r2", tbl.ccl1[, c(1:3, 5)], color="random", autoscale=FALSE, min=0, max=20))
   displayTrack(igv, DataFrameQuantitativeTrack("CCL1-atac-MPP.r1", tbl.ccl1[, c(1:3, 6)], color="random", autoscale=FALSE, min=0, max=20))
   displayTrack(igv, DataFrameQuantitativeTrack("CCL1-atac-MPP.r2", tbl.ccl1[, c(1:3, 7)], color="random", autoscale=FALSE, min=0, max=20))
   displayTrack(igv, DataFrameQuantitativeTrack("CCL1-atac-CMP.r1", tbl.ccl1[, c(1:3, 8)], color="random", autoscale=FALSE, min=0, max=20))
   displayTrack(igv, DataFrameQuantitativeTrack("CCL1-atac-CMP.r2", tbl.ccl1[, c(1:3, 9)], color="random", autoscale=FALSE, min=0, max=20))
   # POST the filtered gene symbols to a local GO-enrichment service.
   genes.filtered <- names (tbls.ghp.atacPattern)
   uri <- sprintf("http://localhost:8000/goEnrich")
   body.jsonString <- sprintf('%s', toJSON(list(geneSymbols=genes.filtered)))
   r <- POST(uri, body=body.jsonString)
   #sprintf('{"geneSymbols": "%s"}', goi.string))
   tbl <- fromJSON(content(r)[[1]])
   dim(tbl)
   wdth(1000)
   head(tbl, n=10)
   } # findCandidates
#------------------------------------------------------------------------------------------------------------------------
# Build the full TrenaMultiScore table for one target gene and save it to
# fimo<n>/<gene>.RData, where `fimoThresholdAsNegativeExponent` n means a
# FIMO p-value threshold of 10^-n.  If `tbl.openChromatin` is supplied it is
# injected directly; otherwise open chromatin is looked up.  When no open
# chromatin is found, an empty table is saved and returned.  Returns the
# score table invisibly.
build.model <- function(targetGene, fimoThresholdAsNegativeExponent=5, tbl.openChromatin=data.frame())
{
   printf("=========== building model for %s, fimoThreshold: %f", targetGene,
          fimoThresholdAsNegativeExponent)
   results.subDirectory <- sprintf("fimo%d", fimoThresholdAsNegativeExponent)
   filename <- sprintf("%s.RData", targetGene)
   if(!file.exists(results.subDirectory))
      dir.create(results.subDirectory)
   tms.tg <- TrenaMultiScore(tpe, targetGene);
   printf("--- getGeneHancerRegion")
   gh.span <- as.list(getGeneHancerRegion(tms.tg))
   printf("--- getOpenChromatin")
   # Either discover open chromatin, or inject the caller-supplied table
   # straight into the object's state slot.
   if(nrow(tbl.openChromatin) == 0)
      findOpenChromatin(tms.tg)
   else
      tms.tg@state$openChromatin <- tbl.openChromatin
   # Without open chromatin there is nothing to score: record an empty
   # model on disk so reruns can skip this gene, and bail out.
   if(nrow(getOpenChromatin(tms.tg)) == 0){
      message(sprintf("no open chromatin for %s, bailing out, saving empty model", targetGene))
      tbl <- data.frame()
      save(tbl, file=file.path(results.subDirectory, filename))
      return(data.frame())
      }
   fimoThreshold <- 10^(-fimoThresholdAsNegativeExponent)
   printf("--- findFimoTFBS")
   # Restrict motif matching to JASPAR 2018 (human) plus HOCOMOCO v11 core.
   jaspar2018.human <- query(MotifDb, c("sapiens", "jaspar2018"))
   hocomocov11.core <- query(MotifDb, "hocomocov11-core")
   motifs <- c(jaspar2018.human,hocomocov11.core)
   printf("--- using %d motifs only for fimo", length(motifs))
   findFimoTFBS(tms.tg, motifs=motifs, fimo.threshold=fimoThreshold)
   printf("--- scoreMotifHitsForConservation")
   scoreMotifHitsForConservation(tms.tg)
   printf("--- scoreMotifHitsForGeneHancer")
   scoreMotifHitsForGeneHancer(tms.tg)
   addDistanceToTSS(tms.tg)
   mtx <- getExpressionMatrix(tpe, "brandLabDifferentiationTimeCourse-27171x28")
   printf("--- addGeneExpressionCorrelations")
   addGeneExpressionCorrelations(tms.tg, mtx)
   printf("--- addGenicAnnotations")
   addGenicAnnotations(tms.tg)
   printf("--- addChip")
   addChIP(tms.tg)
   tbl <- getMultiScoreTable(tms.tg)
   # Undefined expression correlations become 0; motifScore is the -log10
   # FIMO p-value, rounded for readability.
   tbl$cor[which(is.na(tbl$cor))] <- 0
   tbl$motifScore <- round(-log10(tbl$p.value), 2)
   tbl$targetGene <- targetGene
   printf("--- model for %s has %d rows", targetGene, nrow(tbl))
   if(nrow(tbl) > 0){
      filePath <- file.path(results.subDirectory, filename)
      save(tbl, file=filePath)
      message(sprintf("saving %d rows model for %s to %s", nrow(tbl), targetGene, filePath))
      }
   invisible(tbl)
   } # build.model
#------------------------------------------------------------------------------------------------------------------------
# goi <- function()
# {
# tbl <- read.table("G2vsG3GenesUP.txt", sep="\t", as.is=TRUE, header=TRUE, nrow=100)
# dim(tbl)
# genes <- tbl$GeneName
# additional.genes <- read.table(file="additionalGenes.txt", stringsAsFactors=FALSE)$V1
# length(genes)
# length(additional.genes)
# all.goi <- sort(unique(c(genes, additional.genes)))
# length(all.goi)
# library(org.Hs.eg.db)
# tbl.ref <- select(org.Hs.eg.db, key=all.goi, columns=c("SYMBOL", "ENTREZID"), keytype="SYMBOL")
# successes <- tbl.ref[which(!is.na(tbl.ref$ENTREZID)),]$SYMBOL
# failures <- tbl.ref[which(is.na(tbl.ref$ENTREZID)),]$SYMBOL
# tbl.ref2 <- select(org.Hs.eg.db, key=failures, columns=c("SYMBOL", "ALIAS", "ENTREZID"), keytype="ALIAS")
# successes.round2 <- tbl.ref2[which(!is.na(tbl.ref2$SYMBOL)), "SYMBOL"]
# goi.final <- sort(unique(c(successes, successes.round2)))
# length(goi.final) # 112
#
# return(goi.final)
#
#} # goi
#------------------------------------------------------------------------------------------------------------------------
# Run build.model() for every gene in `goi`, writing results under
# ./fimo<exponent>/<gene>.RData.  Genes whose results file already exists
# are skipped; on error an empty marker file is touched so later reruns
# skip the failing gene as well.  Returns, invisibly, the named list of
# per-gene return values.
buildAll <- function(goi, fimoThresholdAsNegativeExponent)
{
   printf("running tms on %d genes", length(goi))
   run.one <- function(targetGene){
      fimo.threshold <- fimoThresholdAsNegativeExponent
      results.file.path <- file.path(sprintf("./fimo%d", fimo.threshold),
                                     sprintf("%s.RData", targetGene))
      tryCatch({
         if(!file.exists(results.file.path))
            build.model(targetGene, fimo.threshold)
         else
            printf("results file exists, skipping: %s", results.file.path)
         },
      error = function(e){
         # leave an empty marker file so a rerun does not retry this gene
         print("tms error")
         system(sprintf("touch %s", results.file.path))
         print(targetGene)
         print(e)
         })
      } # run.one
   tbls.all <- lapply(goi, run.one)
   names(tbls.all) <- goi
   invisible(tbls.all)
   } # buildAll
#------------------------------------------------------------------------------------------------------------------------
# Combine the per-gene model tables saved as .RData files in the given
# subdirectory of marjorieDemos into one data.frame; optionally save the
# combined table to `outfile` (pass NULL to skip saving).  Files of
# trivial size (<= 1000 bytes, i.e. empty or failed models) are skipped.
collectResults <- function(directory, outfile)
{
   full.path.to.directory <- sprintf("~/github/TrenaMultiScore/misc/erythropoiesis/marjorieDemos/%s", directory)
   # Anchor the pattern: "." is a regex wildcard, so the old pattern
   # ".RData" could also match unintended names.
   rdata.files <- list.files(path=full.path.to.directory, pattern="\\.RData$")
   printf("RData file count: %d", length(rdata.files))
   # NOTE(review): head(n=-1) skips the last file (possibly one still
   # being written) -- confirm this is still wanted.
   tbls <- lapply(head(rdata.files, n=-1), function(file){
      # Bug fix: read from the directory we listed, not a path relative to
      # getwd(); the two only coincided when run from marjorieDemos itself.
      full.path <- file.path(full.path.to.directory, file)
      # Small files hold empty/failed models; the element stays NULL and
      # do.call(rbind, ...) drops it.
      if(file.size(full.path) > 1000){
         tbl <- get(load(full.path))
         printf("%30s: %d rows, %d cols", file, nrow(tbl), ncol(tbl))
         return(tbl)
         }})
   tbl.all <- do.call(rbind, tbls)
   printf("about to save combined table (%d rows) to %s", nrow(tbl.all), outfile)
   if(!is.null(outfile))
      save(tbl.all, file=outfile)
   invisible(tbl.all)
   } # collectResults
#------------------------------------------------------------------------------------------------------------------------
# Persist a combined tms table to an SQLite database file as table "tms",
# replacing any existing table of that name.
to.sqlite <- function(tbl, sqlite.filename)
{
   db <- dbConnect(SQLite(), sqlite.filename, synchronous=NULL)
   # Guarantee the connection is closed even if the write fails.
   on.exit(dbDisconnect(db), add=TRUE)
   system.time(dbWriteTable(db, name="tms", value=tbl, overwrite=TRUE)) # less than 30 seconds
   invisible(NULL)
   } # to.sqlite
#------------------------------------------------------------------------------------------------------------------------
|
453ae57e443e7f7b2522f9f8b6b2e0e182b29488 | 52efacf9230d27099208b340b52b8f11103f405d | /r_fun_utils/monty_hall_simulator.R | c3857f0ee0e76fd6cba3e0495308fed438046515 | [] | no_license | leviabowles/lb_r_utils | 8a70b062996db7e466a19703dc7c38ee877c9d5d | 13b7bcb912ca273a6cd532aa27585276328b02b5 | refs/heads/master | 2023-02-05T03:48:48.795476 | 2020-12-31T02:01:08 | 2020-12-31T02:01:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 865 | r | monty_hall_simulator.R | ## Monty Hall simulation game for variable number of games played, whether switch strategy is in play and variable numbers of doors
# Simulation parameters: number of games, number of doors, and whether the
# player switches after a losing door is revealed (1 = switch, 0 = stay).
simulations = 10000
doors = 5
switcher = 1
# Simulate the Monty Hall game.
#
#   simulations: number of games to play
#   doors:       number of doors (must be >= 3)
#   switcher:    0 = keep the first pick; anything else = switch after the
#                host opens a losing door
#
# Returns summary() of the 0/1 win-indicator vector, so the "Mean"
# component is the empirical win rate.
monty_hall <- function(simulations, doors, switcher) {
  # sample(x, 1) treats a length-1 numeric x as 1:x, which silently picks
  # the wrong door when the candidate set has a single element (e.g. with
  # 3 doors).  This helper samples from the elements of x regardless.
  resample <- function(x) x[sample.int(length(x), 1)]
  door <- seq_len(doors)
  winners <- sample(door, simulations, replace = TRUE)
  chosen <- sample(door, simulations, replace = TRUE)
  wins <- integer(simulations)  # preallocated instead of growing with c()
  for (i in seq_len(simulations)) {
    # host opens a door that is neither the contestant's pick nor the prize
    eliminate <- resample(setdiff(door, c(chosen[i], winners[i])))
    final <- if (switcher == 0) chosen[i]
             else resample(setdiff(door, c(eliminate, chosen[i])))
    wins[i] <- as.integer(final == winners[i])
  }
  summary(wins)
}
monty_hall(simulations, doors, switcher)
|
2888f3d1e8e34e76248d12d6bda5fcfc30dc6351 | 42fb36a62430a122ad6649095670043680d84522 | /man/CleanJSTOR_df.Rd | 120d5ad7fa9a0e55878f0cfa578541ac97ce15af | [] | no_license | arthurbnetto/tidyJSTOR | 662e4254c6bc6cbfbc76e4e4e6578449b88cd2ca | 4a613591a7f11f4e70940472ee3ab5384c7c2ee5 | refs/heads/master | 2020-06-06T01:54:24.148254 | 2020-02-12T18:38:16 | 2020-02-12T18:38:16 | 192,606,371 | 8 | 0 | null | 2020-01-29T19:50:24 | 2019-06-18T20:14:02 | R | UTF-8 | R | false | true | 625 | rd | CleanJSTOR_df.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hello.R
\name{CleanJSTOR_df}
\alias{CleanJSTOR_df}
\title{CleanJSTOR_df}
\usage{
CleanJSTOR_df(df, Titles = FALSE)
}
\arguments{
\item{df}{A dataframe in the JSTOR_df format (see: JSTOR_df())}
\item{Titles}{whether to clean titles (instead of abstracts). Default = FALSE.}
}
\value{
A JSTOR_df dataframe containing information about: Journal, Publisher, Title, Year, Abstract and Language
}
\description{
Cleans a JSTOR_df by removing papers with duplicated names, papers not
written in English, and rows with missing titles or abstracts.
}
\examples{
CleanJSTOR_df(df)
}
|
672621e4ba427a75b89a97d9934433f045fec9a0 | 205a269537cc4bfbc526da048db8d185e1e678c9 | /R/ipar.R | 5fc7f9c196fd6c7b89e95a6f95df938c4cf6707e | [] | no_license | davidgohel/ggiraph | bca2fc5c61ef7cbeecc0a0d067f7479822117ab0 | b3ce2998b57d8c8b63055499925fd9fe99f4d1a7 | refs/heads/master | 2023-09-03T00:06:10.817100 | 2023-08-30T14:35:40 | 2023-08-30T14:35:40 | 40,061,589 | 735 | 86 | null | 2023-09-03T09:50:54 | 2015-08-01T22:17:06 | R | UTF-8 | R | false | false | 14,470 | r | ipar.R | #' @title Interactive parameters
#'
#' @description
#' Throughout ggiraph there are functions that add interactivity to ggplot plot elements.
#' The user can control the various aspects of interactivity by supplying
#' a special set of parameters to these functions.
#'
#' @param tooltip Tooltip text to associate with one or more elements.
#' If this is supplied a tooltip is shown when the element is hovered.
#' Plain text or html is supported.
#'
#' To use html markup it is advised to use [htmltools::HTML()] function
#' in order to mark the text as html markup.
#' If the text is not marked as html and no opening/closing tags were detected,
#' then any existing newline characters (`\r\n`, `\r` and `\n`)
#' are replaced with the `<br/>` tag.
#'
#' @param onclick Javascript code to associate with one or more elements.
#' This code will be executed when the element is clicked.
#'
#' @param hover_css Individual css style associate with one or more elements.
#' This css style is applied when the element is hovered and overrides the default style,
#' set via [opts_hover()], [opts_hover_key()] or [opts_hover_theme()].
#' It can also be constructed with [girafe_css()],
#' to give more control over the css for different element types (see [opts_hover()] note).
#'
#' @param selected_css Individual css style associate with one or more elements.
#' This css style is applied when the element is selected and overrides the default style,
#' set via [opts_selection()], [opts_selection_key()] or [opts_selection_theme()].
#' It can also be constructed with [girafe_css()],
#' to give more control over the css for different element types (see [opts_selection()] note).
#'
#' @param data_id Identifier to associate with one or more elements.
#' This is mandatory parameter if hover and selection interactivity is desired.
#' Identifiers are available as reactive input values in Shiny applications.
#'
#' @param tooltip_fill Color to use for tooltip background when [opts_tooltip()] `use_fill` is TRUE.
#' Useful for setting the tooltip background color in [geom_text_interactive()] or
#' [geom_label_interactive()], when the geom text color may be the same as the tooltip text color.
#'
#' @param hover_nearest Set to TRUE to apply the hover effect on the nearest element
#' while moving the mouse. In this case it is mandatory to also set the `data_id` parameter
#'
#' @section Details for interactive geom functions:
#' The interactive parameters can be supplied with two ways:
#' \itemize{
#' \item As aesthetics with the mapping argument (via [aes()]).
#' In this way they can be mapped to data columns and apply to a set of geometries.
#'
#' \item As plain arguments into the geom_*_interactive function.
#' In this way they can be set to a scalar value.
#' }
#'
#' @section Details for annotate_*_interactive functions:
#' The interactive parameters can be supplied as arguments in the relevant function
#' and they can be scalar values or vectors depending on params on base function.
#'
#' @section Details for interactive scale and interactive guide functions:
#' For scales, the interactive parameters can be supplied as arguments in the relevant function
#' and they can be scalar values or vectors, depending on the number of breaks (levels) and
#' the type of the guide used.
#' The guides do not accept any interactive parameter directly, they receive them from the scales.
#'
#' \itemize{
#' \item When guide of type `legend` or `bins` is used, it will be converted to a
#' [guide_legend_interactive()] or [guide_bins_interactive()] respectively,
#' if it's not already.
#'
#' The length of each scale interactive parameter vector should match the length of the breaks.
#' It can also be a named vector, where each name should correspond to the same break name.
#' It can also be defined as function that takes the breaks as input and returns a named or
#' unnamed vector of values as output.
#'
#' The interactive parameters here, give interactivity only to the key elements of the guide.
#'
#' \item When guide of type `colourbar` or `coloursteps` is used, it will be converted to a
#' [guide_colourbar_interactive()] or [guide_coloursteps_interactive()]
#' respectively, if it's not already.
#'
#' The scale interactive parameters in this case should be scalar values and give interactivity
#' to the colorbar only.
#' }
#'
#' To provide interactivity to the rest of the elements of a guide, (title, labels, background, etc),
#' the relevant theme elements or relevant guide arguments can be used.
#' The `guide` arguments `title.theme` and `label.theme` can be defined as
#' `element_text_interactive` (in fact, they will be converted to that if they are not
#' already), either directly or via the theme.
#' See the element_*_interactive section for more details.
#'
#' @section Details for element_*_interactive functions:
#' The interactive parameters can be supplied as arguments in the relevant function
#' and they should be scalar values.
#'
#' For theme text elements ([element_text_interactive()]), the interactive parameters
#' can also be supplied while setting a label value, via the [labs()] family
#' of functions or when setting a scale/guide title or key label.
#' Instead of setting a character value for the element, function
#' [label_interactive()] can be used to define interactive parameters
#' to go along with the label.
#' When the parameters are supplied that way, they override the default values
#' that are set at the theme via [element_text_interactive()] or via the `guide`'s
#' theme parameters.
#'
#' @section Details for interactive_*_grob functions:
#' The interactive parameters can be supplied as arguments in the relevant function
#' and they can be scalar values or vectors depending on params on base function.
#'
#' @section Custom interactive parameters:
#' The argument `extra_interactive_params` can be passed to any of the *_interactive functions
#' (geoms, grobs, scales, labeller, labels and theme elements),
#' It should be a character vector of additional names to be treated as interactive parameters
#' when evaluating the aesthetics.
#' The values will eventually end up as attributes in the SVG elements of the output.
#'
#' Intended only for expert use.
#'
#' @seealso [girafe_options()], [girafe()]
#' @rdname interactive_parameters
#' @name interactive_parameters
NULL
# A list of interactive parameters.
# Important: data_id should always be first,
# so that it's the first attribute that is set in the svg element.
# Every entry defaults to NULL ("not set"); IPAR_NAMES below captures the
# canonical ordering and is used throughout this file as the default
# `ipar` argument.
IPAR_DEFAULTS <- list(
  data_id = NULL,
  tooltip = NULL,
  onclick = NULL,
  hover_css = NULL,
  selected_css = NULL,
  tooltip_fill = NULL,
  hover_nearest = NULL
)
IPAR_NAMES <- names(IPAR_DEFAULTS)
#' Test whether an object carries at least one interactive parameter.
#' @noRd
has_interactive_attrs <- function(x, ipar = IPAR_NAMES) {
  any(names(x) %in% ipar)
}
#' Names of the interactive parameters actually present on an object,
#' in the order they appear in `x`.
#' @noRd
get_interactive_attr_names <- function(x, ipar = IPAR_NAMES) {
  nms <- names(x)
  if (is.null(nms)) {
    return(character(0))
  }
  unique(nms[nms %in% ipar])
}
#' Active interactive parameter names: the built-in defaults combined
#' with any caller-supplied extras; blank or NA extras are dropped and
#' duplicates removed.
#' @noRd
get_default_ipar <- function(extra_names = NULL) {
  if (is.character(extra_names) && length(extra_names) > 0) {
    keep <- !is.na(extra_names) & nzchar(trimws(extra_names))
    extra_names <- extra_names[keep]
  }
  unique(c(IPAR_NAMES, extra_names))
}
#' Collect the interactive parameters from an object: from its
#' "interactive" attribute when present, from the object itself
#' otherwise, or (by default) from the caller's environment.
#' @noRd
#' @importFrom rlang env_get_list caller_env
get_interactive_attrs <- function(x = caller_env(), ipar = IPAR_NAMES) {
  # Environments: fetch each parameter by name, defaulting to NULL.
  if (is.environment(x)) {
    return(env_get_list(env = x, ipar, NULL))
  }
  # Prefer a bundled "interactive" attribute over the object itself.
  bundle <- attr(x, "interactive")
  if (!is.null(bundle)) {
    x <- bundle
  }
  x[get_interactive_attr_names(x, ipar = ipar)]
}
#' Strip every interactive parameter named in `ipar` from an object and
#' return the modified object.
#' @noRd
remove_interactive_attrs <- function(x, ipar = IPAR_NAMES) {
  present <- intersect(names(x), ipar)
  for (nm in present) {
    x[[nm]] <- NULL
  }
  x
}
#' Copies interactive parameters from one object to the other
#' and returns the modified `dest`.
#'
#' Only parameters present (non-NULL) in `src` are copied.  When `rows`
#' is given, multi-element values are first subset to those rows.  Extra
#' arguments in `...` are forwarded to `rep()` so values can be recycled
#' to the destination length (or, with `useList = TRUE`, repeated with
#' per-element counts via `mapply(rep, ...)`).  Function-valued
#' parameters are copied untouched.
#' @noRd
copy_interactive_attrs <- function(src,
                                   dest,
                                   ...,
                                   useList = FALSE,
                                   rows = NULL,
                                   ipar = IPAR_NAMES) {
  hasDots <- length(list(...)) > 0
  for (a in ipar) {
    if (!is.null(src[[a]])) {
      # Length-1 values apply as a whole; longer values honour `rows`.
      if (length(rows) == 0 || length(src[[a]]) == 1) {
        val <- src[[a]]
      } else {
        val <- src[[a]][rows]
      }
      if (is.function(val)) {
        dest[[a]] <- val
      } else if (hasDots && useList) {
        dest[[a]] <- unlist(mapply(rep, val, ...))
      } else if (hasDots) {
        dest[[a]] <- rep(val, ...)
      } else {
        dest[[a]] <- val
      }
    }
  }
  dest
}
#' Add the interactive parameters from a data object to a grob
#' and change its class to the corresponding interactive_*_grob class.
#'
#' For a gTree with children, the parameters are pushed down onto each
#' child: the whole `data` is reused when it has one (or no) row,
#' otherwise one data row is matched to each child; any other length
#' mismatch aborts.
#' @noRd
add_interactive_attrs <- function(gr,
                                  data,
                                  rows = NULL,
                                  cl = NULL,
                                  overwrite = TRUE,
                                  data_attr = "data-id",
                                  ipar = IPAR_NAMES) {
  # check for presence of interactive parameters
  anames <- Filter(x = get_interactive_attr_names(data, ipar = ipar), function(a) {
    !is.null(data[[a]])
  })
  if (length(anames) == 0)
    return(gr)
  # if passed grob is a gTree, loop through the children
  # note that some grobs (like labelgrob) inherit from gTree,
  # but have no children. So we need to check the children length, first.
  if (inherits(gr, "gTree") && length(gr$children) > 0) {
    # check the lengths of children grobs and data
    data_len <- nrow(data)
    children_len <- length(gr$children)
    if (is.null(data_len) || data_len == 1) {
      # pass the data as a whole
      for (i in seq_along(gr$children)) {
        gr$children[[i]] <-
          do_add_interactive_attrs(
            gr = gr$children[[i]],
            data = data,
            rows = rows,
            cl = cl,
            overwrite = overwrite,
            data_attr = data_attr,
            ipar = anames
          )
      }
    } else if (children_len == data_len) {
      # pass the correct data row
      for (i in seq_along(gr$children)) {
        gr$children[[i]] <-
          do_add_interactive_attrs(
            gr = gr$children[[i]],
            data = data[i, , drop = FALSE],
            rows = rows,
            cl = cl,
            overwrite = overwrite,
            data_attr = data_attr,
            ipar = anames
          )
      }
    } else {
      abort("Can't add interactive attrs to gTree", call = NULL)
    }
    return(gr)
  } else {
    # plain grob: delegate directly
    do_add_interactive_attrs(
      gr = gr,
      data = data,
      rows = rows,
      cl = cl,
      overwrite = overwrite,
      data_attr = data_attr,
      ipar = anames
    )
  }
}
#' Delegate for add_interactive_attrs: attaches the interactive state
#' (`.ipar`, `.interactive`, `.data_attr`) to a single grob and prepends
#' an interactive_*_grob class.  When the grob is already interactive and
#' `overwrite` is FALSE, existing parameter values are preserved.
#' @noRd
do_add_interactive_attrs <- function(gr,
                                     data,
                                     rows = NULL,
                                     cl = NULL,
                                     overwrite = TRUE,
                                     data_attr = "data-id",
                                     ipar = IPAR_NAMES) {
  # check that is a grob
  if (!is.grob(gr) || is.zero(gr))
    return(gr)
  # check if it's interactive grob already
  isInteractive <- length(grep("interactive_", class(gr))) > 0
  ip <- get_interactive_data(gr)
  # merge parameter values from `data`, honouring `rows` when supplied
  if (length(rows) == 0) {
    for (a in ipar) {
      if (!isInteractive || isTRUE(overwrite) || is.null(ip[[a]]))
        ip[[a]] <- data[[a]]
    }
  } else {
    for (a in ipar) {
      if (!isInteractive || isTRUE(overwrite) || is.null(ip[[a]]))
        ip[[a]] <- data[[a]][rows]
    }
  }
  gr$.ipar <- ipar
  gr$.interactive <- ip
  gr$.data_attr <- data_attr
  if (is.null(cl) && !isInteractive) {
    cl <- paste("interactive", class(gr)[1], "grob", sep = "_")
    # some grobs have class name which contains "grob" already, like rastergrob
    # and labelgrob, so they end up named like interactive_rastergrob_grob.
    # we normalize the name here, to use class interactive_raster_grob.
    cl <- sub("grob_grob", "_grob", cl, ignore.case = TRUE)
  }
  # just extend the class, so that it inherits other grob methods
  class(gr) <- c(cl, class(gr))
  gr
}
# Retrieve the bundle of interactive data stored on `x`: prefer the
# `.interactive` field (non-atomic objects only), then the "interactive"
# attribute, falling back to `default`.
get_interactive_data <- function(x, default = list()) {
  field <- if (!is.atomic(x)) x$.interactive
  field %||% attr(x, "interactive") %||% default
}
# Names of the interactive parameters attached to `x` (via the `.ipar`
# field or the "ipar" attribute); falls back to `default` when the stored
# value is absent or not a non-empty character vector.
get_ipar <- function(x, default = IPAR_NAMES) {
  found <- (if (!is.atomic(x)) x$.ipar) %||% attr(x, "ipar")
  if (is.character(found) && length(found) > 0) {
    return(found)
  }
  default
}
# SVG attribute name to use for the data_id parameter on `x` (via the
# `.data_attr` field or "data_attr" attribute); falls back to `default`
# unless a single-string override is found.
get_data_attr <- function(x, default = "data-id") {
  found <- (if (!is.atomic(x)) x$.data_attr) %||% attr(x, "data_attr")
  if (is.character(found) && length(found) == 1) {
    return(found)
  }
  default
}
#' Sets the interactive attributes on the svg output.
#'
#' For every interactive parameter present on `x`, writes one attribute
#' onto the svg elements identified by `ids` via `set_attr`.  Tooltips
#' are written as the `title` attribute, `data_id` under the grob's
#' configured data attribute name, css values are validated first;
#' everything else is written under its own parameter name.
#' @noRd
interactive_attr_toxml <- function(x,
                                   ids = character(0),
                                   rows = NULL) {
  if (length(ids) < 1)
    return(invisible())
  ip <- get_interactive_data(x)
  ipar <- get_ipar(x)
  data_attr <- get_data_attr(x)
  # check for presence of interactive parameters
  anames <- Filter(x = get_interactive_attr_names(ip, ipar = ipar), function(a) {
    !is.null(ip[[a]])
  })
  if (length(anames) == 0)
    return(invisible())
  for (a in anames) {
    # honour the row subset, if any
    if (length(rows) == 0) {
      attrValue <- ip[[a]]
    } else {
      attrValue <- ip[[a]][rows]
    }
    attrValue <- switch(a,
                        tooltip = encode_cr(attrValue),
                        hover_css = check_css_attr(attrValue),
                        selected_css = check_css_attr(attrValue),
                        attrValue)
    if (!is.character(attrValue))
      attrValue <- as.character(attrValue)
    attrName <- switch(a,
                       tooltip = "title",
                       data_id = data_attr,
                       a)
    set_attr(name = attrName,
             ids = as.integer(ids),
             values = attrValue)
  }
  invisible()
}
|
8e4d20c17f8f1dfe9f3198311bf77f2108b4c9e2 | 5e9174e8c66e695e1d2fe39b919d64dc1f537ba8 | /Homework 4/Solution/HW4-R-ajmn100.R | 7a6bf9b00ce0433759582ba5b5b52d05a36526af | [
"MIT"
] | permissive | jonathannjeunje/Applied_Statistical_Methods_With_R | 32e89c61d737e337b0c2a195e1c496132e41792b | c6c71b2a12da50be50dc1c5fd14a80962c7d32a2 | refs/heads/main | 2023-02-23T00:50:08.200810 | 2021-01-27T01:55:51 | 2021-01-27T01:55:51 | 333,272,540 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 486 | r | HW4-R-ajmn100.R | #1.If X is Binomial (n = 100, p = 0.3)
# P(X = 39) for X ~ Binomial(n = 100, p = 0.3)
dbinom(39, 100, 0.3)
# P(X >= 25) = 1 - P(X <= 24); lower.tail = FALSE expresses the upper tail
# directly and avoids the explicit subtraction.
pbinom(24, 100, 0.3, lower.tail = FALSE)
# 2. If X is Beta(a = 5, b = 3)
# P(0.4 <= X <= 0.5)
pbeta(0.5, 5, 3) - pbeta(0.4, 5, 3)
# The value of k such that P(X <= k) = 0.7
qbeta(0.7, 5, 3)
# 3. Histogram of 100 random numbers from a Gamma(shape = 2, scale = 5)
# distribution. Fix the RNG seed so the plotted sample is reproducible.
set.seed(1)
hist(rgamma(100, shape = 2, scale = 5),
     main = '100 random values from Gamma(2,5)', xlab = 'x', ylab = 'y')
|
2826c22a606cf4d063044d7fe09521d3bab3a385 | 9e51d436c3516e8045a331724519e2d47785b635 | /assets/projects/BayesianIRT/shinyStan_for_shinyapps/server_files/utilities/update_multiparam_selectize.R | 21b4e826d13442f90d30fe371bfbc4e1b0849e5b | [
"MIT"
] | permissive | rfarouni/rfarouni.github.io | 383df890b03254e34210f23d0c350cd5ca38fcf9 | bb5eb702736d57f48c3bff99bb8339cc22736099 | refs/heads/master | 2023-04-15T16:04:20.199574 | 2022-09-30T15:18:44 | 2022-09-30T15:18:44 | 32,032,321 | 0 | 6 | MIT | 2023-04-11T22:44:18 | 2015-03-11T17:48:14 | HTML | UTF-8 | R | false | false | 548 | r | update_multiparam_selectize.R | # functions for updating the choices and selected for
# the selectizeInput input$params_to_plot when the sorting
# option is changed in input$multiparam_sort
# Returns the currently selected parameters, or NULL when nothing is chosen.
copy_params_to_plot <- reactive({
  copy <- input$params_to_plot
  # `||` is the correct scalar, short-circuiting operator inside `if`;
  # the elementwise `|` previously evaluated both sides unconditionally.
  if (is.null(copy) || length(copy) == 0) {
    return(NULL)
  }
  copy
})
# Whenever the sorting option changes, rebuild the choice list for the
# parameter selector while keeping the user's current selection intact.
observe({
  # read solely to take a reactive dependency on the sort control; the value
  # itself is not used below
  x <- input$multiparam_sort
  choices <- make_param_list_with_groups_sort()
  selected <- copy_params_to_plot()
  updateSelectizeInput(session, inputId = "params_to_plot", choices = choices,
                       selected = selected)
})
|
8bf886477c0dc54beb62ceb8e56e5b6670d853c7 | 792db68cb166df96f92b841bb0a86c4bd1d536d2 | /Practica6_Covid-19.R | c3530f23158a7f4394b9249335f96a2435f9a66b | [] | no_license | AllanZamb/ProcesamientoR | 74ac7524d8b8e9563827c3b5e8d7d25ae1c01d61 | 3a5e29b0c7fbc6a35ca71846e89caaa1b008dcd7 | refs/heads/main | 2023-03-06T22:20:01.317589 | 2021-02-06T04:48:10 | 2021-02-06T04:48:10 | 316,684,351 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,458 | r | Practica6_Covid-19.R | paises <- read.csv("https://covid19.who.int/WHO-COVID-19-global-data.csv")
# Inspect the structure and a summary of the raw WHO data (`paises` is read
# above directly from the WHO COVID-19 global CSV).
str(paises)
summary(paises$Cumulative_cases)
# Current column names, then rename them to the Spanish labels used below.
colnames(paises)
colnames(paises) <- c("Fecha_de_reporte",
                      "Código_de_país",
                      "Paises", "Región_OMS" ,
                      "Casos_Nuevos",
                      "Casos_Acumulados",
                      "Defunciones_Nuevas",
                      "Defunciones_Acumuladas" )
# How many distinct countries does the dataset contain?
total_paises<- length(unique(paises$Paises))
print(paste("tenemos en total", total_paises, "paises" ))
## Split the original dataset by country, report date and WHO region.
paises_filtrados <- split(paises, paises$Paises)
fechas_filtrados <- split(paises, paises$Fecha_de_reporte)
regiones_filtrados <- split(paises, paises$Región_OMS)
################################################################################
### SPLIT THE ORIGINAL DATASET INTO ONE CSV FILE PER GROUP
# `ldply` (used below) comes from plyr; attach it explicitly instead of
# relying on it having been loaded elsewhere.
library(plyr)
# Create the output folders up front so write.csv does not fail on a fresh
# checkout.
for (d in c("BASES/Paises", "BASES/Regiones", "BASES/Fechas")) {
  dir.create(d, recursive = TRUE, showWarnings = FALSE)
}
# Write every element of each split list to its own CSV file.
lapply(names(paises_filtrados),
       function(x){write.csv(paises_filtrados[[x]], paste0("BASES/Paises/",x,".csv"),
                             row.names = FALSE)})
lapply(names(regiones_filtrados),
       function(x){write.csv(regiones_filtrados[[x]], paste0("BASES/Regiones/",x,".csv"),
                             row.names = FALSE)})
lapply(names(fechas_filtrados),
       function(x){write.csv(fechas_filtrados[[x]], paste0("BASES/Fechas/",x,".csv"),
                             row.names = FALSE)})
################ REASSEMBLE THE DATASET ########################################
## List the CSV files inside each folder. `pattern` is a regular expression,
## so match a literal ".csv" suffix ("*.csv" was a shell glob used as regex).
patrones_paises_csv <- list.files("BASES/Paises/", pattern = "\\.csv$")
patrones_regiones_csv <- list.files("BASES/Regiones/", pattern = "\\.csv$")
patrones_fechas_csv <- list.files("BASES/Fechas/", pattern = "\\.csv$")
# Read the per-group files back and bind them into single data frames.
integrar_fechas <- ldply(paste0("BASES/Fechas/",patrones_fechas_csv), read.csv)
integrar_regiones <- ldply(paste0("BASES/Regiones/",patrones_regiones_csv), read.csv)
integrar_paises <- ldply(paste0("BASES/Paises/",patrones_paises_csv), read.csv)
################################################################################
|
e78c8d3642a2ac4bd38378077e8a0ed245809432 | 7db2c001e9fbaadf8a796ae80e5d09d90925c46d | /R/explorer_header.R | e5cbf6b12e96810e6d390b015b946be635e1a6d8 | [] | no_license | DavidBarke/shinyExplorer | a466d490243c241a914a12c8ab30e641b594526d | 4a0b3f101bb46db6727ade489ae177e40b23a2c9 | refs/heads/master | 2020-09-03T21:14:14.750167 | 2020-08-25T21:37:04 | 2020-08-25T21:37:04 | 219,572,569 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,056 | r | explorer_header.R | explorer_header_ui <- function(id) {
  # Build the module's namespace function and emit the placeholder element
  # that the matching server function fills in via output$header.
  ns <- shiny::NS(id)
  htmltools::tagList(
    shiny::uiOutput(
      outputId = ns("header")
    )
  )
}
# Server half of the explorer header module. Renders a breadcrumb-style bar
# with one link per ancestor of the current node (like the Windows Explorer
# address bar) and lazily registers the observers that open a context menu
# listing each ancestor's group-node children.
explorer_header <- function(
  input, output, session, .values, .explorer_classes, .explorer_class_returns,
  .explorer_rvs, .root_node_r
) {
  ns <- session$ns
  # ancestor_counter: highest generation index for which observers have been
  # registered so far; children_contextmenu_item_used_ids: per generation,
  # the node ids that already have a context-menu observer.
  rvs <- shiny::reactiveValues(
    ancestor_counter = -1,
    children_contextmenu_item_used_ids = list()
  )
  # Display name of a node, taken from its underlying object.
  node_to_name <- function(node) {
    node$get_object()$get_name()
  }
  # The generation of the current node is the number of ancestor nodes including
  # the root node the current node has
  generation_r <- shiny::reactive({
    get_node_distance(.explorer_rvs$current_node, .root_node_r())
  })
  # Header contains links to all direct ancestors of the current displayed
  # children in the body (like Windows Explorer). After a link is clicked, a
  # context menu opens up, in which links to all children of this ancestor are
  # displayed
  output$header <- shiny::renderUI({
    indices <- c(0, seq_len(generation_r()))
    ancestor_list <- purrr::map(indices, function(i) {
      node <- get_ancestor_node(.explorer_rvs$current_node, i)
      if (rvs$ancestor_counter < i) {
        # This code chunk is only called once for every index so that each
        # observer is just assigned once
        # Increment counter
        rvs$ancestor_counter <- rvs$ancestor_counter + 1
        # Initialise character vector, which stores all ids of children, which
        # are observed by a context menu item
        rvs$children_contextmenu_item_used_ids[[i + 1]] <- character()
        # Clicking the ancestor's text link navigates back to that ancestor.
        shiny::observeEvent(input[["child_link" %_% i]], {
          .explorer_rvs$current_node <- get_ancestor_node(.explorer_rvs$current_node, i)
        })
        # Clicking the angle icon opens a context menu listing the ancestor's
        # group-node children.
        shiny::observeEvent(input[["children_link" %_% i]], {
          shiny::req(input[["children_link" %_% i]] > 0)
          node <- get_ancestor_node(.explorer_rvs$current_node, i)
          children <- node$get_children()$get_objects()
          is_group_node <- purrr::map_lgl(children, function(node) {
            explorer_class_id <- node$get_explorer_class_id()
            .explorer_class_returns[[explorer_class_id]]$is_group_r()
          })
          sibling_group_nodes <- children[is_group_node]
          # If node has no children, that are group nodes, no contextmenu is
          # displayed
          if (!length(sibling_group_nodes)) return()
          # Only create contextmenu_items for group nodes
          contextmenu_items <- purrr::map(sibling_group_nodes, function(node) {
            node_id <- node$get_id()
            node_object <- node$get_object()
            node_object_class <- class(node_object)
            # Pick an icon matching the class of the node's object.
            if ("Object" %in% node_object_class) {
              icon <- shiny::icon("folder")
            } else if ("DatasetObject" %in% node_object_class) {
              icon <- shiny::icon("table")
            } else {
              icon <- shiny::icon("plus")
            }
            # Use of i + 1, since index starts with zero
            if (!(node_id %in% rvs$children_contextmenu_item_used_ids[[i + 1]])) {
              rvs$children_contextmenu_item_used_ids[[i + 1]] <- c(
                rvs$children_contextmenu_item_used_ids[[i + 1]],
                node_id
              )
              shiny::observeEvent(
                input[["children_contextmenu" %_% i %_% "item" %_% node_id]],
                {
                  .explorer_rvs$current_node <- .root_node_r()$get_node(node_id)
                }
              )
            }
            # Return context menu item. If clicked, the current node is set
            # to the sibling node represented by this item.
            contextmenu_item(
              inputId = ns("children_contextmenu" %_% i %_% "item" %_% node_id),
              label = node_to_name(node),
              icon = icon
            )
          })
          show_contextmenu(
            contextmenu(
              x = input[["children_link" %_% i %_% "position"]]$left,
              y = input[["children_link" %_% i %_% "position"]]$bottom,
              contextmenu_items
            ),
            session = session
          )
        })
      }
      # Display is in opposite direction due to direction:
      htmltools::tags$li(
        htmltools::div(
          class = "explorer-text",
          shiny::actionLink(
            inputId = ns("child_link" %_% i),
            label = node_to_name(node)
          )
        ),
        htmltools::span(
          class = "explorer-vr"
        ),
        htmltools::div(
          class = "wide-icon explorer-angle",
          positional_input(
            inputId = ns("children_link" %_% i),
            label = "",
            icon = shiny::icon("angle-right")
          )
        )
      )
    })
    ui <- htmltools::tagList(
      htmltools::tags$ul(
        class = "explorer-ancestor-list inner-box",
        # Revert, as the most remote ancestor has to be on the left side
        rev(ancestor_list)
      )
    )
    ui
  })
}
|
6a739d3505b740210c6fe554015604c1f7b88a89 | b438c7e240506b851d4f9179190ce83dd5e45f95 | /R/set_Classes.R | d93cc4a333ad989ed6957fb892ef152ff2398e7f | [] | no_license | meerapatelmd/secretary | bd75a6ef4b3ab23eb9177b08e0610cefdfbed7d7 | 492f0555fa07aeebc8ee64c6ed4962053636d8b0 | refs/heads/master | 2023-08-06T22:21:02.641198 | 2021-10-05T20:54:37 | 2021-10-05T20:54:37 | 253,169,208 | 0 | 0 | null | 2021-10-05T20:54:38 | 2020-04-05T06:20:40 | R | UTF-8 | R | false | false | 1,220 | r | set_Classes.R | #' Set TypewriteMessage Class
#' @noRd
#' @export TypewriteMessage
#' @exportClass TypewriteMessage
# S4 container for a message in both plain and formatted form. Uses the
# `slots =` argument instead of the deprecated `representation()`.
TypewriteMessage <- setClass("TypewriteMessage",
                             slots = c(Plain = "character",
                                       Formatted = "character"),
                             prototype = prototype(Plain = NA_character_,
                                                   Formatted = NA_character_))
#' Set the TypewriteLines Class
#' @export TypewriteLines
#' @exportClass TypewriteLines
# A message plus its layout (blank lines and indents) for typewriter output.
TypewriteLines <-
  setClass("TypewriteLines",
           slots = c(TypewriteMessage = "TypewriteMessage",
                     "Blank Lines" = "numeric",
                     "Indents" = "numeric"))
#' Set TypewriteMessageLog Class
#' @export TypewriteMessageLog
#' @exportClass TypewriteMessageLog
# Log entry: a timestamped message with optional originating script and error.
TypewriteMessageLog <-
  setClass("TypewriteMessageLog",
           slots = c(Timestamp = "character",
                     "R Script" = "character",
                     Message = "TypewriteMessage",
                     Error = "character"),
           prototype = prototype("R Script" = NA_character_,
                                 Error = NA_character_))
|
0c640612297345280cad92de1f9aef50721fe928 | 7e7646c0aebbfc8becdf74b64f05e0ec24fc4d3f | /R/mice-init.R | ae6aec41e4204a406ee276ce1b17a6313b95fb2a | [] | no_license | roliveros-ramos/mice | bfddb78ff8b9b19bf587c3c1bc4e7398ec0cdecb | 3aebda0b3d2a6d22197544244d9c357bc195d18f | refs/heads/master | 2023-08-18T15:23:25.581157 | 2023-08-09T19:22:35 | 2023-08-09T19:22:35 | 109,646,515 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 11,874 | r | mice-init.R |
# Initialization of Functional Groups -------------------------------------
# Validates and normalises the user-supplied group definitions before a
# simulation: names the list, fills in defaults (type, number of resource
# size bins, recruitment model and seasonality) and attaches the resolved
# recruitment function to every group.
# Arguments:
#   groups       - list of group parameter lists; each must carry a `name`.
#   ndtPerYear   - number of model time steps per year.
#   subDtPerYear - number of sub-steps per step (forwarded to the resource
#                  biomass check).
#   T            - simulation length in years (NB: shadows base T/TRUE
#                  inside this function).
#   resources    - optional named list of replacement biomass series.
checkGroups = function(groups, ndtPerYear, subDtPerYear, T, resources) {
  groupNames = sapply(groups, FUN = "[[", i="name")
  names(groups) = groupNames
  ndt = ndtPerYear*T
  if(!is.null(resources)) groups = updateResourceBiomass(groups, resources)
  for(i in seq_along(groups)) {
    # default type when none is declared
    if(is.null(groups[[i]]$type)) groups[[i]]$type = "functional_group"
    groups[[i]]$target = .initExclusion(preyExclude = groups[[i]]$preyExclude,
                                        target = groups[[i]]$target,
                                        groupNames = groupNames)
    # check for resources
    if(groups[[i]]$type=="resource") {
      groups[[i]]$biomass = .checkResourceBiomass(groups[[i]]$biomass,
                                                  ndtPerYear, subDtPerYear, T)
      groups[[i]]$recruitment = "resource"
      ngroups = groups[[i]]$ngroups
      # default number of logarithmic size bins for a resource group
      if(is.null(ngroups)) groups[[i]]$ngroups = 5
      groups[[i]]$TL = .checkTL(groups[[i]]$TL)
    } # end of check for resources
    ## get recruitment function
    # exact=TRUE disables partial matching, so e.g. `recruitmentSeasonality`
    # cannot be picked up by accident
    recruitmentType = groups[[i]][["recruitment", exact=TRUE]]
    if(is.null(recruitmentType)) recruitmentType = "ricker"
    # recruitment seasonality: defaults to a uniform split over the year and
    # is normalised to sum to 1, then recycled over the whole run
    recruitmentSeasonality = groups[[i]]$recruitmentSeasonality
    if(is.null(recruitmentSeasonality))
      recruitmentSeasonality = rep(1, ndtPerYear)/ndtPerYear
    if(length(recruitmentSeasonality)==ndtPerYear) {
      recruitmentSeasonality = recruitmentSeasonality/sum(recruitmentSeasonality)
      recruitmentSeasonality = rep(recruitmentSeasonality, length=ndt)
    }
    if(length(recruitmentSeasonality)!=ndt)
      stop(sprintf("Recruitment seasonality must be a vector of length %s or %s.", ndtPerYear, ndt))
    if(anyNA(recruitmentSeasonality)) stop("Recruitment seasonality must not include NAs.")
    groups[[i]]$recruitmentSeasonality = recruitmentSeasonality
    # resolve "recruitment.<type>.spec" to a function and tag it with its
    # type and source reference for later inspection
    recruitmentModel = match.fun(paste("recruitment", recruitmentType, "spec", sep="."))
    src = attr(recruitmentModel, "srcref")
    attr(recruitmentModel, "type") = recruitmentType
    attr(recruitmentModel, "source") = src
    class(recruitmentModel) = "recruitment.function"
    groups[[i]]$recruitment = recruitmentModel
    # end of recruitment
  }
  return(groups)
}
initGroups = function(groups, dt) {
  # Initialise every group (species or resource) and name the resulting list
  # after the groups themselves.
  initialised = lapply(groups, .initGroups, dt = dt)
  names(initialised) = vapply(groups, function(g) g$name, character(1))
  initialised
}
.initGroups = function(par, dt) {
  # Dispatch on the declared group type; anything that is not an explicit
  # resource is treated as a regular species group.
  type = par$type
  if (!is.null(type) && type == "resource") {
    return(.initResources(par = par, dt = dt))
  }
  .initSpecies(par = par, dt = dt)
}
# Builds the per-age-class state table for a regular species group from its
# life-history parameters: von Bertalanffy growth, allometric weight,
# initial abundance decaying at rate M from total biomass B0, maturity,
# feeding type, prey-size window and mortality components.
# NOTE(review): relies on helpers defined elsewhere in the package
# (VB, .getFeedingType, preySizeModel, mortality.*.spec) — not visible here.
.initSpecies = function(par, dt) {
  ns = 20 # number of bins for growth, constant for now...
  A = par$A
  Linf = par$Linf
  k = par$k
  t0 = par$t0
  a = par$a
  b = par$b
  am = par$am
  B0 = par$B0*1e6 # tonnes -> g
  psmin = par$predRange[1] # to be updated with other criteria
  psmax = par$predRange[2]
  # default natural mortality: 1% survival to the maximum age A
  M = if(is.null(par$M)) -log(0.01)/A else par$M
  egg_size = if(is.null(par$egg_size)) 1e-4 else par$egg_size
  out = data.frame(name=par$name, age=seq(0, A, by=dt), stringsAsFactors=FALSE)
  L1 = VB(age=out$age, Linf=Linf, k=k, t0=t0, egg_size=egg_size) # t=t
  L2 = VB(age=out$age+dt, Linf=Linf, k=k, t0=t0, egg_size=egg_size) # t=t+dt
  out$size = L1 # size at t
  out$w = a*out$size^b # weight at t
  out$dL = (L2-L1)
  out$a = a
  out$b = b
  out$s_mean = (L1+L2)/2 # mean length in [t, t+dt]
  out$w_mean = (a*(L2^(b+1) - L1^(b+1))/(L2-L1))/(b+1) # mean weight in [t, t+dt]
  out$s_min = L1 + 1*(L2-L1)/ns # lower threshold for prey size
  out$s_max = L1 + (ns-1)*(L2-L1)/ns # upper threshold for prey size
  # initial numbers at age: exponential decay of biomass B0 at rate M,
  # converted to abundance by dividing by individual weight
  mw = exp(-M*out$age)*out$w
  out$N = B0*(mw/sum(mw))/out$w
  out$mature = (out$age+dt >= am) + 0
  out$ssb = out$w_mean*out$mature
  out$feedType = .getFeedingType(par)
  # temporal
  out$feedType[out$s_mean<5] = "planktivorous"
  # prey-size window predicted from log predator length via preySizeModel
  out$logPredLength = log10(out$s_min)
  out$psize_min = 10^predict(preySizeModel$min, newdata=out) # prey upon with mean length
  out$logPredLength = log10(out$s_max)
  out$psize_max = 10^predict(preySizeModel$max, newdata=out) # prey upon with mean length
  out$logPredLength = NULL
  out$Mold = mortality.senecence.spec(out$age + 0.5*dt, par=par)
  out$Mb = mortality.constant.spec(out$age, par=par)
  out$Mstarv = if(is.null(par$Mstarv)) 1 else par$Mstarv
  out$Ystar = if(is.null(par$Ystar)) 3.5 else par$Ystar
  out$delta = if(is.null(par$delta)) 0.9 else par$delta
  out$TL = NA # test is 2 is a better initialization
  out$egg_tl = if(is.null(par$egg_tl)) 2 else par$egg_tl
  # out$group = par$group
  out$type = par$type
  class(out) = c("mice_species", class(out))
  return(out)
}
.initResources = function(par, dt) {
  # Build the state table for a background resource group. Resources are not
  # age-structured: they are split into `ngroups` logarithmic size bins whose
  # total biomass is spread evenly across bins (Sheldon, 1972).
  n_bins = par$ngroups
  total_biomass = par$biomass[1]*1e6 # tonnes -> g
  # logarithmically spaced size-bin edges between size_min and size_max
  edges = 10^seq(from = log10(par$size_min), to = log10(par$size_max),
                 length = n_bins + 1)
  lower = edges[-(n_bins + 1)]
  upper = edges[-1]
  out = data.frame(name = par$name, age = rep(0, n_bins),
                   stringsAsFactors = FALSE)
  out$size = (lower + upper)/2 # representative size of each bin
  out$w = 1                    # resources carry unit weight
  out$dL = 0                   # and do not grow
  out$a = 1
  out$b = 0
  out$s_mean = (lower + upper)/2
  out$w_mean = 1
  out$s_min = lower
  out$s_max = upper
  out$N = total_biomass/n_bins # equal biomass per log-size bin
  out$mature = 0
  out$ssb = 0
  out$psize_min = -1 # resources do not prey on anything
  out$psize_max = -1
  out$Mold = 0
  out$Mb = 1e-8
  out$Mstarv = 0
  out$Ystar = 0
  out$delta = if(is.null(par$delta)) 0.99999999 else par$delta
  # Distribute trophic levels across the bin edges along a saturating curve
  # between the group's minimum and maximum TL, then take bin midpoints.
  tl_range = par$TL
  tl_span = diff(tl_range)
  eta = 0.1
  idx = seq_len(n_bins + 1) - 1
  offsets = (log((eta^tl_span - 1)*idx + n_bins) - log(n_bins))/log(eta)
  tl_edges = tl_range[1] + offsets
  out$TL = tl_edges[-1] - diff(tl_edges)/2
  out$egg_tl = out$TL[1]
  out$type = par$type
  class(out) = c("mice_resources", class(out))
  return(out)
}
# Initialization of Fleets ------------------------------------------------
checkFleets = function(fleets) {
  # Name the fleet list after each fleet's own declared `name`.
  names(fleets) = vapply(fleets, function(fleet) fleet$name, character(1))
  fleets
}
initFleets = function(fleets, groups, ndtPerYear, T) {
  # Initialise every fleet against the full set of group names and return the
  # fleet list named after the fleets themselves.
  groupNames = vapply(groups, function(g) g$name, character(1))
  out = lapply(fleets, .initFleet, ndtPerYear = ndtPerYear, T = T,
               groupNames = groupNames)
  names(out) = vapply(fleets, function(f) f$name, character(1))
  out
}
# Normalises a single fleet definition: fills the default effort, expands
# fishing mortality F = q*E to one value per time step, resolves the
# selectivity model, and expands the targeting vector.
# NOTE: the argument `T` is the number of simulated years and shadows
# base T/TRUE inside this function; the local `F` likewise shadows FALSE.
.initFleet = function(par, ndtPerYear, T, groupNames, tiny=1e-3) {
  # add seasonality
  # move to checkFleets
  if(is.null(par$E)) par$E = 1
  ndt = ndtPerYear*T
  F = par$q*par$E
  if(length(F)==1) F = rep(F, ndt) # constant F
  # this compares against the `T` argument (number of years), not TRUE
  if(length(F)==T) F = rep(F, each=ndtPerYear) # F by Year
  if(length(F)!=ndt) stop("Effort must be provided by year.") # improve: message
  par$F = F
  # Resolve "selectivity.<type>.spec" and wrap it in a closure. The closure
  # captures the local `par` by environment, so when eventually called it
  # sees the fully initialised parameter list (including `target` set below).
  selectivityType = par$selectivity
  selectivityModel = match.fun(paste("selectivity", selectivityType, "spec", sep="."))
  par$selectivity = function(L) selectivityModel(x=L, par=par, tiny=tiny)
  attr(par$selectivity, "type") = selectivityType
  class(par$selectivity) = "selectivity.function"
  par$target = .initTargets(target=par$target, groupNames=groupNames)
  return(par)
}
# Builds a named numeric vector of targeting factors, one entry per group.
# Accepts: NULL (all groups targeted equally), a character vector of group
# names (targeted groups get 1, others 0), or a named numeric vector of
# per-group factors. Anything else is now rejected explicitly — previously an
# unnamed numeric vector fell through every branch and silently returned NULL.
.initTargets = function(target, groupNames) {
  out = setNames(numeric(length(groupNames)), nm = groupNames)
  # no explicit target: the fleet targets every group equally
  if(is.null(target)) {
    out[] = 1
    return(out)
  }
  # character vector: the named groups are targeted (1), everything else 0
  if(is.character(target)) {
    if(!all(target %in% groupNames)) {
      ind = which(!(target %in% groupNames))
      msg = paste(target[ind], collapse=",")
      msg = sprintf("target names (%s) do not match species group names.", msg)
      stop(msg)
    }
    out[target] = 1
    return(out)
  }
  # named numeric vector: use the supplied factors for the named groups
  if(!is.null(names(target))) {
    if(!all(names(target) %in% groupNames))
      stop("Target names do not match species group names.")
    out[names(target)] = target
    return(out)
  }
  stop("target must be NULL, a character vector, or a named numeric vector.")
}
.initExclusion = function(preyExclude, target, groupNames) {
  # Build the prey-accessibility vector: every group starts fully available
  # (weight 1); `target` overrides individual weights, `preyExclude` zeroes
  # the listed groups out entirely.
  out = setNames(rep(1, length(groupNames)), nm = groupNames)
  if(is.null(preyExclude) && is.null(target)) {
    return(out)
  }
  if(!is.null(target)) {
    # target must be a named numeric vector over known group names
    if(!is.numeric(target)) stop("Target must be numeric.")
    if(is.null(names(target))) stop("Target must be a named vector.")
    if(!all(names(target) %in% groupNames))
      stop("Target names do not match species group names.")
    out[names(target)] = target
  }
  if(!is.null(preyExclude)) {
    if(!is.character(preyExclude)) stop("preyExclude must be a character vector.")
    if(!all(preyExclude %in% groupNames)) {
      bad = preyExclude[!(preyExclude %in% groupNames)]
      msg = paste(bad, collapse=",")
      msg = sprintf("preyExclude names (%s) do not match species group names.", msg)
      stop(msg)
    }
    out[preyExclude] = 0
  }
  return(out)
}
# Initialization of the environmental variables
# Validates a list of environmental forcing series: each variable must supply
# at least one value per model time step (`ndt`). Returns the list unchanged,
# or NULL when no environment is given. (NB: the `environment` argument
# shadows base::environment inside this function.)
checkEnvironment = function(environment, ndt) {
  if(is.null(environment)) return(NULL)
  if(!is.list(environment)) stop("The 'environment' argument must be a list.")
  # vapply is safe for an empty list (sapply would return a list there) and
  # guarantees an integer result for the comparison below
  ll = vapply(environment, length, integer(1))
  if(any(ll < ndt)) stop("You must provide at least one value per time step.")
  return(environment)
}
# Placeholder for future per-variable environmental checks; intentionally a
# no-op that always returns NULL for now.
.checkEnvironment = function(environment, ndt) {
  NULL
}
# update Parameters -------------------------------------------------------
updateParameters = function(target, par) {
  # Overwrite, group by group, the parameter values supplied in `par`.
  # `par` is a list keyed by parameter name; each element is a named vector
  # whose names identify the groups (elements of `target`) to update.
  if(!is.list(par)) stop("par must be a list.")
  parNames = names(par)
  if(any(parNames=="")) stop("all 'par' elements must be named.")
  validNames = sapply(target, FUN = "[[", i="name")
  for(parName in parNames) {
    # only touch groups that actually exist in `target`
    groupsToUpdate = intersect(names(par[[parName]]), validNames)
    for(iName in groupsToUpdate) {
      target[[iName]][parName] = par[[parName]][iName]
    }
  }
  return(target)
}
updateResourceBiomass = function(target, par) {
  # Replace the biomass series of the resource groups listed in `par`;
  # `par` must be a named list mapping group names to biomass vectors.
  if(is.null(par)) return(target)
  if(!is.list(par)) stop("par must be a list.")
  gNames = names(par)
  if(any(gNames=="")) stop("all 'resources' elements must be named.")
  validNames = sapply(target, FUN = "[[", i="name")
  for(iName in gNames[gNames %in% validNames]) {
    target[[iName]][["biomass"]] = par[[iName]]
  }
  return(target)
}
# Trophic level -----------------------------------------------------------
.checkTL = function(TL) {
  # Validate the (min, max) trophic-level pair of a resource group. A single
  # value is expanded to an identical pair; an autotroph (min TL of 1) has
  # its max forced to 1 with a warning.
  if(is.null(TL)) stop("TL must be provided for resources.")
  if(length(TL)>2) stop("TL must be a vector of length 2 (min and max TL).")
  if(length(TL)==1) TL = c(TL, TL)
  if(TL[1]>TL[2]) stop("min TL is greater than max TL.")
  if(TL[1]==1 && TL[2]!=1) {
    TL[2]=1
    warning("Autotroph group, ignoring max TL.")
  }
  return(TL)
}
# Iteratively solves for the trophic level of every group from the predation
# matrix: TL(predator) = 1 + diet-weighted sum of its preys' TLs, where the
# weights are the columns of `preyed` normalised to sum to 1 (rows = prey,
# columns = predators, given how colSums is used below). Groups with a known
# TL (non-NA entries of `tl`) are pinned to their value. Iteration stops when
# no TL is NA and consecutive iterates agree (mean difference <= 1e-5), or
# after 100 rounds.
initialTL = function(preyed, tl) {
  maxit = 100
  # normalise each column (predator diet) so prey fractions sum to 1
  preyed = t(t(preyed)/colSums(preyed))
  preyed[is.nan(preyed)] = 0  # columns with zero total produce NaN -> 0
  TL = tl
  natl = sum(is.na(TL))
  mtl = 1
  niter = 1
  while(((natl>0) | (mtl>1e-5)) & (niter<maxit)) {
    TL0 = TL
    # multiply row r by TL[r] (column-major recycling weights each prey row)
    tlp = preyed*TL
    tlp[tlp==0] = NA
    # predators whose preys' TLs are all still unknown stay NA this round
    omitTL = apply(tlp, 2, FUN=function(x) all(is.na(x)))
    TL = colSums(tlp, na.rm=TRUE) + 1
    TL[omitTL] = NA
    # re-pin the prescribed trophic levels
    TL[!is.na(tl)] = tl[!is.na(tl)]
    natl = sum(is.na(TL))
    natl0 = sum(is.na(TL0))
    # convergence can only be measured once no NA remains in two iterates
    if(natl==0 & natl0==0) mtl = abs(mean(TL0-TL))
    niter = niter + 1
  }
  return(TL)
}
# Updates trophic levels at time step `t` from the current predation matrix.
# At t == 1 the levels are solved from scratch via initialTL(); afterwards a
# single update TL = 1 + diet-weighted sum of the previous levels `tl` is
# applied. Groups with no prey keep their previous level, and resources
# (flagged by `isResource`) always keep their prescribed constant level.
calculateTL = function(preyed, tl, isResource, t) {
  if(t==1) {
    tl = initialTL(preyed, tl)
    return(tl)
  }
  # otl = TL[, t-1]
  # normalise each predator's diet column to sum to 1
  preyed = t(t(preyed)/colSums(preyed))
  preyed[is.nan(preyed)] = 0
  tlp = tl*preyed
  TL = colSums(tlp, na.rm=TRUE) + 1
  TL[TL==1] = tl[TL==1] # if no prey, keep last TL.
  TL[isResource] = tl[isResource] # keep constant resources' TL
  return(TL)
}
|
284c5591d0cd6ea004fc186e304dc566f5ec94ee | 5507eeff38ec29024331f6ac2d170bf154e39eb6 | /Assignment_6_1001.R | 4f18aa0583d2b64d34bc220065a88ec0b26a5aba | [] | no_license | aayushsaini/Numerical-methods-using-R | cf61d326b675a31ca42e974cd2fdfe1714903c55 | 8f1919ded37acabc60c3e29ffcfa52a6f2dc90e5 | refs/heads/main | 2023-06-16T20:02:01.178986 | 2021-07-16T13:15:20 | 2021-07-16T13:15:20 | 369,499,716 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,310 | r | Assignment_6_1001.R | #Assignment 6
#---------Q.1------------
#------------------------
n <- 100
p <- 0.05 # 5% defective rate
m <- n*p  # mean number of defective bulbs
print("Ans1(i) We are going to use Poisson Distribution, since P is very small and no. of trials is large i.e., 100")
pmf <- dpois(0:n, lambda=m)
cdf <- ppois(0:n, lambda=m)
# Draw the CDF first (its y-axis spans [0, 1]).
plot(cdf, main="a) Poisson's Distribution", pch=16, col="orange")
lines(cdf, col="orange")
# Overlay the PMF in the same panel on its own y-scale.
# TRUE is spelled out (T is a reassignable alias), and the invalid
# `add = TRUE` argument to plot.default is dropped; blank labels prevent
# the axis annotation from being over-printed.
par(new = TRUE)
plot(pmf, axes = FALSE, xlab = "", ylab = "", pch=16, col="red")
lines(pmf, col = "red")
# Legend colours now match the drawn series: the PMF is red, the CDF orange
# (the previous col = c(2, 7) rendered the CDF entry in yellow).
legend("right", legend = c("PDF", "CDF"), lty = 5, col = c("red", "orange"),
       lwd = 3, box.lty = 0)
#---------Q.2------------
#------------------------
sub1 <- runif(20, 1, 100)
sub2 <- runif(20, 1, 100)
sub3 <- runif(20, 1, 100)
# correlation matrix for the three subjects
df <- data.frame(x=sub1, y=sub2, z=sub3)
correl <- cor(df)
print(correl)
# NOTE: Use the arrow icons on the plots pane toolbar to navigate the plots.
plot(sub1, sub2, main="b) Subject 1 & 2", pch=16, col="darkgreen")
abline(lm(sub2~sub1), col="orange")
plot(sub1, sub3, main="c) Subject 1 & 3", pch=16, col="orange")
abline(lm(sub3~sub1), col="red")
plot(sub2, sub3, main="d) Subject 2 & 3\n(Navigate using arrow keys above for other plots)", pch=16, col="blue")
abline(lm(sub3~sub2), col="red")
65181a6491b3ee4e3d14c66fe5e263e4c4f73d5f | 622a08cd7f8c01c86930aa70b70fb8c79d7af0e4 | /R/processGIMMS.R | 719f50a4fd97a23a76d4a2d95c7671e4e326fdb4 | [] | no_license | npp97/paper_kilimanjaro_ndvi_comparison | 6651cd2c295f7b913729fee41b47d27ab117283c | f443c6d1e5d1d5a25bdd00b95855938146f3aa2c | refs/heads/master | 2021-01-16T23:22:35.421214 | 2015-10-15T14:21:24 | 2015-10-15T14:21:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,931 | r | processGIMMS.R | ### environmental stuff
## Preprocessing chain for GIMMS3g NDVI data over Kilimanjaro: download,
## rasterise, crop, reproject, trim, gap-fill (Whittaker smoothing),
## aggregate to monthly composites and deseason. Side effects throughout:
## output rasters are written under data/rst/GIMMS3g/.
## functions (located in repository, not on external hdd)
source("R/aggregateGimms.R")
## working directory
library(Orcs)
setwdOS(path_lin = "/media/fdetsch/XChange/", path_win = "D:/",
        path_ext = "kilimanjaro/ndvi_comparison")
## install old 'MODIS' version (bug in MODIS_0.10-33::whittaker.raster has not
## been fixed yet)
## NOTE(review): installing packages at run time makes the script hard to
## reproduce; consider pinning the version in the project setup instead.
install.packages("http://download.r-forge.r-project.org/src/contrib/MODIS_0.10-18.tar.gz",
                 repos = NULL)
## packages
lib <- c("Rsenal", "doParallel", "MODIS", "remote")
jnk <- sapply(lib, function(x) library(x, character.only = TRUE, quietly = TRUE))
## 'MODIS' global settings
MODISoptions(localArcPath = paste0(getwd(), "/data/MODIS_ARC/"),
             outDirPath = paste0(getwd(), "/data/MODIS_ARC/PROCESSED/"),
             MODISserverOrder = c("LAADS","LPDAAC"), quiet = TRUE)
## parallelization: 4 local workers for the foreach loops below
cl <- makeCluster(4)
registerDoParallel(cl)
## geographic extent
rst_kili <- kiliAerial(rasterize = TRUE, minNumTiles = 20)
## download available GIMMS data
fls_gimms <- downloadGimms(dsn = "data/GIMMS")
# Rearrange GIMMS files according to timestamp
fls_gimms <- rearrangeGimms(dsn = "data/GIMMS",
                            pattern = "^geo",
                            rename_yearmon = TRUE,
                            full.names = TRUE)
# Create .hdr companion file
fls_hdr <- createHdr(file = "data/gimms3g.hdr")
## Data processing: per input file, rasterise -> crop to the Kilimanjaro
## bounding box -> reproject to EPSG:21037
rst_gimms <-
  foreach(i = fls_gimms, .packages = c("Rsenal", "zoo"), .combine = "stack",
          .export = ls(envir = globalenv())) %dopar% {
    # rasterize
    fls_rst <- paste0("data/rst/GIMMS3g/rst/", basename(i))
    rst <- rasterizeGimms(file = i,
                          headerfile = fls_hdr,
                          file_out = fls_rst,
                          format = "GTiff", overwrite = TRUE)
    # crop
    fls_crp <- paste0("data/rst/GIMMS3g/crp/CRP_", basename(fls_rst), ".tif")
    rst_crp <- crop(rst, extent(c(37, 37.72, -3.4, -2.84)), snap = "out",
                    filename = fls_crp, format = "GTiff", overwrite = TRUE)
    # project
    fls_prj <- paste0("data/rst/GIMMS3g/prj/PRJ_", basename(fls_crp))
    rst_crp_prj <- projectRaster(rst_crp, crs = "+init=epsg:21037",
                                 filename = fls_prj,
                                 format = "GTiff", overwrite = TRUE)
    # return processed raster
    return(rst_crp_prj)
  }
## remove white margins
# trim
fls_prj <- list.files("data/rst/GIMMS3g/prj", pattern = "^PRJ_.*.tif$",
                      full.names = TRUE)
rst_prj <- stack(fls_prj)
spy_prj <- rasterToPolygons(rst_prj[[1]])
dir_trm <- "data/rst/GIMMS3g/trm/"
fls_trm <- paste0(dir_trm, "TRM_", basename(fls_prj))
# NOTE(review): only layers 283, 284 and 291 are trimmed here — presumably a
# leftover of a partial re-run; confirm whether all layers should be trimmed.
rst_trm <- foreach(i = (1:nlayers(rst_prj))[c(283, 284, 291)], .packages = c("raster", "rgdal"),
                   .combine = "stack") %dopar% {
  rst <- crop(rst_prj[[i]], spy_prj)
  rst <- writeRaster(rst, filename = fls_trm[i], format = "GTiff",
                     overwrite = TRUE)
}
# reimport files
fls_trm <- list.files(dir_trm, pattern = "^TRM_.*.tif$", full.names = TRUE)
rst_trm <- stack(fls_trm)
# ## test
# st <- grep("2000", fls_trm)[1]
# nd <- grep("2010", fls_trm)[length(grep("2012", fls_trm))]
# rst_trm <- rst_trm[[st:nd]]
# fls_prj <- fls_prj[st:nd]
## gap-filling
# setup `orgTime` object -> replace %Y%m with %Y%m%d (compatible to `as.Date` in
# `orgTime`)
org_gimms <- basename(fls_prj)
# Rewrite each filename's %Y%m token (characters 12-17) to %Y%m%d so that
# `orgTime`/`as.Date` can parse it: the "15a" half-month maps to day 01, the
# other half to day 15. seq_along() is safe for a zero-length file list,
# whereas 1:length() would iterate over c(1, 0).
for (i in seq_along(org_gimms)) {
  dt_yrmn <- substr(org_gimms[i], 12, 17)
  dt_yrmndy <- paste0(dt_yrmn, ifelse(substr(org_gimms[i], 18, 20) == "15a", "01", "15"))
  org_gimms[i] <- gsub(dt_yrmn, dt_yrmndy, org_gimms[i])
}
# Parse the (now %Y%m%d) timestamps out of the filenames (characters 12-19).
org_gimms <-
  orgTime(org_gimms, pillow = 0, pos1 = 12, pos2 = 19, format = "%Y%m%d")
# whittaker.raster: Whittaker smoothing over the full period (gap-filling)
rst_wht <-
  whittaker.raster(rst_trm, timeInfo = org_gimms, lambda = 6000, nIter = 3,
                   removeOutlier = TRUE, outlierThreshold = 0.2, groupYears = FALSE,
                   outDirPath = "data/rst/GIMMS3g/whittaker", overwrite = TRUE)
# store: re-read the smoothed stack and write one WHT_* GeoTiff per layer
rst_wht <- stack("data/rst/GIMMS3g/whittaker/NDVI_YearlyLambda6000_fullPeriod.tif")
dir_wht <- "data/rst/GIMMS3g/whittaker/"
fls_wht <- paste0(dir_wht, "WHT_", basename(fls_trm))
ls_rst_wht <- foreach(i = 1:nlayers(rst_wht), j = fls_wht,
                      .packages = c("raster", "rgdal")) %dopar% {
  writeRaster(rst_wht[[i]], filename = j, format = "GTiff", overwrite = TRUE)
}
rst_wht <- stack(ls_rst_wht)
## Monthly aggregation
# available files
fls_wht <- list.files("data/rst/GIMMS3g/whittaker",
                      pattern = "^WHT_TRM.*.tif$", full.names = TRUE)
# aggregate into monthly composites; the grouping key is taken from filename
# characters 20-25 (presumably the year-month token — confirm against the
# TRM_* naming scheme)
dir_mvc <- "data/rst/GIMMS3g/whittaker_mvc/"
fls_mvc <- paste0(dir_mvc, "MVC_", unique(substr(basename(fls_wht), 1, 25)))
rst_agg <- aggregateGimms(files = fls_wht, files_out = fls_mvc,
                          start = 20, stop = 25, nodes = 3L,
                          format = "GTiff", overwrite = TRUE)
## deseason
# import data
fls_agg <- list.files("data/rst/GIMMS3g/whittaker_mvc/",
                      pattern = "^MVC_.*.tif$", full.names = TRUE)
fls_agg <- fls_agg[7:length(fls_agg)] # remove jul-dec 1981
rst_agg <- stack(fls_agg)
# deseason
rst_dsn <- deseason(rst_agg, use.cpp = TRUE)
# store one DSN_* GeoTiff per deseasoned layer
dir_dsn <- "data/rst/GIMMS3g/whittaker_dsn/"
fls_dsn <- paste0(dir_dsn, "DSN_", basename(fls_agg))
ls_rst_dsn <- foreach(i = 1:nlayers(rst_dsn), j = fls_dsn,
                      .packages = c("raster", "rgdal")) %dopar% {
  writeRaster(rst_dsn[[i]], filename = j, format = "GTiff", overwrite = TRUE)
}
rst_dsn <- stack(ls_rst_dsn)
## deregister parallel backend
## NOTE(review): `cl` from makeCluster() is never stopped; stopImplicitCluster()
## only stops doParallel's implicit cluster — consider stopCluster(cl) here.
stopImplicitCluster()
## re-install most recent 'MODIS' version
detach("package:MODIS")
install.packages("http://download.r-forge.r-project.org/src/contrib/MODIS_0.10-33.tar.gz",
                 repos = NULL)
|
2c1dc1de96c6f1b08b2e715b4f81e9192948a940 | d65574d30ee29a7b7661df8e9d74e4fd3f043f1f | /server.R | bbe3a52e9927aa50e4bb3f15af56d20e52cece8c | [] | no_license | anumoshsad/Developing-Data-Products | 3332322a277a3f7eb285813c5f6d6a7600570b70 | 3a6f80e6f5f6e6981982581b93f0b1c207e4a6f5 | refs/heads/master | 2020-02-26T15:15:14.407315 | 2016-08-14T06:34:36 | 2016-08-14T06:34:36 | 65,653,322 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 949 | r | server.R | library(shiny)
shinyServer(
  function(input,output){
    # Number of simulated samples of size n drawn per render.
    nosim <- 10000
    output$newHist <- renderPlot({
      n <- input$n
      lambda <- 0.2
      # Each row of the matrix is one sample of n Exp(rate = 0.2) draws;
      # take row means to get nosim sample means.
      dat <- apply(matrix(rexp(nosim*n, lambda), nosim),
                   1 , mean)
      # Histogram of the sample means on the density scale. The axis label
      # now reports the actual simulation count (it was hard-coded as 1000).
      hist(dat, main = "Sample Mean Distribution",
           breaks = 50, border = "gray", prob = TRUE,
           col = "LightBlue",
           xlab = paste("Sample Mean of", nosim, "simulations"))
      # Empirical density of the simulated means (blue).
      lines(density(dat), col = "blue", lwd = 2)
      # Theoretical CLT density: Normal(1/lambda, (1/lambda)/sqrt(n)) (red).
      xfit <-seq(min(dat),max(dat), length= 100)
      yfit <-dnorm(xfit, mean = 1/lambda, sd = 1/lambda/sqrt(n))
      lines(xfit, yfit, pch = 25, col ='red', lwd = 1)
      # Legend colours now match the curves: the sample density is drawn in
      # blue and the theoretical one in red (they were swapped before).
      legend('topright',c('Sample mean density',
                          'Theoretical mean density'),
             col =c('blue','red'), lty =c(1,1))
    })
  })
|
f99d8ddeb655ccfad875f528cdd360c42b05f443 | 4041625bb47c72dd6ca504a2db4e591f8dd0f4c0 | /inprep/eusidatasets.r | 80a37aade6db854904dfe76c590e251b671b8a12 | [] | no_license | pwasiewi/eusi | fa1eaba2f005a36417e7e176b0b9c96817d4c745 | 7c0d3365a1433a0764d3f5637f8e5f1070764ba9 | refs/heads/master | 2020-04-14T05:03:37.410622 | 2018-03-31T16:01:39 | 2018-03-31T16:01:39 | 372,294 | 0 | 4 | null | null | null | null | UTF-8 | R | false | false | 3,133 | r | eusidatasets.r | # skrypt do zajęć EU SI: dane testowe
# TODO: test, testować
# Licence LGPL
# Author: Piotr Wąsiewicz
########################################################################################################
##########################################################################################################
# Select the active dataset; exactly one of the assignments below should be
# uncommented at a time.
nData<-"glass"
#nData<-"nihills"
#nData<-"photocar"
#nData<-"meatspec"
#nData<-"skoliozaNIL1.csv"
#nData<-"narchecked.csv"
##########################################################################################################
# load the training dataset named in nData from the installed data sets
#assign(nData,read.csv(paste("file://",mypath,nData,sep=""),head=TRUE,sep=";",dec=",",na.strings=c("NA", "BD", "bd", "", "?")))
data(list=nData)
DataSet<-get(nData)
##########################################################################################################
# Per-dataset configuration: which attributes are labels, which are excluded
# from the Kruskal/MDS step, which feed the decision tree, and which variable
# is the regression target.
if(nData=="glass"){
# attributes that are factors, i.e. qualitative variables with categories (labels)
etykiety=c("Type")
#kruskal
# whatever is listed here is NOT passed on to isoMDS(daisy), e.g. not2kruskal=c("Type")
not2kruskal=c()
# single attribute whose discrete values are used for scoring
vecfactorzesc=c("Type")
# attributes used in plotMDS to colour the resulting points
zmiennain<-names(DataSet)
# tree variables
# variables that do not enter the tree, incl. its leaves / the optimisation target
not2tree=c("Type")
# tree leaf, i.e. the class label
liscie=c("Type")
# dependent variables and anything else not needed in the regression
zalezne=c("Type","RI")
# chosen dependent variable (regression target), usually floating point
zmiennaout<-"RI"
}
##########################################################################################################
if(nData=="nihills"){
etykiety=c()
#kruskal
not2kruskal=c()
zmiennain<-names(DataSet)
# tree variables
not2tree=c('time')
liscie=c('time')
# dependent variables and anything else not needed in the regression
zalezne=c("time")
zmiennaout<-"time"
}
##########################################################################################################
if(nData=="photocar"){
etykiety=c("group","event","tumor")
vecfactorzesc=c("event")
#kruskal
not2kruskal=c()
zmiennain<-names(DataSet)
# tree variables
not2tree=c("group")
liscie=c("group")
# dependent variables and anything else not needed in the regression
#parvecnolm=c("time","group","tumor","event")
zalezne=c("time")
zmiennaout<-"time"
}
##########################################################################################################
if(nData=="meatspec"){
etykiety=c("")
vecfactorzesc=c("")
#kruskal
not2kruskal=c()
zmiennain<-names(DataSet)
# tree variables
not2tree=c("fat")
liscie=c("fat")
# dependent variables and anything else not needed in the regression
zalezne=c("fat")
zmiennaout<-"fat"
}
##########################################################################################################
if(nData=="skoliozaNIL1.csv"){
etykiety=c()
vecfactorzesc=c()
#kruskal
not2kruskal=c()
zmiennain<-names(DataSet)
# tree variables
not2tree=c("NI")
liscie=c("NI")
# dependent variables and anything else not needed in the regression
zalezne=c("NI")
zmiennaout<-"NI"
}
|
ba3dac098563835e35bd1742af3b76b3a083a34d | 9818edb917a1244192e03076b920114518715e8a | /Financial-Risk-Modelling-and-Portfolio-Optimization-with-R/source-code/Part3Chapter4Ex9.R | 3429b9208ee83a8f186936bca2eb37b93b93f6dd | [
"MIT"
] | permissive | rivu-basu/R_for_Quantitative_Finance | 927d06495a1668e32ec0671b7b88ba72e6bdf183 | f4c78c547c28408cc0f859630ebe57f2fb61b6c8 | refs/heads/master | 2020-05-21T17:07:27.580327 | 2014-08-29T03:25:52 | 2014-08-29T03:25:52 | 186,116,962 | 1 | 0 | MIT | 2019-05-11T10:07:26 | 2019-05-11T10:07:25 | null | UTF-8 | R | false | false | 1,014 | r | Part3Chapter4Ex9.R | ## Defining portfolio specifications
## Estimator hooks used by fPortfolio's portfolioSpec(): each must return a
## list with elements `mu` (mean vector) and `Sigma` (covariance matrix).
# Prior moments from a fitted multivariate skewed Student's t model.
# NOTE(review): reads `MSTfit` from the enclosing environment (fitted earlier
# in the chapter) rather than using `x` -- confirm MSTfit exists before use.
SSTPrior <- function(x, spec = NULL, ...){
  list(mu = c(MSTfit@fit$beta), Sigma = MSTfit@fit$Omega)
}
# Posterior moments from Black-Litterman copula posterior simulations.
# NOTE(review): reads `CopPost` from the enclosing environment.
BlCopPost <- function(x, spec = NULL, ...){
  Sim <- CopPost@posteriorSims
  list(mu = colMeans(Sim), Sigma = cov(Sim))
}
## Skewed Student's t
MSPriorSST <- portfolioSpec()
setEstimator(MSPriorSST) <- "SSTPrior"
## BLCOP specification
MSBlCop <- portfolioSpec()
setEstimator(MSBlCop) <- "BlCopPost"
## Tangency portfolios for all four specifications
## (MSPrior, MSBl, BoxC, R, PostDist, NAssets, ANames are defined earlier in the chapter)
R <- as.timeSeries(R)
BLR <- PostDist[[27]]
PSpecs <- list(MSPrior, MSBl, MSPriorSST, MSBlCop)
POpt <- lapply(PSpecs, function(x)
  tangencyPortfolio(data = R, spec = x,
                    constraints = BoxC)
)
# Collect the optimal weights into a 4 x NAssets matrix, expressed in percent.
PWeights <- unlist(lapply(POpt, getWeights))
Weights <- matrix(PWeights, ncol = NAssets, nrow = 4,
                  byrow = TRUE) * 100
colnames(Weights) <- ANames
rownames(Weights) <- c("Gauss", "Skewed Student's t",
                       "BL", "BLCop")
Weights
|
52346b48647366c39e26983a3cabc4668d9be7e7 | ec7d134a1337e3542e98032b8ef1b56caf95a25c | /phd/codes/similarity calculation.R | a7fdd9f1baf0c05c6a4d2ee908d9fc705123742f | [] | no_license | sylphs22/my-phd-work | 007ec88de63a265899245bef7d29c5b5a5fbaecb | e9b25ced9addd60bfe07afa95a59037ea9a6b50f | refs/heads/master | 2020-03-19T15:59:01.825169 | 2018-06-09T06:09:57 | 2018-06-09T06:09:57 | 136,694,540 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,824 | r | similarity calculation.R | #data=read.table("C:/Users/bip/Documents/Visual Studio 2012/Projects/recommend/recommend/dump1.txt",header=F)
# Similarity computation for recommender experiments:
# reads a user-by-attribute personality matrix, computes user-user cosine
# similarity, writes it out for the recommender, then clusters users with
# k-means and reports per-cluster RMSE.
data=read.table("C:/Users/bip/Desktop/Abhishek/95_1000_diff_sparse/95_1000_diff_sparse/95_1000_pers.txt",header=F)
#data=data[,36:47]#needs
data=data[,48:52]#values
#data=data[,1:35]#Big5
# --- alternative: inverse-Euclidean-distance similarity (kept for reference) ---
#d=as.matrix(dist(data, method = "euclidean", diag = FALSE, upper = T, p = 2))
#similarity1=1/d
#similarity=ifelse(similarity1=='Inf',0,similarity1)
#write.table(similarity1,row.names=FALSE,col.names=FALSE, "C:/Users/bip/Documents/Visual Studio 2012/Projects/recommend/recommend/similarity1.txt", sep="\t")
######################cosine###########################
library(lsa)
library(data.table)  # BUGFIX: data.table() is used below but was never loaded
# lsa::cosine() compares COLUMNS, so transpose to get user-user similarity
data=t(data)
data=as.matrix(data)
similarity<-cosine(data)
# zero out self-similarity (diagonal is exactly 1)
similarity=ifelse(similarity==1,0,similarity)
# BUGFIX: missing values must be detected with is.na(); the original compared
# against the *string* 'NA', which never matches a real NA.
similarity_cos=ifelse(is.na(similarity),0,similarity)
write.table(similarity,row.names=FALSE,col.names=FALSE, "C:/Users/bip/Documents/Visual Studio 2012/Projects/recommend/recommend/similarity.txt", sep="\t")
############pearson correlation##############################
#data=t(data)
#d=cor(data,use="complete.obs")
#d[is.na(d)] <- 0
#d[d == 1] <- 0
#write.table(d,row.names=FALSE,col.names=FALSE, "C:/Users/bip/Documents/Visual Studio 2012/Projects/recommend/recommend/similarity.txt", sep="\t")
#library(akmeans)
# BUGFIX: `data` was transposed above for cosine(); clustering and the
# column indexing below (mydata[,6:7]) need users as rows again.
users<-t(data)
set.seed(3)
clusters<-kmeans(users,4)
#clusters<-norm.sim.ksc(users,4)
u<-0:94  # user ids -- assumes exactly 95 users; TODO derive from nrow(users)
cl<-clusters$cluster
mydata <- cbind(users,cl,u)
uc<-mydata[,6:7]
uc=data.table(uc,key=c("u"))
# NOTE(review): `Final1` (per-user errors keyed by "u") must already exist in
# the workspace -- it is produced by another script; confirm before running.
Final2 <- Final1[uc, nomatch=0]
cl1<-subset(Final2,cl==1)
cl2<-subset(Final2,cl==2)
cl3<-subset(Final2,cl==3)
cl4<-subset(Final2,cl==4)
#cl5<-subset(Final2,cl==5)
# per-cluster RMSE: average the MSE column, then take the square root
RMSE1<-mean(cl1$MSE)
sqrt(RMSE1)
RMSE2<-mean(cl2$MSE)
sqrt(RMSE2)
RMSE3<-mean(cl3$MSE)
sqrt(RMSE3)
RMSE4<-mean(cl4$MSE)
sqrt(RMSE4)
#mean(cl5$MAE)
|
a79ed2c9eba4755dc6fff89895bced0e350c0bf4 | d1b1cead5e9525fbfec5b7df989ebc0a09c8d782 | /man/getAttributesColumnHeaders.Rd | 5bd87d913a9c0d19ab99cfcf32194e6f3f8e4698 | [] | no_license | AmundsenJunior/pfsrsdk | 0ce8195f9d9a96562d31992f44303ee151bd4111 | d799defb9447a4e70cb2906205f6023020fc621a | refs/heads/master | 2020-06-06T00:24:06.491018 | 2019-07-15T16:59:12 | 2019-07-15T20:37:59 | 192,584,459 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,575 | rd | getAttributesColumnHeaders.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getAttributesColumnHeaders.R
\name{getAttributesColumnHeaders}
\alias{getAttributesColumnHeaders}
\title{getAttributesColumnHeaders - Gets the attribute column header for all or specified attributes in an entityType.
\code{getAttributesColumnHeaders} Gets the attribute column header for all or specified attributes in an entityType.}
\usage{
getAttributesColumnHeaders(coreApi, attributeList = NULL, entityType,
...)
}
\arguments{
\item{attributeList}{list of attribute names (usually obtained through getEntityMetadata) to limit the column header names. Default = NULL.}
\item{entityType}{entity type which has the desired attribute column headers assigned}
\item{...}{additional arguments passed to \code{apiGET}}
\item{coreApi}{coreApi object with valid jsessionid}
}
\value{
List of length 2, containing \code{entity} and \code{response} objects:
\itemize{
\item{\code{entity}} is a character element with associated column header names.
\item{\code{response}} is the entire HTTP response.
}
}
\description{
\code{getAttributesColumnHeaders} - Gets the attribute column header for all or specified attributes in an entityType.
}
\examples{
\dontrun{
api <- coreAPI("PATH TO JSON FILE")
login <- authBasic(api)
experimentAssayType <- getAttributesColumnHeaders(login$coreApi, "CI_TEMPERATURE", "BEER")
logOut(login$coreApi)
}
}
\author{
Edgardo Gutierrez edgardo.gutierrez@thermofisher.com
Natasha Mora natasha.mora@thermofisher.com
Francisco Marin francisco.marin@thermofisher.com
}
|
e03a2fe1f44965f2b03ae0d8b184d16e7607bd5c | 3ad3a2b20fb23eaa6e3d6934007207914ae46e6f | /man/box_plot_max_weekly_cases.Rd | dbe83827e190d91fa77cdcb558f40d68f24340bb | [
"MIT"
] | permissive | dachuwu/ringbp | f44de5d3eb2cc4df7a83fd7ab6e85acd1db6ecd5 | a48964b10276d8673d5560ec7f6081e055446dbc | refs/heads/master | 2022-06-26T23:39:34.745345 | 2020-05-11T05:55:00 | 2020-05-11T05:55:00 | 261,081,295 | 0 | 0 | NOASSERTION | 2020-05-04T04:36:09 | 2020-05-04T04:36:08 | null | UTF-8 | R | false | true | 1,704 | rd | box_plot_max_weekly_cases.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/box_plot_max_weekly_cases.R
\name{box_plot_max_weekly_cases}
\alias{box_plot_max_weekly_cases}
\title{Create box plots of maximum weekly cases by scenario}
\usage{
box_plot_max_weekly_cases(
results = NULL,
cap_cases = 5000,
extinct_thresold = 0.1,
theta_value = "15\%",
prop_asym = 0,
facet_scales = "fixed",
filt_control_effectiveness = 0.4,
flip_coords = FALSE,
num_initial_cases = 20,
record_params = FALSE,
y_lim = NULL
)
}
\arguments{
\item{results}{results of the branching model in a data.frame or tibble}
\item{cap_cases}{the maximum number of cases per outbreak scenario; default is 5000}
\item{theta_value}{A Character string defaulting to "15\%". Determines the proportion of infections that occur prior to
sypmtom onset.}
\item{prop_asym}{A numeric string defaulting to 0. Filters the proportion of infectiouns are asymptomatic}
\item{facet_scales}{passed to facet_grid’s scales parameter; default is "fixed"}
\item{filt_control_effectiveness}{filters by the minimum control effectiveness proportion; default is 0.4}
\item{flip_coords}{flip coordinates of the box plot; default is FALSE}
\item{num_initial_cases}{filters by the number of initial cases in the scenario; default is 20}
\item{record_params}{option to display the params as a caption (used for testing); default FALSE}
\item{y_lim}{Numeric the limit of the y axis to show.}
\item{extinct_thresold}{filters the minimum proportion of simulations that become extinct per scenario; default 0.1}
}
\description{
Create box plots of maximum weekly cases by scenario
}
\examples{
}
\author{
Amy Gimma and Sam Abbott
}
|
c6062695f94877ee714602717d62a06f481cf6e2 | 52802ff28ca37aa7d028c1411b2e3948ecbf02f0 | /text_mining/tidy_text/Ch06_topic_modeling.R | e32906eb1a78007ecb0e2573f1a614ef9752cc5b | [] | no_license | PyRPy/ML_Py_Templates | a4cd06c5e0cc54ccf544269ae4bf3a8aece15b85 | 677d29207c459bbc9e89e0f1239a1792f128a413 | refs/heads/master | 2022-02-23T06:18:51.466170 | 2022-02-16T22:52:02 | 2022-02-16T22:52:02 | 167,294,425 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,569 | r | Ch06_topic_modeling.R | # -- Latent Dirichlet allocation ---
# In Chapter 5 we briefly introduced the AssociatedPress dataset provided by the topicmodels package,
# as an example of a DocumentTermMatrix. This is a collection of 2246 news articles from an American news
# agency, mostly published around 1988.
library(topicmodels)
data("AssociatedPress")
AssociatedPress
# set a seed so that the output of the model is predictable
# k = 2 : fit a two-topic LDA model
ap_lda <- LDA(AssociatedPress, k = 2, control = list(seed = 1234))
ap_lda
# --- Word-topic probabilities ---
# tidytext provides a method for extracting the per-topic-per-word
# probabilities, called beta, from the model.
library(tidytext)
ap_topics <- tidy(ap_lda, matrix = "beta")
ap_topics
# use dplyr's top_n() to find the 10 terms that are most common within each topic
library(ggplot2)
library(dplyr)
ap_top_terms <- ap_topics %>%
  group_by(topic) %>%
  top_n(10, beta) %>%
  ungroup() %>%
  arrange(topic, -beta)
ap_top_terms %>%
  mutate(term = reorder(term, beta)) %>%
  ggplot(aes(term, beta, fill = factor(topic))) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~ topic, scales = "free") +
  coord_flip()
# consider the terms that had the greatest difference in beta between topic 1
# and topic 2, estimated via the log2 ratio of the two
library(tidyr)
beta_spread <- ap_topics %>%
  mutate(topic = paste0("topic", topic)) %>%
  spread(topic, beta) %>%
  filter(topic1 > .001 | topic2 > .001) %>%
  mutate(log_ratio = log2(topic2 / topic1))
beta_spread
# --- Document-topic probabilities ---
# Besides estimating each topic as a mixture of words, LDA also models each
# document as a mixture of topics (the gamma matrix).
ap_documents <- tidy(ap_lda, matrix = "gamma")
ap_documents
# document 6 was drawn almost entirely from topic 2, having a gamma from
# topic 1 close to zero.
tidy(AssociatedPress) %>%
  filter(document == 6) %>%
  arrange(desc(count))
# --- Example: the great library heist ---
# This vandal has torn the books into individual chapters, and left them in one
# large pile. How can we restore these disorganized chapters to their original books?
titles <- c("Twenty Thousand Leagues under the Sea", "The War of the Worlds",
            "Pride and Prejudice", "Great Expectations")
library(gutenbergr)
books <- gutenberg_works(title %in% titles) %>%
  gutenberg_download(meta_fields = "title")
# divide these into chapters, use tidytext's unnest_tokens() to separate them
# into words, then remove stop_words
library(stringr)
# divide into documents, each representing one chapter
by_chapter <- books %>%
  group_by(title) %>%
  mutate(chapter = cumsum(str_detect(text, regex("^chapter ", ignore_case = TRUE)))) %>%
  ungroup() %>%
  filter(chapter > 0) %>%
  unite(document, title, chapter)
# split into words
by_chapter_word <- by_chapter %>%
  unnest_tokens(word, text)
# find document-word counts
word_counts <- by_chapter_word %>%
  anti_join(stop_words) %>%
  count(document, word, sort = TRUE) %>%
  ungroup()
word_counts
# --- LDA on chapters ---
# word_counts is in a tidy form (one term per document per row), but the
# topicmodels package requires a DocumentTermMatrix
chapters_dtm <- word_counts %>%
  cast_dtm(document, word, n)
chapters_dtm
# use the LDA() function to create a four-topic model. In this case we know
# we're looking for four topics because there are four books
chapters_lda <- LDA(chapters_dtm, k = 4, control = list(seed = 1234))
chapters_lda
chapter_topics <- tidy(chapters_lda, matrix = "beta")
chapter_topics
# e.g. the term "joe" has an almost zero probability of being generated from
# topics 1, 2, or 3, but it makes up 1.45% of topic 4
top_terms <- chapter_topics %>%
  group_by(topic) %>%
  top_n(5, beta) %>%
  ungroup() %>%
  arrange(topic, -beta)
top_terms
library(ggplot2)
top_terms %>%
  mutate(term = reorder(term, beta)) %>%
  ggplot(aes(term, beta, fill = factor(topic))) +
  geom_col(show.legend = FALSE) +
  facet_wrap(~ topic, scales = "free") +
  coord_flip()
# --- Per-document classification ---
# Can we put the chapters back together in the correct books? We can find this
# by examining the per-document-per-topic probabilities, gamma.
chapters_gamma <- tidy(chapters_lda, matrix = "gamma")
chapters_gamma
# Now that we have these topic probabilities, we can see how well our
# unsupervised learning did at distinguishing the four books.
chapters_gamma <- chapters_gamma %>%
  separate(document, c("title", "chapter"), sep = "_", convert = TRUE)
chapters_gamma
# reorder titles in order of topic 1, topic 2, etc before plotting
chapters_gamma %>%
  mutate(title = reorder(title, gamma * topic)) %>%
  ggplot(aes(factor(topic), gamma)) +
  geom_boxplot() +
  facet_wrap(~ title)
# We notice that almost all of the chapters from Pride and Prejudice, War of
# the Worlds, and Twenty Thousand Leagues Under the Sea were uniquely
# identified as a single topic each; some chapters from Great Expectations
# (which should be topic 4) were somewhat associated with other topics.
chapter_classifications <- chapters_gamma %>%
  group_by(title, chapter) %>%
  top_n(1, gamma) %>%
  ungroup()
chapter_classifications
# compare each to the "consensus" topic for each book (the most common topic
# among its chapters)
book_topics <- chapter_classifications %>%
  count(title, topic) %>%
  group_by(title) %>%
  top_n(1, n) %>%
  ungroup() %>%
  transmute(consensus = title, topic)
chapter_classifications %>%
  inner_join(book_topics, by = "topic") %>%
  filter(title != consensus)
# --- By word assignments: augment ---
# take the original document-word pairs and find which words in each document
# were assigned to which topic.
assignments <- augment(chapters_lda, data = chapters_dtm)
assignments
# combine this assignments table with the consensus book titles to find which
# words were incorrectly classified.
assignments <- assignments %>%
  separate(document, c("title", "chapter"), sep = "_", convert = TRUE) %>%
  inner_join(book_topics, by = c(".topic" = "topic"))
assignments
# visualize a confusion matrix, showing how often words from one book were
# assigned to another, using dplyr's count() and ggplot2's geom_tile
assignments %>%
  count(title, consensus, wt = count) %>%
  group_by(title) %>%
  mutate(percent = n / sum(n)) %>%
  ggplot(aes(consensus, title, fill = percent)) +
  geom_tile() +
  scale_fill_gradient2(high = "red") +
  theme_minimal() +
  theme(axis.text.x = element_text(angle = 90, hjust = 1),
        panel.grid = element_blank()) +
  labs(x = "Book words were assigned to",
       y = "Book words came from",
       fill = "% of assignments")
# We notice that almost all the words for Pride and Prejudice, Twenty Thousand
# Leagues Under the Sea, and War of the Worlds were correctly assigned, while
# Great Expectations had a fair number of misassigned words.
# What were the most commonly mistaken words?
wrong_words <- assignments %>%
  filter(title != consensus)
wrong_words
wrong_words %>%
  count(title, consensus, term, wt = count) %>%
  ungroup() %>%
  arrange(desc(n))
# a number of words were often assigned to the Pride and Prejudice or War of
# the Worlds cluster even when they appeared in Great Expectations.
word_counts %>%
  filter(word == "flopson")
# --- Alternative LDA implementations ---
# mallet package: takes non-tokenized documents and performs the tokenization
# itself, and requires a separate file of stopwords
library(mallet)
# create a vector with one string per chapter
collapsed <- by_chapter_word %>%
  anti_join(stop_words, by = "word") %>%
  mutate(word = str_replace(word, "'", "")) %>%
  group_by(document) %>%
  summarize(text = paste(word, collapse = " "))
# create an empty file of "stopwords"
file.create(empty_file <- tempfile())
docs <- mallet.import(collapsed$document, collapsed$text, empty_file)
mallet_model <- MalletLDA(num.topics = 4)
mallet_model$loadDocuments(docs)
mallet_model$train(100)
# Once the model is created, however, we can use the tidy() and augment()
# functions described in the rest of the chapter in an almost identical way.
# word-topic pairs
tidy(mallet_model)
# document-topic pairs
tidy(mallet_model, matrix = "gamma")
# column needs to be named "term" for "augment"
term_counts <- rename(word_counts, term = word)
augment(mallet_model, term_counts)
|
d61f5d31a176edcf627e7994f1d3ee7381cb3c4c | e1c533911bfe894b5819103b18b4e76fa182e038 | /metadata_workflow_Postgres_Postgis/scripts/write_metadata_EML.R | f24b99d009bd46dfb40b9809d7e706f4b4beb855 | [] | no_license | juldebar/R_Metadata | 2ea1abf7bca0739c342caef9650f8ea16d573d03 | 0c93dd844774eae99eca16e865c843a632fb9350 | refs/heads/master | 2020-03-21T07:30:28.044333 | 2019-08-22T12:12:12 | 2019-08-22T12:12:12 | 138,283,915 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,172 | r | write_metadata_EML.R | # DONE BY FOLLOWING ONLINE TUTORIAL https://github.com/ropensci/EML/blob/master/vignettes/creating-EML.Rmd
# https://github.com/ropensci/EML
# GOAL IS TO GENERATE EML FOR GBIF IPT : eg http://vmirdgbif-proto.mpl.ird.fr:8080/ipt/eml.do?r=ecoscope_observation_database&v=6.0
# download.file(url="http://vmirdgbif-proto.mpl.ird.fr:8080/ipt/eml.do?r=ecoscope_observation_database&v=6.0", destfile = "test_eml.xml", method="curl")
# f <- system.file("test_eml.xml", package = "EML")
# eml <- read_eml(f)
#write_metadata_OGC_19115
# Build an EML (Ecological Metadata Language) document from Dublin Core style
# metadata plus companion metadata frames.
#
# Arguments:
#   config            - list with SDI configuration: $sdi$db$con (DB connection),
#                       $logger* closures and $gsheets$contacts (contact table)
#   metadata          - Dublin Core record (Title, Description, Date, Rights, Identifier)
#   contacts_metadata - list with $contacts_roles data.frame (dataset, contact, RoleCode)
#   spatial_metadata  - list with $dynamic_metadata_spatial_Extent (xmin/xmax/ymin/ymax)
#   temporal_metadata - list with $dynamic_metadata_temporal_Extent (start_date/end_date)
#   keywords_metadata - data.frame with all (static & dynamic) keywords + thesaurus
#   urls_metadata     - list of dynamic / common URLs (currently unused)
# Returns: an EML "eml" object ready to be serialized with write_eml().
write_EML_metadata_from_Dublin_Core <- function(config = NULL,
                                                metadata = NULL,
                                                contacts_metadata = NULL,
                                                spatial_metadata = NULL,
                                                temporal_metadata = NULL,
                                                keywords_metadata = NULL, # DATAFRAME WITH ALL (STATIC & DYNAMIC) KEYWORDS
                                                urls_metadata= NULL # LIST OF DYNAMIC / COMMON URLs
                                                )
{
  #config shortcuts
  con <- config$sdi$db$con
  logger <- config$logger
  logger.info <- config$logger.info
  logger.warn <- config$logger.warn
  logger.error <- config$logger.error
  logger.info("----------------------------------------------------")
  logger.info("EML: MAIN METADATA ELEMENTS")
  logger.info("----------------------------------------------------")
  # Core Dublin Core elements mapped straight onto EML dataset fields
  pubDate <- as.character(as.Date(metadata$Date))
  title <- metadata$Title
  abstract <- metadata$Description
  intellectualRights <- metadata$Rights
  logger.info("----------------------------------------------------")
  logger.info("DATA DICTIONNARY => TO BE DONE => MANAGE COMMON CODE TO GET DATA DICTIONNARY FROM FEATURE CATALOG")
  logger.info("----------------------------------------------------")
  # TODO: build dataTable (entityName / entityDescription / physical / attributeList)
  # from the feature catalog; draft kept below for reference.
  # entityName=paste("../",local_subDirCSV,"/",static_metadata_dataset_name,".csv",sep="")
  # attributeList <- set_attributes(attributes, factors, col_classes = NULL)
  # physical <- set_physical(objectName=entityName,numHeaderLines="1",fieldDelimiter=",")
  # dataTable <- new("dataTable",
  #                  entityName = entityName,
  #                  entityDescription = static_metadata_table_description,
  #                  physical = physical,
  #                  attributeList = attributeList)
  logger.info("----------------------------------------------------")
  logger.info("Coverage metadata => TO BE DONE => geographicCoverage / temporalCoverage / taxonomicCoverage.")
  logger.info("----------------------------------------------------")
  # Temporal extent. NOTE(review): if dynamic_metadata_temporal_Extent is NULL,
  # start_date/end_date are undefined and set_coverage() below will fail -- confirm
  # upstream always provides a temporal extent.
  if(is.null(temporal_metadata$dynamic_metadata_temporal_Extent)==FALSE){
    start_date <- temporal_metadata$dynamic_metadata_temporal_Extent$start_date
    end_date <- temporal_metadata$dynamic_metadata_temporal_Extent$end_date
  }
  # BUGFIX: the bounding box previously swapped axes (east was set to ymax and
  # north to xmax). west/east are longitudes (xmin/xmax), south/north are
  # latitudes (ymin/ymax).
  coverage <- set_coverage(begin = as.character(as.Date(start_date)),
                           end = as.character(as.Date(end_date)),
                           sci_names = "Sarracenia purpurea", # TO BE DONE: real taxonomicCoverage
                           geographicDescription = "geographic_identifier", # TO BE DONE
                           west = spatial_metadata$dynamic_metadata_spatial_Extent$xmin,
                           east = spatial_metadata$dynamic_metadata_spatial_Extent$xmax,
                           north = spatial_metadata$dynamic_metadata_spatial_Extent$ymax,
                           south = spatial_metadata$dynamic_metadata_spatial_Extent$ymin,
                           altitudeMin = 0, # TO BE DONE
                           altitudeMaximum = 0, # TO BE DONE
                           altitudeUnits = "meter")
  logger.info("Spatial and Temporal extent added!")
  logger.info("----------------------------------------------------")
  logger.info("Creating parties => TO BE DONE => MANAGE ALL CONTACTS IN A LOOP.")
  logger.info("----------------------------------------------------")
  contacts <- config$gsheets$contacts
  number_row<-nrow(contacts_metadata$contacts_roles)
  if(is.null(contacts_metadata$contacts_roles)==FALSE && number_row > 0){
    for(i in 1:number_row){
      if(contacts_metadata$contacts_roles$dataset[i]== metadata$Identifier){#@julien => condition inutile ?
        the_contact <- contacts[contacts$electronicMailAddress%in%contacts_metadata$contacts_roles$contact[i],]
        cat(the_contact$electronicMailAddress)
        cat(contacts_metadata$contacts_roles$RoleCode[i])
        HF_address <- new("address",
                          deliveryPoint = the_contact$deliveryPoint,
                          city = the_contact$city,
                          administrativeArea = the_contact$administrativeArea,
                          postalCode = the_contact$postalCode,
                          country = the_contact$country)
        # Map ISO 19115 role codes onto the corresponding EML party element.
        eml_contact_role <-NULL
        eml_contact_role <- switch(contacts_metadata$contacts_roles$RoleCode[i],
                                   "metadata" = "associatedParty",
                                   "pointOfContact" = "contact",
                                   "principalInvestigator" = "contact",
                                   "publisher" = "contact",
                                   "owner" = "contact",
                                   "originator" = "contact"
        )
        # TODO(review): new_eml_contact is overwritten on every iteration, so
        # only the LAST matching contact survives into the dataset below;
        # accumulate contacts in a list when multi-contact support is added.
        new_eml_contact <- new(eml_contact_role,
                               individualName = paste(the_contact$Name,the_contact$firstname, sep=" "),
                               electronicMail = the_contact$electronicMailAddress,
                               address = HF_address,
                               organizationName = the_contact$organisationName,
                               phone = the_contact$voice)
      }
      if(is.null(eml_contact_role)){
        logger.info("No mapping has been found for the role of the contact !")
      }
    }
  }
  logger.info("----------------------------------------------------")
  logger.info("ADDING KEYWORDS")
  logger.info("----------------------------------------------------")
  # TO BE DONE => MANAGE PROPERLY KEYWORDS FOR SPECIES AS TAXONOMIC COVERAGE
  # Build one EML keywordSet per thesaurus, each holding its keywords.
  if(is.null(keywords_metadata)==FALSE){
    different_thesaurus <- unique(keywords_metadata$thesaurus)
    number_thesaurus<-length(unique(different_thesaurus))
    keywordSet <- c()
    for(t in 1:number_thesaurus){
      # collect the non-NA keywords belonging to this thesaurus
      number_row_kw<-nrow(keywords_metadata$all_keywords)
      vector <- character(0)
      for (i in 1:number_row_kw) {
        if(keywords_metadata$all_keywords$thesaurus[i]==different_thesaurus[t] & !is.na(keywords_metadata$all_keywords$keyword[i])){
          vector[[length(vector)+1]] <- keywords_metadata$all_keywords$keyword[i]
        }
      }
      all_thesaurus <- new("keywordSet",
                           keywordThesaurus = different_thesaurus[t],
                           keyword = vector)
      keywordSet[[t]] <- all_thesaurus
    }
  }
  logger.info("----------------------------------------------------")
  logger.info("WRITE EML METADATA")
  logger.info("----------------------------------------------------")
  dataset <- new("dataset",
                 title = title,
                 creator = new_eml_contact,
                 pubDate = pubDate,
                 intellectualRights = intellectualRights,
                 abstract = abstract,
                 associatedParty = new_eml_contact,#@julien => select new_eml_contact where role=associatedParty
                 keywordSet = keywordSet,
                 coverage = coverage,
                 contact = new_eml_contact,
                 # methods = methods,
                 dataTable = NULL)
  # TODO: replace the hard-coded packageId with uuid::UUIDgenerate()
  eml <- new("eml",
             packageId = "toto-2619-425e-b8be-8deb6bc6094d", # from uuid::UUIDgenerate(),
             system = "uuid", # type of identifier
             dataset = dataset)
  logger.info("----------------------------------------------------")
  logger.info("EML metadata has been generated.")
  logger.info("----------------------------------------------------")
  return(eml)
}
21a0142be26df3ccad837f75a438dc4db57edd83 | 74f428a05dc1aa31b2caad6b5da3cef2b90e1ed5 | /maldonado/R/function_brainstorms.R | 8b63fdb4dfadb35328fe3fe9c4a18db7acbbfa14 | [] | no_license | BiologicalDataAnalysis2019/2020 | e689d9401ea632b5033a7b2bea7654cefcfa246f | a1f587d39abd16b1edd18fc5292ab53b92f40f59 | refs/heads/master | 2023-06-19T06:54:38.173314 | 2020-11-20T22:38:50 | 2020-11-20T22:38:50 | 283,593,237 | 0 | 12 | null | 2020-12-01T21:50:04 | 2020-07-29T20:12:06 | HTML | UTF-8 | R | false | false | 665 | r | function_brainstorms.R | #' Calculate size ranges for each life stage of each species in database
#' Summarize which trap type in which each species was caught the most
#' Run a ttest or ANOVA on different parameters to determine significance
#'
#'
#'
#' Plot a frequency distribution graph for numbers of individuals at different sizes with individuals colored for season or time of year
#' Determine which stream factors most predicted occurrence of a species
#' Pull records data for Mississippi amphibians and reptiles from multiple open-source datasets, sort to county level, and eliminate duplicates to produce a dataset from which Mississippi distribution maps could be generated |
32244d4caf646871e11b7b1dce0a96792004eea7 | 1502987a5a27865fc634b74b55f4a2c67f6d9a78 | /logistic_gwas_example/server.R | 2eb621da5b7d42407fc1e5383a04a685b7588458 | [] | no_license | syyang93/shiny_snp | 811b441f1f5fa29c4c8fa78a46ca823bb6d24303 | 00d3c00189c4013856281c1a3aa66b6f700edff6 | refs/heads/master | 2023-03-10T17:02:07.250461 | 2021-02-24T16:43:47 | 2021-02-24T16:43:47 | 285,067,739 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,619 | r | server.R | library(shiny)
library(shinythemes)
library(rsconnect)
library(ggplot2)
# Format a regression coefficient for display with exactly three decimals.
# BUGFIX: formatC's second positional argument is `width`, not `digits`;
# the original therefore printed six decimals (e.g. "0.123000") instead of
# the intended three ("0.123"). Pass digits explicitly.
formatbeta = function(beta){
  formatC(round(beta, 3), digits = 3, format = "f")
}
# Format a p-value for display: fixed three decimals when p > 0.001,
# otherwise two-digit scientific notation. Vectorized over `pval`.
formatpval = function(pval){
  fixed <- format(round(pval, 3), nsmall = 3)
  scientific <- formatC(pval, format = "e", digits = 2)
  ifelse(pval > 0.001, fixed, scientific)
}
Sys.setlocale("LC_CTYPE", "en_US.UTF-8") # must specify encoding!
Sys.setlocale("LC_ALL", "English")
# Shiny server: simulates genotype/disease data for a logistic-regression GWAS
# example, then reports model estimates, a fitted-curve plot, and a download.
function(input, output) {
  # Simulated data set: one row per individual, columns = allele count (0/1/2)
  # and disease status (0/1). Counts per genotype are hard-coded below;
  # NOTE(review): the commented input$ bindings suggest these were once
  # user-adjustable -- confirm the hard-coding is intentional for this example.
  df <- reactive ({
    # Individuals with disease
    # dis.0 = input$disease_0
    # dis.1 = input$disease_1
    # dis.2 = input$disease_2
    dis.0 = 10
    dis.1 = 50
    dis.2 = 100
    disease = rbind(data.frame(alleles = rep(0, dis.0), disease = rep(1, dis.0)), data.frame(alleles = rep(1, dis.1), disease = rep(1, dis.1)), data.frame(alleles = rep(2, dis.2), disease = rep(1, dis.2)))
    # Individuals without disease
    # nodis.0 = input$nodisease_0
    # nodis.1 = input$nodisease_1
    # nodis.2 = input$nodisease_2
    nodis.0 = 100
    nodis.1 = 50
    nodis.2 = 10
    nodisease = rbind(data.frame(alleles = rep(0, nodis.0), disease = rep(0, nodis.0)), data.frame(alleles = rep(1, nodis.1), disease = rep(0, nodis.1)), data.frame(alleles = rep(2, nodis.2), disease = rep(0, nodis.2)))
    df = rbind(disease, nodisease)
    # assign shuffled IDs and return rows in ID order
    df$ID = sample(1:nrow(df))
    df = dplyr::select(df, ID, alleles, disease)
    df = df[order(df$ID),]
  })
  # CSV download of the simulated data
  output$downloadData <- downloadHandler(filename ="simulated_data.csv",
                                         content = function(file){
                                           write.csv(print(df()), file, row.names = F)})
  # Static heading above the regression output
  output$lm <- renderText({
    paste0('Estimates from logistic regression:')
  })
  # Fit disease ~ alleles and display coefficient, SE and p-value.
  # NOTE(review): the labels mention "hours studied" but the model is
  # alleles -> disease; likely copy-pasted from another example -- confirm.
  output$text <- renderUI({
    data = df()
    lm.fit = coef(summary(glm(disease ~ alleles, data = data, family = 'binomial')))
    str1 <- paste("Effect estimate (Points gained per hour studied):", formatbeta(lm.fit[2,1]))
    str2 <- paste("Standard error (Reliability of estimate):", formatbeta(lm.fit[2,2]))
    str3 <- paste("P-value (Significance of association):", formatpval(lm.fit[2,4]))
    HTML(paste(str1, str2, str3, sep = '<br/>'))
  })
  # Jittered scatter of disease status vs allele count with the fitted
  # logistic curve overlaid.
  output$showPlot <- renderPlot({
    data = df()
    p = ggplot(data, aes(alleles, disease)) + stat_smooth(method="glm", method.args=list(family="binomial"), se=FALSE) +
      geom_jitter(width = 0.1, height = 0.1) +
      xlab('Number of disease alleles') +
      scale_x_continuous(breaks=c(0, 1, 2)) +
      scale_y_continuous(breaks=c(0, 1)) + ylab('Disease status')
    print(p)
  })
}
|
07ad5f53bb17fa7d009410d1554b83a6b340b074 | c05891fe5cdbd620b4e1f6fa782c7beb8f9c2a07 | /R/08asBibEntry.R | 6efb774da8f45a907bbabdc5d981a40e6b119bae | [] | no_license | aurora-mareviv/RefManageR | 03e7d165cc587609db3069478c5f7639f08a13d6 | b9adf6175fd9effb2065b2519bc9fc4de9c30a1f | refs/heads/master | 2021-01-14T13:17:28.766337 | 2014-08-18T00:00:00 | 2014-08-18T00:00:00 | 25,426,581 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,290 | r | 08asBibEntry.R | #' Coerce to a BibEntry object
#'
#' Functions to check if an object is a BibEntry, or coerce it if possible.
#'
#' @param x any \code{R} object.
#' @details \code{as.BibEntry} is able to coerce suitably formatted character vectors, \code{\link{bibentry}} objects, lists,
#' and data.frames to BibEntry objects. See the examples.
#' @note Each entry to be coerced should have a bibtype, key, and all required fields for the specified bibtype.
#' @return \code{as.BibEntry} - if successful, an object of class BibEntry.
#' @aliases is.BibEntry
#' @keywords utilities
#' @export
#' @seealso \code{\link{BibEntry}}
#' @examples
#' file.name <- system.file("Bib", "biblatexExamples.bib", package="RefManageR")
#' bib <- suppressMessages(ReadBib(file.name))[[20:21]]
#' identical(as.BibEntry(unlist(bib)), bib) ## see also RelistBibEntry
#'
#' identical(as.BibEntry(unclass(bib)), bib)
#'
#' identical(as.BibEntry(as.data.frame(bib)), bib)
#'
#' bib <- c(bibtype = "article", key = "mclean2014", title = "My New Article",
#' author = "Mathew W. McLean", journaltitle = "The Journal", date = "2014-01")
#' as.BibEntry(bib)
#'
#' bib <- bibentry(bibtype = "article", key = "mclean2014", title = "My New Article",
#' journal = "The Journal", year = 2014, author = "Mathew W. McLean")
#' print(bib, .bibstyle = "JSS")
#' as.BibEntry(bib)
#'
#' bib <- list(c(bibtype = "article", key = "mclean2014a", title = "My New Article",
#' author = "Mathew W. McLean", journaltitle = "The Journal", date = "2014-01"),
#' c(bibtype = "article", key = "mclean2014b", title = "Newer Article",
#' author = "Mathew W. McLean", journaltitle = "The Journal", date = "2014-02"))
#' as.BibEntry(bib)
as.BibEntry <- function(x){
  if (!length(x))
    return(x)
  if (inherits(x, 'BibEntry')){
    # Already a BibEntry: just normalize the class attribute order.
    class(x) <- c('BibEntry', 'bibentry')
  }else if (inherits(x, 'bibentry')){
    # Base-R bibentry: attach a parsed date object and a citation key to each
    # entry, validate it, and drop (with a message) any entry that fails.
    att <- attributes(x)
    x <- lapply(unclass(x), function(y){
      attr(y, "dateobj") <- ProcessDates(y)
      if (!length(attr(y, "key")))
        attr(y, "key") <- CreateBibKey(y[['title']], y[['author']], y[['year']])
      check <- try(.BibEntryCheckBibEntry1(y), TRUE)
      if (inherits(check, 'try-error')){
        message(paste0('Ignoring entry titled \"', y[['title']], '\" because ',
                       strsplit(check, '\\n[[:space:]]*')[[1]][2]))
        return(NULL)
      }
      y
    })
    # vapply is type-stable, unlike sapply, for the NULL filter.
    x <- x[!vapply(x, is.null, logical(1L))]
    if (length(x)){
      attributes(x) <- att
      class(x) <- c('BibEntry', 'bibentry')
    }
  }else if (is.character(x)){
    # Named character vector describing a single entry; 'bibtype' and 'key'
    # are mandatory, the remaining elements become fields.
    if (is.na(x['bibtype']) || is.na(x['key']))
      stop("Object of class character must have entries named bibtype and key.")
    x <- as.list(x)
    attr(x, 'entry') <- x$bibtype
    attr(x, 'key') <- x$key
    x$bibtype <- NULL
    x$key <- NULL
    x <- MakeBibEntry(x, FALSE)
  }else if(is.data.frame(x)){
    # One row per entry; row names supply the citation keys.
    .fields <- colnames(x)
    if (is.null(x$bibtype))
      stop("data.frame must have column for 'bibtype'.")
    keys <- rownames(x)
    if (keys[1L] == '1')
      warning('rownames of data.frame not meaningful for creating keys')
    # Bug fix: the result needs one element per *row*; the original allocated
    # length(x) (the number of columns), leaving trailing NULLs whenever the
    # data.frame has more columns than rows.
    y <- vector('list', nrow(x))
    for (i in seq_len(nrow(x))){
      # Keep only the non-missing fields of this row.
      na.ind <- which(!is.na(x[i, ]))
      y[[i]] <- as.BibEntry(c(setNames(as.character(x[i, na.ind]), .fields[na.ind]), key = keys[i]))
    }
    y <- MakeCitationList(y)
    return(y)
  }else if(is.list(x)){
    if(length(x) == 1L && !is.null(attr(x, 'bibtype'))){
      class(x) <- c('BibEntry', 'bibentry')
    }else if (!is.null(x$dateobj)){  # x is an unlist()ed BibEntry
      x <- RelistBibEntry(x)
    }else if (!is.null(attr(x[[1L]], 'bibtype'))){  # x is simply unclass()ed
      class(x) <- c('BibEntry', 'bibentry')
    }else{
      # A plain list of field vectors: one entry, or one entry per element.
      if (length(x[[1L]]) == 1L){
        x <- do.call(BibEntry, x)
      }else{
        x <- sapply(x, function(...) do.call(BibEntry, as.list(...)))
        class(x) <- c("BibEntry", "bibentry")
      }
    }
  }else{
    # Bug fix: class(x) can have length > 1, which made paste0() produce a
    # multi-element message; collapse it into a single string.
    stop(paste0("Cannot coerce object of class '",
                paste(class(x), collapse = "/"), "' to BibEntry"))
  }
  return(x)
}
#' @rdname as.BibEntry
#' @return \code{is.BibEntry} - logical; \code{TRUE} if \code{x} is a BibEntry object.
#' @export
# Simple class test: TRUE for objects whose class vector contains "BibEntry"
# (as set by BibEntry()/as.BibEntry()).
is.BibEntry <- function(x){
  inherits(x, "BibEntry")
}
cf3a152d9638f9865fe93b68367d6da1f951a9e9 | a7d61ba51e7cae6001fc0778775f2852b641b59f | /tests/testthat/test_matrix_functions.R | 8c3ef47a98ae78179f2fcdd6e505ccabf3d461c0 | [
"MIT"
] | permissive | user01/uvadsi | 2a06dc1f4383dfd0ad49b0b807ca25b155812d5c | 07b5e4c86ffb6cd995ec61c2b95710f3deab1283 | refs/heads/master | 2021-01-11T04:13:35.981998 | 2016-11-12T00:03:28 | 2016-11-12T00:03:28 | 71,203,663 | 1 | 2 | null | 2016-11-12T00:03:28 | 2016-10-18T03:03:24 | R | UTF-8 | R | false | false | 4,828 | r | test_matrix_functions.R |
# Libraries
suppressPackageStartupMessages(library(purrr))
suppressPackageStartupMessages(library(dplyr))
# #############################################################################
# Column Selection
# #############################################################################
# Tests for get_col(): no index returns the whole matrix, a single index
# returns a vector, multiple indices return a sub-matrix; names work too.

test_that("Get empty column", {
  m <- matrix(1:6, nrow = 2)
  expect_equal(get_col(m), matrix(1:6, nrow = 2))
})

test_that("Get one column index", {
  m <- matrix(1:6, nrow = 2)
  expect_equal(get_col(m, 2), c(3, 4))
})

test_that("Get two columns indices", {
  m <- matrix(1:6, nrow = 2)
  expect_equal(get_col(m, c(2, 3)), matrix(3:6, nrow = 2))
})

test_that("Get one column name", {
  m <- matrix(1:6, nrow = 2, byrow = TRUE,
              dimnames = list(c("a", "b"), c("x", "y", "z")))
  expect_equal(get_col(m, "z"), c(a = 3, b = 6))
})

test_that("Get two column name", {
  m <- matrix(1:6, nrow = 2, byrow = TRUE,
              dimnames = list(c("a", "b"), c("x", "y", "z")))
  expected <- matrix(c(1, 3, 4, 6), nrow = 2, byrow = TRUE,
                     dimnames = list(c("a", "b"), c("x", "z")))
  expect_equal(get_col(m, c("x", "z")), expected)
})
# #############################################################################
# Row Selection
# #############################################################################
# Tests for get_row(): mirrors the get_col() behaviour for matrix rows.

test_that("Get empty row", {
  m <- matrix(1:6, nrow = 3)
  expect_equal(get_row(m), matrix(1:6, nrow = 3))
})

test_that("Get one row index", {
  m <- matrix(1:6, nrow = 3)
  expect_equal(get_row(m, 2), c(2, 5))
})

test_that("Get two row indices", {
  m <- matrix(1:6, nrow = 3)
  expect_equal(get_row(m, c(2, 3)), matrix(c(2, 3, 5, 6), nrow = 2))
})

test_that("Get one row name", {
  m <- matrix(1:6, nrow = 3, byrow = TRUE,
              dimnames = list(c("a", "b", "c"), c("x", "y")))
  expect_equal(get_row(m, "b"), c(x = 3, y = 4))
})

test_that("Get two row names", {
  m <- matrix(1:6, nrow = 3, byrow = TRUE,
              dimnames = list(c("a", "b", "c"), c("x", "y")))
  expected <- matrix(c(1, 2, 5, 6), nrow = 2, byrow = TRUE,
                     dimnames = list(c("a", "c"), c("x", "y")))
  expect_equal(get_row(m, c("a", "c")), expected)
})
# #############################################################################
# Set Column Names
# #############################################################################
# Tests for set_colnames(): assigning, overriding, and clearing (NULL or no
# argument) the column names of a matrix.

test_that("Set Column Names 2", {
  result <- set_colnames(matrix(1:6, nrow = 3), c("a", "b"))
  expect_equal(result,
               matrix(1:6, nrow = 3, dimnames = list(NULL, c("a", "b"))))
})

test_that("Set Column Names 3", {
  result <- set_colnames(matrix(1:6, nrow = 2), c("a", "b", "c"))
  expect_equal(result,
               matrix(1:6, nrow = 2, dimnames = list(NULL, c("a", "b", "c"))))
})

test_that("Override Column Names", {
  m <- matrix(1:6, nrow = 2, dimnames = list(NULL, c("x", "y", "z")))
  expect_equal(set_colnames(m, c("a", "b", "c")),
               matrix(1:6, nrow = 2, dimnames = list(NULL, c("a", "b", "c"))))
})

test_that("Set Column Names Clear", {
  m <- matrix(1:6, nrow = 2, dimnames = list(NULL, c("a", "b", "c")))
  expect_equal(set_colnames(m, NULL),
               matrix(1:6, nrow = 2, dimnames = list(NULL, NULL)))
})

test_that("Set Column Names Clear Empty", {
  m <- matrix(1:6, nrow = 2, dimnames = list(NULL, c("a", "b", "c")))
  expect_equal(set_colnames(m),
               matrix(1:6, nrow = 2, dimnames = list(NULL, NULL)))
})
# #############################################################################
# Set Row Names
# #############################################################################
# Tests for set_rownames(): assigning, overriding, and clearing (NULL or no
# argument) the row names of a matrix.

test_that("Set Row Names 2", {
  result <- set_rownames(matrix(1:6, nrow = 2), c("a", "b"))
  expect_equal(result,
               matrix(1:6, nrow = 2, dimnames = list(c("a", "b"), NULL)))
})

test_that("Set Row Names 3", {
  result <- set_rownames(matrix(1:6, nrow = 3), c("a", "b", "c"))
  expect_equal(result,
               matrix(1:6, nrow = 3, dimnames = list(c("a", "b", "c"), NULL)))
})

test_that("Override Row Names", {
  m <- matrix(1:6, nrow = 3, dimnames = list(c("x", "y", "z"), NULL))
  expect_equal(set_rownames(m, c("a", "b", "c")),
               matrix(1:6, nrow = 3, dimnames = list(c("a", "b", "c"), NULL)))
})

test_that("Set Row Names Clear", {
  m <- matrix(1:6, nrow = 3, dimnames = list(c("a", "b", "c"), NULL))
  expect_equal(set_rownames(m, NULL),
               matrix(1:6, nrow = 3, dimnames = list(NULL, NULL)))
})

test_that("Set Row Names Empty", {
  m <- matrix(1:6, nrow = 3, dimnames = list(c("a", "b", "c"), NULL))
  expect_equal(set_rownames(m),
               matrix(1:6, nrow = 3, dimnames = list(NULL, NULL)))
})
|
96ea39c0c8ea368dcc3a9acc9aff8f36ebda9d30 | 3474af6c604afd89a64b3a1a637f02384669dba7 | /man/scale_label_pictogram.Rd | 3d0f0d872bf145332ed502e8bdc61bfd8f9fe966 | [] | no_license | edwindj/waffle | 4dbb28d1aabaaa0a93502fa122fcb853400924dd | 1d076c55f30b1a5ad101679be726e5d90c86f91b | refs/heads/master | 2020-07-08T04:51:51.664029 | 2019-08-21T12:20:26 | 2019-08-21T12:20:26 | 203,570,123 | 1 | 1 | null | 2019-08-21T11:28:08 | 2019-08-21T11:28:07 | null | UTF-8 | R | false | true | 456 | rd | scale_label_pictogram.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/geom-pictogram.R
\name{scale_label_pictogram}
\alias{scale_label_pictogram}
\title{Used with geom_pictogram() to map Font Awesome fonts to labels}
\usage{
scale_label_pictogram(..., values, aesthetics = "label")
}
\arguments{
\item{...}{dots}
\item{values}{values}
\item{aesthetics}{aesthetics}
}
\description{
Used with geom_pictogram() to map Font Awesome fonts to labels
}
|
b43df4cdb286ddbb8bab36d3dcfa2363f6fe8bae | b33611762071f9277bf18d712d3beaddb1683788 | /man/clumper.Rd | a41ef6f1ce3c0a9037cdc3fe489109507116e5b3 | [] | no_license | fxcebx/soundgen | abc6bb7d7aded02e11fe6bd88cb058ca0947f75f | 2d8ae67893509bd29d132aaa04c0e9385879ddd9 | refs/heads/master | 2020-09-06T21:51:23.464374 | 2019-10-31T17:43:11 | 2019-10-31T17:43:11 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,391 | rd | clumper.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilities_math.R
\name{clumper}
\alias{clumper}
\title{Clump a sequence into large segments}
\usage{
clumper(s, minLength)
}
\arguments{
\item{s}{a vector (soundgen supplies integers, but \code{clumper} also works
on a vector of floats, characters or booleans)}
\item{minLength}{an integer or vector of integers indicating the desired
length of a segment at each position (can vary with time, e.g., if we are
processing pitch_per_gc values)}
}
\value{
Returns the original sequence s transformed to homogeneous segments
of required length.
}
\description{
Internal soundgen function.
}
\details{
\code{clumper} makes sure each homogeneous segment in a sequence is at least
minLength long. Called by getIntegerRandomWalk() and getVocalFry(). Algorithm:
go through the sequence once. If a short segment is encountered, it is pooled
with the previous one (i.e., the currently evaluated segment grows until it
is long enough, which may shorten the following segment). Finally, the last
segment is checked separately. This is CRUDE - a smart implementation is
pending!
}
\examples{
s = c(1,3,2,2,2,0,0,4,4,1,1,1,1,1,3,3)
soundgen:::clumper(s, 2)
soundgen:::clumper(s, 3)
soundgen:::clumper(s, seq(1, 3, length.out = length(s)))
soundgen:::clumper(c('a','a','a','b','b','c','c','c','a','c'), 4)
}
\keyword{internal}
|
71dc93fb38adc4ee06e8ee51df249aa6b4c3c893 | 2da2406aff1f6318cba7453db555c7ed4d2ea0d3 | /inst/snippet/t-robust-qq-fig.R | 38eea306efa6ae3d1afc5a9e77896e1c2764f089 | [] | no_license | rpruim/fastR2 | 4efe9742f56fe7fcee0ede1c1ec1203abb312f34 | d0fe0464ea6a6258b2414e4fcd59166eaf3103f8 | refs/heads/main | 2022-05-05T23:24:55.024994 | 2022-03-15T23:06:08 | 2022-03-15T23:06:08 | 3,821,177 | 11 | 8 | null | null | null | null | UTF-8 | R | false | false | 894 | r | t-robust-qq-fig.R | ExpSims <-
expand.grid(n = c(10, 20, 40, 80), rep = 1:2000) %>%
group_by(n, rep) %>%
mutate(
pval = pval(t.test(rexp(n), mu = 1)),
dist = paste0("Exp(1); n=", n))
TSims <-
expand.grid(n = c(10, 20, 40, 80), rep = 1:2000) %>%
group_by(n, rep) %>%
mutate(
pval = pval(t.test(rt(n, df = 3), mu = 0)),
dist = paste0("t(3); n=", n))
gf_qq( ~ pval, data = bind_rows(ExpSims, TSims),
distribution = qunif, geom = "line") %>%
gf_abline(slope = 1, intercept = 0, color = "red",
linetype = "dashed", alpha = 0.6) %>%
gf_facet_wrap( ~ dist, nrow = 2)
gf_qq( ~ pval, data = bind_rows(ExpSims, TSims), na.rm = TRUE,
distribution = qunif, geom = "line") %>%
gf_abline(slope = 1, intercept = 0, color = "red",
linetype = "dashed", alpha = 0.6) %>%
gf_lims(x = c(0, 0.2), y = c(0, 0.2)) %>%
gf_facet_wrap( ~ dist, nrow = 2)
|
bc45048a3e14c3297df7d0891659708104cb5f37 | 43b52e0ed2e181cf907d7ddde64c680b67ab654f | /tests/testthat.R | 69844d7ed717de95221ddb0a053fb5071ca9af52 | [] | no_license | purplezippo/quantler | c37e79322fced31d14c66fac34a47322e3d67eb4 | 2e4a2afa24148899c9861930c06108fcddb3b4ff | refs/heads/master | 2021-01-01T20:18:25.550381 | 2019-04-03T16:13:17 | 2019-04-03T16:13:17 | 98,804,327 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 174 | r | testthat.R | library(testthat)
library(quantler)
test_check("quantler")
test_that('maxdrawdown returns a list of two elements',{
expect_equal(length(maxdrawdown(runif(20))), 2)
})
|
c562d080ee6cc624e4820fd7a984d736c4612fc4 | f81c7bce55dd6321908785c030e9928187eda2e4 | /run_analysis.R | f640e74524f5403f7e1b2be2e66e55ed11d1b617 | [] | no_license | vijeshm/CourseraGettingAndCleaningDataCourseProject | 43dc032db0be790f2b7962699f80195e775be83c | 0eda27d99a3b04767e4fe6b89ed472e62eb14fe4 | refs/heads/master | 2021-01-10T19:54:31.445382 | 2014-09-21T17:17:16 | 2014-09-21T17:17:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,817 | r | run_analysis.R | # Descrption: This script is used to clean the Samsung Galaxy S smartphone dataset on Human Activities like walking, sitting, standing, laying etc.
# Author: mv.vijesh@gmail.com
# Dated: 10th September, 2014
# Before running this script, make sure that your working directory is the root of Samsung dataset.
#Step 1 - Merges the training and the test sets to create one data set.
# Read all the required Data
#Activity Index to Label Mapping
con <- file("activity_labels.txt")
activityLabelLines = readLines(con)
close.connection(con)
activityLabels <- list()
activityLabels <- lapply(activityLabelLines, function (e) { t <- strsplit(e, " "); activityLabels[[ t[[1]][1] ]] <- t[[1]][2] })
#Feature Index to Label Mapping
con <- file("features.txt")
featureLines = readLines(con)
close.connection(con)
featureLabels <- list()
featureLabels <- lapply(featureLines, function (e) { t <- strsplit(e, " "); featureLabels[[ t[[1]][1] ]] <- t[[1]][2] })
#Read and store training dataset
con <- file("train/Y_train.txt")
trainingActivities = readLines(con)
close.connection(con)
con <- file("train/X_train.txt")
trainingFeatureVectorsFile = readLines(con)
close.connection(con)
con <- file("train/subject_train.txt")
trainingSubjects = readLines(con)
close.connection(con)
#Read and store test dataset
con <- file("test/Y_test.txt")
testActivities = readLines(con)
close.connection(con)
con <- file("test/X_test.txt")
testFeatureVectorsFile = readLines(con)
close.connection(con)
con <- file("test/subject_test.txt")
testSubjects = readLines(con)
close.connection(con)
#Create a Master Data Set
masterActivities = append(trainingActivities, testActivities)
masterFeatureVectors = append(trainingFeatureVectorsFile, testFeatureVectorsFile)
masterSubjects = append(trainingSubjects, testSubjects)
masterDataFrame = data.frame(activities = masterActivities,
featureVectors = masterFeatureVectors,
subjects = masterSubjects)
#Step 2 - Extracts only the measurements on the mean and standard deviation for each measurement
# NOTE(review): in grepl("mean()", x) the "()" is an empty regex group, so
# this matches any name containing "mean" (including e.g. meanFreq), not the
# literal "mean()" -- use fixed = TRUE if only "mean()"/"std()" are wanted.
featureVectorNames <- as.character(featureLabels)
meanStdIndices <- which(grepl("mean()", featureVectorNames) | grepl("std()", featureVectorNames))

#Do not alter the master data frame
meanStdDataFrame = data.frame(activities = masterDataFrame$activities,
                              subjects = masterDataFrame$subjects)
charFeatureVectors <- as.character(masterDataFrame$featureVectors)

#create an empty temporary data frame to store the feature vector values
# (actually a numeric matrix: one row per observation, one column per
# selected mean/std feature)
featureMatrix <- matrix(data = numeric(), nrow = nrow(meanStdDataFrame), ncol = length(meanStdIndices))

#rbind the parsed vector values onto featureDataFrame
# Parse each whitespace-separated feature line and keep only the selected
# columns.
for(i in 1:length(charFeatureVectors)) {
  charFeatureVector <- charFeatureVectors[i]
  t <- strsplit(charFeatureVector, " +")[[1]]
  t <- t[meanStdIndices + 1] #offset by 1 because the strsplit will have an empty string at the beginning
  featureMatrix[i, ] <- as.numeric(t)
}
meanStdFeatureVectorNames <- featureVectorNames[meanStdIndices]

#create columns onto meanStdDataFrame
for(i in 1:length(meanStdFeatureVectorNames)) {
  #Note that this will also accomplish step 4 - Appropriately labels the data set with descriptive variable names.
  meanStdDataFrame[meanStdFeatureVectorNames[i]] <- featureMatrix[, i]
}

#Step 3 - Uses descriptive activity names to name the activities in the data set
#In this step, we rename the descriptive activities that the users are performing
# NOTE(review): `activityLabels` is an unnamed list, so indexing it with
# `activityCode` only works if `activities` is a factor (the factor's
# integer level codes are used as positions).  Verify this still holds
# where stringsAsFactors defaults to FALSE (R >= 4.0).
meanStdDataFrame$activities <- as.character(sapply(meanStdDataFrame$activities, function(activityCode) { activityLabels[activityCode] }))

#Step 4 - Appropriately labels the data set with descriptive variable names - included as a part of step 2
#Step 5 - From the data set in step 4, creates a second, independent tidy data set with the average of each variable for each activity and each subject.
activitySubjectSplit <- split(meanStdDataFrame, list(meanStdDataFrame$activities, meanStdDataFrame$subjects))

# For every activity/subject combination, average the numeric feature
# columns and keep the identifying columns alongside the means.
summarizedData <- lapply(activitySubjectSplit, function(activitySubjectData) {
  numericDataFrame <- activitySubjectData[, c(-1, -2)]
  activitySubjectDataFrame <- data.frame(
    activities = activitySubjectData$activities[[1]],
    subjects = activitySubjectData$subjects[[1]])
  meanDataFrame <- data.frame()
  meanDataFrame <- rbind(meanDataFrame, as.numeric(colMeans(numericDataFrame)))
  names(meanDataFrame) <- names(numericDataFrame)
  cbind(activitySubjectDataFrame, meanDataFrame)
})

# Bug fix: the original assigned names() to a zero-column data.frame (an
# error in R: "'names' attribute must be the same length as the vector")
# and then grew the result with rbind() inside a loop.  Each summary is a
# one-row, fully named data.frame already, so bind them in a single call.
tidyData <- do.call(rbind, lapply(summarizedData, function(activitySubjectMeans) {
  activitySubjectMeans[1, ]
}))
write.table(tidyData, "tidyData.txt", row.names = FALSE)
768ec5f96b3329b98750027e10051be49b0b701e | 01d1ad3dc81f0c01ea6315c70175b2ac69d166fa | /Rcode/GJNodeStatsforTrain and Test.R | 255a81ec8212e167d48fe924994c48ddaf63bed0 | [] | no_license | estellad/Tree-of-Forests | 19133fa629647c1f5d567cef881ecb901b31f267 | 77ad278fe42eec32eaad5c9a0eb5a13ae9b63f64 | refs/heads/main | 2023-04-26T14:33:23.797371 | 2021-05-22T19:15:22 | 2021-05-22T19:15:22 | 300,429,412 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,033 | r | GJNodeStatsforTrain and Test.R | # Getting some Group 1 from Test set
# GENERAL SETTINGS
# require(tcltk)
# # Set the folder path for where the normal and cancer txt files are.
# ReturnVal <- tkmessageBox(title = "Step 1", message = "Please select the directory for the train set", icon = "info", type = "ok")
# direct1 <- tclvalue(tkchooseDirectory(initialdir= ##'Z:/QTP/Martial/LDO2017/TestSet/Testsubset'))
# 'C:/Users/edong/Desktop/DysplasiaGJtxt'))
# fileList <- list.files(path = direct1, pattern=".txt")
require(tcltk)
# Set the folder path for where the normal and cancer txt files are.
ReturnVal1 <- tkmessageBox(title = "Step 1", message = "Please select the directory for the train Cancer G1 set", icon = "info", type = "ok")
direct1 <- tclvalue(tkchooseDirectory(initialdir= ##'Z:/QTP/Martial/LDO2017/TestSet/Testsubset'))
'Z:/QTP/Martial/LDO2017/TrainSet/Cancertxt'))
fileList1 <- list.files(path = direct1, pattern=".txt")
require(tcltk)
# Set the folder path for where the normal and cancer txt files are.
ReturnVal2 <- tkmessageBox(title = "Step 2", message = "Please select the directory for the train Cancer G9 set", icon = "info", type = "ok")
direct2 <- tclvalue(tkchooseDirectory(initialdir= ##'Z:/QTP/Martial/LDO2017/TestSet/Testsubset'))
'Z:/QTP/Martial/LDO2017/TrainSet/Cancer9txt'))
fileList2 <- list.files(path = direct2, pattern=".txt")
################################################ Ready in training data ########################################
library(caret)
library(randomForest)

# Feature columns fed to the classifiers.  `morph`, `photo`, `discrete`,
# `markovian`, `fractal` and `runlength` are feature-name vectors assumed to
# be defined earlier in the session -- TODO confirm.
colnm <- c("GROUP", "area", "DNA_Amount", morph, photo, discrete, markovian, fractal, runlength)

l <- length(fileList1)

# Per-slide bookkeeping vectors, one element per input file pair:
#   Original_*/Processed_*  : cell counts before/after the artefact filters
#   Node<K><TP|FP|TN|FN>    : confusion counts at node K of the cascade
#   Layer<J>_*              : sensitivity/specificity/precision pooled
#                             over all nodes in layer J
# The original declared each of these 41 vectors on its own copy-pasted
# line; generating the names keeps the set consistent and extensible.
stat_names <- c(
  "Original_NumGood", "Original_NumJunk",
  "Processed_NumGood", "Processed_NumJunk",
  paste0("Node", rep(0:6, each = 4), c("TP", "FP", "TN", "FN")),
  paste0("Layer", rep(1:3, each = 3), c("_Sensitivity", "_Specificity", "_Precision"))
)
for (nm in stat_names) {
  assign(nm, numeric(l))
}
# setwd(direct1)
# filename = fileList[i]
# dat <- as.data.frame(read.table(filename, header=TRUE, sep = "" ))
# if (colnames(dat)[1] == "UnitId"){
# dat <- subset(dat, select = -UnitId)
# }
# dat <- dat[, colnm]
# as.factor(dat$GROUP);
# good_ind <- which(dat$GROUP == "1")
# Original_NumGood[i] <- length(good_ind)
# dat$GROUP[good_ind] <- "good"
#
# junk_ind <- which(dat$GROUP == "9")
# Original_NumJunk[i] <- length(junk_ind)
# dat$GROUP[junk_ind] <- "junk"
# colnames(dat)[which(colnames(dat) == "GROUP")] <- "Ycol"
setwd(direct1)
filename1 = fileList1[i]
dat1 <- as.data.frame(read.table(filename1, header=TRUE, sep = "" ))
if (colnames(dat1)[1] == "UnitId"){
dat1 <- subset(dat1, select = -UnitId)
}
setwd(direct2)
filename2 = fileList2[i]
if(filename1 != filename2) {
print("G1/G9 text file title not matched!!! Stop NOW")
}
dat2 <- as.data.frame(read.table(filename2, header=TRUE, sep = "" ))
if (colnames(dat2)[1] == "UnitId"){
dat2 <- subset(dat2, select = -UnitId)
}
dat1 <- dat1[, colnm]; dat2 <- dat2[, colnm]
Original_NumGood[i] <- nrow(dat1)
dat1$GROUP <- "good"
Original_NumJunk[i] <- nrow(dat2)
dat2$GROUP <- "junk"
dat <- rbind(dat1, dat2)
colnames(dat)[which(colnames(dat) == "GROUP")] <- "Ycol"
################### Processing Test Data #########################################
# remove objects with small fractal dimension: the criterion is fractal_dimen <= 1.5
n1 <- nrow(dat)
fractal_dimen <- dat$fractal_dimen
F <- any(fractal_dimen <= 1.5)
if (F==TRUE){
print("Warning: cells with small fractal dimension detected. The rows with fractal_dimen <= 1.5 will be removed to proceed with the algorithm")
Smallf <- which(fractal_dimen <= 1.5, arr.ind=TRUE)
dat <- dat[-Smallf,]
rm(fractal_dimen); rm(Smallf); rm(F)
n2 <- nrow(dat)
print(paste((n1-n2), "rows with small fractal_dimen <= 1.5 have been deleted."))
}
# remove objects with abnormally large background: the criterion is DNA_Index >=5
n1 <- nrow(dat)
DNA_Index <- dat$DNA_Index
D <- any(DNA_Index>=5)
if (D==TRUE){
print("Warning: cells with large background detected. The rows with DNA_Index >= 5 will be removed to proceed with the algorithm")
LargeD <- which(DNA_Index>=5, arr.ind=TRUE)
dat <- dat[-LargeD,]
rm(DNA_Index); rm(LargeD); rm(D)
n2 <- nrow(dat)
print(paste((n1-n2), "rows with large background (DNA_Index>=5) have been deleted."))
}
# remove some objects with very small area: the criterion is area <= 200
n1 <- nrow(dat)
area <- dat$area
s <- any(area<=200)
if(s==TRUE){
print("Warning: small area cells detected. The rows with area <= 200 will be removed to proceed with the algorithm")
small <- which(area <= 200, arr.ind = TRUE)
dat <- dat[-small, ]
rm(area); rm(small); rm(s)
n2 <- nrow(dat)
print(paste((n1-n2), "rows with samll areas (area<200) have been deleted."))
}
# remove objects with very large area: the criterion is area >= 4500
n1 <- nrow(dat)
area <- dat$area
L <- any(area>=4500)
if(L==TRUE){
print("Warning: Large area cells detected. The rows with area >= 4500 will be removed to proceed with the algorithm")
large <- which(area >= 4500, arr.ind = TRUE)
dat <- dat[-large,]
rm(large); rm(area); rm(L)
n2 <- nrow(dat)
print(paste((n1-n2), "rows with large areas (area>=4500) have been deleted."))
}
# remove objects with pale stain: DNA_Amount < 60
n1 <- nrow(dat)
DNA_Amount <- dat$DNA_Amount
P <- any(DNA_Amount<60)
if(P==TRUE){
print("Warning: Pale cells detected. The rows with DNA_Amount < 60 will be removed to proceed with the algorithm")
pale <- which(DNA_Amount < 60, arr.ind = TRUE)
dat <- dat[-pale,]
rm(pale); rm(DNA_Amount); rm(P)
n2 <- nrow(dat)
print(paste((n1-n2), "rows with pale stained (DNA_Amount < 60) have been deleted."))
}
########################## Different Node Classification ##########################
npgood <- table(dat$Ycol)[1]
Processed_NumGood[i] <- npgood
npjunk <- table(dat$Ycol)[2]
Processed_NumJunk[i] <- npjunk
# Predict
############## 0 -> 1,2
predprob0 <- predict(rf_goodjunkII, dat, type = "prob")[,1]
predgoodind0 <- which(predprob0>=0.5)
predjunkind0 <- which(predprob0<0.5)
realgoodind0 <- which(dat$Ycol == "good")
realjunkind0 <- which(dat$Ycol == "junk")
Node0TP[i] <- length(intersect(predgoodind0, realgoodind0))
Node0FP[i] <- length(intersect(predgoodind0, realjunkind0))
Node0TN[i] <- length(intersect(predjunkind0, realjunkind0))
Node0FN[i] <- length(intersect(predjunkind0, realgoodind0))
node1 <- dat[predgoodind0, ]
node2 <- dat[predjunkind0, ]
Layer1_Sensitivity[i] <- Node0TP[i] / (Node0TP[i] + Node0FN[i])
Layer1_Specificity[i] <- Node0TN[i] / (Node0TN[i] + Node0FP[i])
Layer1_Precision[i] <- Node0TP[i] / (Node0TP[i] + Node0FP[i])
############## 1 -> 3,4
predprob1 <- predict(rf_goodjunkIIGood, node1, type = "prob")[,1]
predgoodind1 <- which(predprob1>=0.5)
predjunkind1 <- which(predprob1<0.5)
realgoodind1 <- which(node1$Ycol == "good")
realjunkind1 <- which(node1$Ycol == "junk")
Node1TP[i] <- length(intersect(predgoodind1, realgoodind1))
Node1FP[i] <- length(intersect(predgoodind1, realjunkind1))
Node1TN[i] <- length(intersect(predjunkind1, realjunkind1))
Node1FN[i] <- length(intersect(predjunkind1, realgoodind1))
node3 <- node1[predgoodind1, ]
node4 <- node1[predjunkind1, ]
############## 2 -> 5,6
predprob2 <- predict(rf_goodjunkIIJunk, node2, type = "prob")[,1]
predgoodind2 <- which(predprob2>=0.5)
predjunkind2 <- which(predprob2<0.5)
realgoodind2 <- which(node2$Ycol == "good")
realjunkind2 <- which(node2$Ycol == "junk")
Node2TP[i] <- length(intersect(predgoodind2, realgoodind2))
Node2FP[i] <- length(intersect(predgoodind2, realjunkind2))
Node2TN[i] <- length(intersect(predjunkind2, realjunkind2))
Node2FN[i] <- length(intersect(predjunkind2, realgoodind2))
node5 <- node2[predgoodind2, ]
node6 <- node2[predjunkind2, ]
Layer2_Sensitivity[i] <- (Node1TP[i] + Node2TP[i]) / (Node1TP[i] + Node1FN[i] + Node2TP[i] + Node2FN[i])
Layer2_Specificity[i] <- (Node1TN[i] + Node2TN[i]) / (Node1TN[i] + Node1FP[i] + Node2TN[i] + Node2FP[i])
Layer2_Precision[i] <- (Node1TP[i] + Node2TP[i]) / (Node1TP[i] + Node1FP[i] + Node2TP[i] + Node2FP[i])
############## 3 -> 7,8
predprob3 <- predict(rf_goodjunkIIGood_Good, node3, type = "prob")[,1]
predgoodind3 <- which(predprob3>=0.5)
predjunkind3 <- which(predprob3<0.5)
realgoodind3 <- which(node3$Ycol == "good")
realjunkind3 <- which(node3$Ycol == "junk")
Node3TP[i] <- length(intersect(predgoodind3, realgoodind3))
Node3FP[i] <- length(intersect(predgoodind3, realjunkind3))
Node3TN[i] <- length(intersect(predjunkind3, realjunkind3))
Node3FN[i] <- length(intersect(predjunkind3, realgoodind3))
node7 <- node3[predgoodind3, ]
node8 <- node3[predjunkind3, ]
############## 4 -> 9,10
predprob4 <- predict(rf_goodjunkIIGood_Junk, node4, type = "prob")[,1]
predgoodind4 <- which(predprob4>=0.5)
predjunkind4 <- which(predprob4<0.5)
realgoodind4 <- which(node4$Ycol == "good")
realjunkind4 <- which(node4$Ycol == "junk")
Node4TP[i] <- length(intersect(predgoodind4, realgoodind4))
Node4FP[i] <- length(intersect(predgoodind4, realjunkind4))
Node4TN[i] <- length(intersect(predjunkind4, realjunkind4))
Node4FN[i] <- length(intersect(predjunkind4, realgoodind4))
node9 <- node4[predgoodind4, ]
node10 <- node4[predjunkind4, ]
############## 5 -> 11,12
predprob5 <- predict(rf_goodjunkIIJunk_Good, node5, type = "prob")[,1]
predgoodind5 <- which(predprob5>=0.5)
predjunkind5 <- which(predprob5<0.5)
realgoodind5 <- which(node5$Ycol == "good")
realjunkind5 <- which(node5$Ycol == "junk")
Node5TP[i] <- length(intersect(predgoodind5, realgoodind5))
Node5FP[i] <- length(intersect(predgoodind5, realjunkind5))
Node5TN[i] <- length(intersect(predjunkind5, realjunkind5))
Node5FN[i] <- length(intersect(predjunkind5, realgoodind5))
node11 <- node5[predgoodind5, ]
node12 <- node5[predjunkind5, ]
############## 6 -> 13,14
predprob6 <- predict(rf_goodjunkIIJunk_Junk, node6, type = "prob")[,1]
predgoodind6 <- which(predprob6>=0.5)
predjunkind6 <- which(predprob6<0.5)
realgoodind6 <- which(node6$Ycol == "good")
realjunkind6 <- which(node6$Ycol == "junk")
Node6TP[i] <- length(intersect(predgoodind6, realgoodind6))
Node6FP[i] <- length(intersect(predgoodind6, realjunkind6))
Node6TN[i] <- length(intersect(predjunkind6, realjunkind6))
Node6FN[i] <- length(intersect(predjunkind6, realgoodind6))
node13 <- node6[predgoodind6, ]
node14 <- node6[predjunkind6, ]
Layer3_Sensitivity[i] <- (Node3TP[i] + Node4TP[i] + Node5TP[i] + Node6TP[i]) / (Node3TP[i] + Node3FN[i] + Node4TP[i] + Node4FN[i] + Node5TP[i] + Node5FN[i] + Node6TP[i] + Node6FN[i])
Layer3_Specificity[i] <- (Node3TN[i] + Node4TN[i] + Node5TN[i] + Node6TN[i]) / (Node3TN[i] + Node3FP[i] + Node4TN[i] + Node4FP[i] + Node5TN[i] + Node5FP[i] + Node6TN[i] + Node6FP[i])
Layer3_Precision[i] <- (Node3TP[i] + Node4TP[i] + Node5TP[i] + Node6TP[i]) / (Node3TP[i] + Node3FP[i] + Node4TP[i] + Node4FP[i] + Node5TP[i] + Node5FP[i] + Node6TP[i] + Node6FP[i])
}
# Assemble one row per slide: file name, raw/processed cell counts, and the
# confusion counts plus pooled layer metrics gathered in the loop above.
# NOTE(review): cbind() with the character fileList1 coerces *every* column
# of the resulting matrix to character; the CSV is unaffected, but if SUM is
# reused downstream, data.frame(...) would preserve the numeric types.
Name_Numbers <- cbind(fileList1, Original_NumGood, Original_NumJunk, Processed_NumGood, Processed_NumJunk)
NodeTPFPTNFN <- cbind(Node0TP, Node0FP, Node0TN, Node0FN, Layer1_Sensitivity, Layer1_Specificity, Layer1_Precision,
                      Node1TP, Node1FP, Node1TN, Node1FN,
                      Node2TP, Node2FP, Node2TN, Node2FN, Layer2_Sensitivity, Layer2_Specificity, Layer2_Precision,
                      Node3TP, Node3FP, Node3TN, Node3FN,
                      Node4TP, Node4FP, Node4TN, Node4FN,
                      Node5TP, Node5FP, Node5TN, Node5FN,
                      Node6TP, Node6FP, Node6TN, Node6FN, Layer3_Sensitivity, Layer3_Specificity, Layer3_Precision)
SUM <- as.data.frame.matrix(cbind(Name_Numbers, NodeTPFPTNFN))
# Ask the user where to save the per-slide summary and write it as CSV.
ReturnVal2 <- tkmessageBox(title = "Step 2", message = "Please select the directory to save the result", icon = "info", type = "ok")
fileOut <- tclvalue(tkgetSaveFile(initialfile ="NodeStats_Train_Cancer.csv", initialdir="Z:/QTP/Martial/LDO2017", filetypes = "{{csv file} {.csv}}"))
write.csv(SUM, fileOut)
|
17e8aaed114251d144c67b37882bf9bc6e09f0cb | 04a7e4899d9aac6d1dbb0c37a4c45e5edb4f1612 | /R/sr-pdf.R | 7219cdf6f7e4cb6d9f069f40ddd1b67d4ee928ef | [
"MIT"
] | permissive | pbs-assess/csasdown | 796ac3b6d30396a10ba482dfd67ec157d7deadba | 85cc4dda03d6513c11350f7f607cce1cacb6bf6a | refs/heads/main | 2023-08-16T17:22:18.050497 | 2023-08-16T00:35:31 | 2023-08-16T00:35:31 | 136,674,837 | 47 | 18 | NOASSERTION | 2023-06-20T01:45:07 | 2018-06-08T23:31:16 | R | UTF-8 | R | false | false | 2,259 | r | sr-pdf.R | #' @rdname csas_pdf
#' @export
sr_pdf <- function(latex_engine = "pdflatex",
prepub = FALSE,
copy_sty = TRUE,
line_nums = FALSE,
line_nums_mod = 1,
draft_watermark = FALSE,
highlight = "tango",
french = FALSE,
pandoc_args = c("--top-level-division=chapter",
"--wrap=none",
"--default-image-extension=png"),
...) {
fr <- function() if (french) TRUE else FALSE
themes <- c("pygments", "tango", "espresso",
"zenburn", "kate", "monochrome",
"breezedark", "haddock")
if(is.null(highlight)){
highlight = "monochrome"
}
if((!highlight %in% themes) && !file.exists(here(highlight))){
bail("in YAML, ", tag_color("csasdown:sr_pdf: highlight"),
" must be one of ",
csas_color(paste(themes, collapse = ", ")),
"\nor a filename for a custom latex theme file.",
"\nSee pandoc documentation, ",
csas_color("--highlight-style argument."))
}
if (fr()) {
file <- system.file("csas-tex", "sr-french.tex", package = "csasdown")
} else {
file <- system.file("csas-tex", "sr.tex", package = "csasdown")
}
base <- pdf_book(
template = file,
keep_tex = TRUE,
pandoc_args = pandoc_args,
latex_engine = latex_engine,
...
)
tmp_hl <- grep("--highlight-style", base$pandoc$args)
base$pandoc$args <- base$pandoc$args[-c(tmp_hl[1], tmp_hl[1] + 1)]
if (!class(line_nums_mod) %in% c("integer", "numeric")) {
bail(csas_color("line_nums_mod"), " must be a numeric or integer value.")
}
update_csasstyle(
copy = copy_sty,
line_nums = line_nums,
line_nums_mod = line_nums_mod,
draft_watermark = draft_watermark,
which_sty = ifelse(fr(), "sr-french.sty", "sr.sty")
)
base$knitr$opts_chunk$comment <- NA
old_opt <- getOption("bookdown.post.latex")
options(bookdown.post.latex = function(x) {
fix_envs(
x = x,
prepub = prepub,
highlight = highlight,
include_abstract = FALSE
)
})
on.exit(options(bookdown.post.late = old_opt))
base
}
|
8b9c6d90168a0d6d904d57c8fb70209132b73a99 | a796bddefc36227fcf20c21b08509d02306266a1 | /R/12_summarize.R | 1122a871173ea61ac70668eed030b74d01c790f5 | [] | no_license | moodymudskipper/nakedpipe | 4ecfd51ba77f6ec82b116ca17bdb070c317ca39b | 03f128bd77402954ce155a4e6e8e2e8e3ce7f430 | refs/heads/master | 2023-04-04T13:07:39.032087 | 2023-03-23T03:04:07 | 2023-03-23T03:06:55 | 241,155,892 | 73 | 5 | null | null | null | null | UTF-8 | R | false | false | 4,451 | r | 12_summarize.R | # summarize ungrouped data
# Summarise an ungrouped data frame: evaluate each expression in `...` in the
# context of `.data` and bind the results into a data frame.
# Expressions may use `?` as a column placeholder (e.g. `mean(?is.numeric)`),
# which expands into one call per column selected by the predicate function or
# by an explicit character/name vector. Named arguments whose name contains
# glue braces (e.g. `"{fn}_{col}" = ...`) template the output column names.
# `env` is the caller's environment, used when evaluating symbols that are not
# columns of `.data`.
np_summarise <- function(.data, ..., env) {
  # capture the summarising expressions unevaluated
  exprs <- eval(substitute(alist(...)))
  # expand, using `?`
  names(exprs) <- allNames(exprs)
  # `a = expr` may arrive as a call to `=`; move the lhs into the name slot
  for (i in seq_along(exprs)) {
    if(has_equal(exprs[[i]])) {
      names(exprs)[[i]] = as.character(exprs[[i]][[2]])
      exprs[[i]] <- exprs[[i]][[3]]
    }
  }
  # Expand one expression containing `?` into a list of per-column calls.
  # `nm` (when non-empty) is a glue template for the output names, with
  # `col` (column name) and `fn` (deparsed function name) in scope.
  expand <- function(expr, nm) {
    # if no question mark don't change
    if(! "?" %in% all.names(expr)) {
      return(expr)
    }
    # pull the question mark calls, and extract the rhs of the informative one
    ind <- call_pull(expr)
    ind <- setdiff(ind, list(quote(.)))
    if(length(ind) > 1)
      stop("If you use several `?` in a call, only one should not be `?.`")
    ind <- ind[[1]]
    # evaluate it and infer the indices if it evaluates to a function
    ind <- eval(ind, env)
    if(is.function(ind)) {
      # predicate function: keep the columns for which it returns TRUE
      ind <- sapply(.data, ind)
      if(!is.logical(ind))
        stop("`?` should only be used on functions that return a boolean")
      ind <- names(.data)[ind]
    }
    # use call_sub to build a list of substituted calls, one per column
    res <- call_sub(expr, target = quote(`?`), replacement = sapply(ind, as.name))
    if(nm != "") {
      if(!requireNamespace("glue")) {
        stop("You must install the package {glue} to use this feature.")
      }
      # apply the glue template to name each generated call
      names(res)[] <- eval(
        substitute(glue::glue(nm), list(nm = nm)),
        list(col = names(res), fn = deparse(expr[[1]])), enclos = env)
    }
    res
  }
  # expand every expression; `?`-expanded ones come back as lists of calls
  exprs <- Map(expand, exprs, names(exprs))
  # blank the name of expanded entries so unlist() below does not prefix the
  # per-column names with the original argument name
  for (i in seq_along(exprs)) {
    if (is.list(exprs[[i]])) names(exprs)[i] <- ""
  }
  exprs <- unlist(exprs)
  # evaluate each call with the data columns in scope, caller env as parent
  res <- sapply(
    exprs,
    function(expr) {
      eval(expr, envir = .data, enclos = env)
    }, simplify = FALSE)
  # as.data.frame() recycles length-1 results; any other length mismatch
  # (more than one distinct length besides 1) is an error
  if(length(unique(c(1,lengths(res)))) > 2) {
    stop("The different outputs must have the same length or a length of 1.")
  }
  nms <- allNames(res)
  res <- as.data.frame(res)
  # unnamed results get their deparsed call as the column name
  names(res)[nms == ""] <- sapply(exprs[nms == ""], deparse)
  res
}
#' compute by group
#'
#' `compute_by_group()` is meant to be used through the `{expr} ~ by` syntax
#' in a call to the nakedpipe, and might be encountered when using the debugging pipe.
#'
#' @param data data
#' @param expr expression
#' @param by variables to group by
#'
#' @export
compute_by_group <- function(data, expr, by) {
  env <- parent.frame()
  # capture the `by` clause unevaluated (e.g. `gender + sex`)
  by_expr <- substitute(by)
  #~~~~~~~~~~~~~~~~~~~~
  # validate the `by` clause and extract the grouping variable names
  data_vars <- names(data)
  by_vars0 <- all.names(by_expr)
  if("?" %in% by_vars0) {
    stop("`?` in `by` is not supported yet!")
  } else if (length(setdiff(by_vars0, c(data_vars, "+", "-", "*", ":")))) {
    stop("The only allowed operators in the `by` clause are ",
         "`+`, `-`, `*`, and `:`")
  } else {
    if(any(c("-", "*", ":") %in% by_vars0))
      stop(" `-`, `*`, and `:` in `by` are not supported yet")
    by_vars <- setdiff(by_vars0, c("+", "-", "*", ":"))
    if(! all(by_vars %in% data_vars))
      stop("some variables are not in data")
  }
  # split the non-grouping columns by the grouping columns; `exclude = NULL`
  # keeps NA as its own group, `drop = TRUE` drops empty level combinations
  split_data <- split(
    `[<-`(data, by_vars, value = NULL),
    lapply(data[by_vars], factor, exclude = NULL),
    drop = TRUE)
  # Build the per-group key rows in the SAME order split() produces
  # (factor-level order), so they pair up positionally with `split_data`.
  # BUG FIX: the previous code used `unique(data[by_vars])`, which is in
  # first-appearance order and could pair the wrong key row with a group
  # whenever the data are not sorted by the grouping variables.
  split_groups <- lapply(
    split(data[by_vars], lapply(data[by_vars], factor, exclude = NULL),
          drop = TRUE),
    function(g) g[1, , drop = FALSE])
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # preprocess expr: `{...}` blocks are rewritten as np_summarise() calls
  expr <- substitute(expr)
  expr <- insert_dot(expr)
  if(identical(expr[[1]], quote(`{`))) {
    expr <- as.call(c(quote(np_summarise), quote(.), env = env, as.list(expr[-1])))
  }
  # build a function(., .groups) whose body is the user expression
  fun <- as.function(c(alist(.=, .groups =), expr))
  #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
  # apply the aggregation group by group
  split_data_modified <- Map(fun, split_data, split_groups)
  # re-attach the grouping columns and move them to the front
  fun <- function(., .groups) {
    res <- cbind(., .groups) # in that order for row.names issues
    res[c(by_vars, setdiff(names(res), by_vars))]
  }
  split_data_modified <- Map(fun, split_data_modified, split_groups)
  res <- do.call("rbind", split_data_modified)
  # keep the original row names only when they survived unambiguously
  row_nms <- unlist(lapply(split_data_modified, row.names))
  if (all(row_nms %in% row.names(data)) && !anyDuplicated(row_nms))
    row.names(res) <- row_nms
  else
    row.names(res) <- NULL
  res
}
# compute_by_group(starwars, np_summarise(mean(?is.numeric, na.rm = TRUE), head(?is.character, 1)), by = gender + sex)
# compute_by_group(starwars, {
# mean(?is.numeric, na.rm = TRUE)
# head(?is.character, 1) }, by = gender + sex)
|
6e024130a14dab30f068e432bd85cce1c589db00 | 3d2d38edafbf2f615b9b5263a39fb63865bc2dad | /main.r | 8742aab03eda005aed403e566e925950d6c70573 | [] | no_license | aa989190f363e46d/solarSfReader | 4f4ed17eb993da90845abc417aeceb2fad15646e | 17a88aba9b08e6b3c9c19f29c316e7a5cd7f4406 | refs/heads/master | 2021-01-19T10:57:38.550758 | 2014-10-01T13:22:27 | 2014-10-01T13:22:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 256 | r | main.r | library('digest') ## for sha256 from data file.
# Database access and bit-manipulation dependencies.
library('RSQLite')
library('bitops') ## for bitwise hash XOR: binXor.
# Pull in the project's helper scripts. Paths are relative to the working
# directory, so this script must be run from the project root.
source('./storages.r')
source('./calculations.r')
source('./readSpc.r')
source('./manipulator.r')
# Use the C locale so string collation/encoding is platform-independent.
Sys.setlocale('LC_ALL','C')
ccc5af37f2a57203629ce82b4700da72f117da16 | 71dcb528bd7b0522130380fb4c2f4fc8a4b75ed9 | /R/all_mnread_param.R | 6e49e9b26aab8929ad52ed41b52767b859b03673 | [] | no_license | cran/mnreadR | 405c8ff6a3e5db5f77120d06d11840c9203c9abd | a431901056977d13676341f8e1c6471f4d0302ea | refs/heads/master | 2021-07-12T23:45:30.409039 | 2021-06-24T22:10:02 | 2021-06-24T22:10:02 | 98,470,195 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,405 | r | all_mnread_param.R | #----- mnreadParam ------
#######################--
#' Standard MNREAD parameters' estimation
#'
#' This function calculates simultaneously all four MNREAD parameters:
#' \itemize{
#' \item Maximum Reading Speed (MRS)
#' \item Critical Print Size (CPS)
#' \item Reading Acuity (RA)
#' \item Reading ACCessibility Index (ACC)
#' }
#' while performing print size correction for non-standard testing viewing distance.
#'
#' @param data The name of your dataframe
#' @param print_size The variable that contains print size values for each sentence (print size uncorrected for viewing distance)
#' @param viewing_distance The variable that contains the viewing distance value used for testing
#' @param reading_time The variable that contains the reading time for each sentence
#' @param errors The variable that contains the number of errors for each sentence
#' @param ... Optional grouping arguments
#'
#' @return The function returns a new dataframe with four variables:
#' \itemize{
#' \item "RA" -> contains the Reading Acuity estimate (in logMAR)
#' \item "CPS" -> contains the Critical Print Size estimate (in logMAR)
#' \item "MRS" -> contains the Maximum Reading Speed estimate (in words/min)
#' \item "ACC" -> contains the Reading Accessibility Index estimate
#' }
#'
#' @section Notes:
#' This function uses the original algorithm described in Legge (2007) to estimate Maximum Reading Speed (MRS) and Critical Print Size (CPS).
#' This algorithm searches for a reading speed plateau in the data. A plateau is defined as a range of print sizes
#' that supports reading speed at a significantly faster rate than the print sizes smaller or larger than the plateau range.
#' Concretely, the plateau is determined as print sizes which reading speed is at least 1.96 SD faster than the other print sizes.
#' The Maximum Reading Speed is estimated as the mean reading speed for print sizes included in the plateau.
#' The Critical Print Size is defined as the smallest print size on the plateau.
#'
#' For more details on the parameters estimation, see \url{https://legge.psych.umn.edu/mnread-acuity-charts}
#'
#' For more details on the original algorithm, see Chapter 5 of this book:\\
#' Legge, G.E. (2007). Psychophysics of Reading in Normal and Low Vision. Mahwah, NJ & London: Lawrence Erlbaum Associates. ISBN 0-8058-4328-0
#' \url{https://books.google.fr/books/about/Psychophysics_of_Reading_in_Normal_and_L.html?id=BGTHS8zANiUC&redir_esc=y}
#'
#' To ensure proper estimation of the MRS and CPS, individual MNREAD curves should be plotted using \code{\link{mnreadCurve}} and inspected visually.
#'
#'
#' @section Warning:
#' For the function to run properly, one needs to make sure that the variables are of the class:
#' \itemize{
#' \item \strong{print_size} -> numeric
#' \item \strong{viewing_distance} -> integer
#' \item \strong{reading_time} -> numeric
#' \item \strong{errors} -> integer
#' }
#'
#' In cases where only 3 or less sentences were read during a test,
#' the function won't be able to estimate the MRS and CPS
#' and will return NA values instead.
#' The ACC should be used to estimate the MNREAD score in such cases
#' where there are not enough data points to fit the MNREAD curve.
#'
#' To ensure proper ACC calculation, the data should be entered along certain rules:
#' \itemize{
#' \item For the smallest print size that is presented but not read, right before the test is stopped: \strong{reading_time = NA, errors = 10}
#' \item For all the small sentences that are not presented because the test was stopped before them: \strong{reading_time = NA, errors = NA}
#' \item If a sentence is presented, and read, but the time was not recorded by the experimenter: \strong{reading_time = NA, errors = actual number of errors} (cf. s5-regular in low vision data sample)
#' \item If a large sentence was skipped to save time but would have been read well: \strong{reading_time = NA, errors = NA} (cf. s1-regular in normal vision data sample)
#' \item If a large sentence was skipped to save time because the subject cannot read large print: \strong{reading_time = NA, errors = 10} (cf. s7 in low vision data sample)
#' }
#'
#' @seealso
#' \code{\link{curveParam_RT}} for standard MRS and CPS estimation using values of reading time (instead of reading speed)
#'
#' \code{\link{curveParam_RS}} for standard MRS and CPS estimation using values of reading speed (instead of reading time)
#'
#' \code{\link{nlmeParam}} for MRS and CPS estimation using a nonlinear mixed-effect model (NLME)
#'
#' \code{\link{readingAcuity}} for Reading Acuity calculation
#'
#' \code{\link{accIndex}} for Reading Accessibility Index calculation
#'
#'
#' @examples # inspect the structure of the dataframe
#' @examples head(data_low_vision, 10)
#'
#' #------
#'
#' @examples # restrict dataset to one MNREAD test only (subject s1, regular polarity)
#' @examples data_s1 <- data_low_vision %>%
#' @examples filter (subject == "s1", polarity == "regular")
#'
#' @examples # run the parameters estimation
#' @examples data_low_vision_param <- mnreadParam(data_s1, ps, vd, rt, err)
#'
#' @examples # inspect the newly created dataframe
#' @examples data_low_vision_param
#'
#' #------
#'
#' @examples # run the parameters estimation on the whole dataset grouped by subject and polarity
#' @examples data_low_vision_param <- mnreadParam(data_low_vision, ps, vd, rt, err,
#' @examples subject, polarity)
#'
#' @examples # inspect the structure of the newly created dataframe
#' @examples head(data_low_vision_param, 10)
#'
#' @importFrom stats sd
#' @import dplyr
#'
#' @export
mnreadParam <- function(data, print_size, viewing_distance, reading_time, errors, ... = NULL) {
  # Estimates all four MNREAD parameters (RA, MRS, CPS, ACC) and returns
  # them in a new dataframe, optionally by grouping variable(s) passed in `...`.
  message('Remember to check the accuracy of MRS and CPS estimates by inspecting the MNREAD curve with mnreadCurve()')
  # capture the column arguments for tidy evaluation with `!!` below
  print_size <- enquo(print_size)
  viewing_distance <- enquo(viewing_distance)
  reading_time <- enquo(reading_time)
  errors <- enquo(errors)
  # dummy bindings to silence R CMD check notes about NSE column names
  errors10 <- NULL
  rs <- NULL
  log_rs <- NULL
  correct_ps <- NULL
  r_time <- NULL
  error_nb <- NULL
  p_size <- NULL
  ps <- NULL
  min_ps <- NULL
  sum_err <- NULL
  nb_row <- NULL
  . <- NULL
  .drop <- TRUE
  # Prepare the data for the RA, MRS and CPS estimation:
  # cap errors at 10 (an MNREAD sentence has 10 standard words), compute
  # reading speed in words/min, and correct print size for non-standard
  # viewing distance (standard is 40 cm; correction term is log10(40/vd)).
  temp_df1 <- as.data.frame(
    data %>%
      filter ((!!errors) != "NA" & (!!reading_time) > 0) %>%
      mutate (errors10 = replace ((!!errors), (!!errors) > 10, 10)) %>%
      mutate (rs = 60 * (10 - errors10) / (!!reading_time)) %>%
      filter (rs != "NA", rs != "-Inf") %>%
      mutate (log_rs = log(rs)) %>%
      filter (log_rs != "NA", log_rs != "-Inf") %>%
      mutate (correct_ps = (!!print_size) + round(log10(40/(!!viewing_distance)), 2)) %>%
      filter (correct_ps != "NA", correct_ps != "-Inf") )
  # Prepare the data for the ACC calculation
  # (the ACC only uses print sizes between 0.4 and 1.3 logMAR).
  temp_df2 <- as.data.frame(
    data %>%
      mutate (rs = (10 - replace ((!!errors), (!!errors) > 10, 10)) / (!!reading_time) * 60) %>%
      mutate (r_time = (!!reading_time)) %>%
      mutate (error_nb = (!!errors)) %>%
      mutate (p_size = (!!print_size)) %>%
      mutate (ps = p_size) %>%
      filter (p_size >= 0.4 & p_size <= 1.3 ) )
  # with no grouping argument
  if ( missing(...) ) {
  # calculate reading acuity: smallest corrected print size read, penalised
  # by 0.01 logMAR per reading error
  RAdf <- as.data.frame(
    temp_df1 %>%
      summarise (min_ps = min(correct_ps),
                 sum_err = sum((errors10), na.rm=T)) %>%
      mutate (RA = min_ps + sum_err*(0.01)) %>%
      select (-min_ps, -sum_err) )
  # estimate MRS and CPS via the Legge/Mansfield plateau-search algorithm
  MRS_CPSdf <- as.data.frame(
    temp_df1 %>%
      arrange (correct_ps) %>% # sort temp_df by correct_ps in ascending order
      mutate (nb_row = n()) %>%
      do (mansfield_algo(., .$correct_ps, .$nb_row, .$log_rs)) )
  # calculate reading accessibility index
  ACCdf <- as.data.frame(
    temp_df2 %>%
      do (acc_algo(.)) )
  # create one single df with all 4 parameters
  all_param <- cbind(MRS_CPSdf, RAdf, ACCdf)
  }
  # with grouping argument(s): same pipeline, applied per group
  else {
    grouping_var <- quos(...)
    # calculate reading acuity
    RAdf <- as.data.frame(
      temp_df1 %>%
        group_by (!!!grouping_var, .drop = TRUE) %>%
        summarise (min_ps = min(correct_ps),
                   sum_err = sum((errors10), na.rm=T)) %>%
        mutate (RA = min_ps + sum_err*(0.01)) %>%
        select (-min_ps, -sum_err) ) #%>%
    # filter (.drop != "NA") %>% select (-.drop)
    # estimate MRS and CPS
    MRS_CPSdf <- as.data.frame(
      temp_df1 %>%
        group_by (!!!grouping_var, .drop = TRUE) %>%
        arrange (correct_ps) %>% # sort temp_df by correct_ps in ascending order
        mutate (nb_row = n()) %>%
        do (mansfield_algo(., .$correct_ps, .$nb_row, .$log_rs)) ) #%>%
    # filter (.drop != "NA") %>% select (-.drop)
    # calculate reading accessibility index
    ACCdf <- as.data.frame(
      temp_df2 %>%
        group_by (!!!grouping_var, .drop = TRUE) %>%
        do (acc_algo(.)) ) #%>%
    # filter (.drop != "NA") %>% select (-.drop)
    # create one single df with all 4 parameters, joined on the grouping vars
    join_temp <- left_join(MRS_CPSdf, RAdf)
    all_param <- left_join(join_temp, ACCdf)
  }
  return(all_param)
}
|
f15fd4246ac4e1a423ab6bc82c06d0121ab94e2e | 71c70a7779f0fc12a7a42056704bf7b7005a17d9 | /Rescucitation_Vaupel_Yashin/res_nor.R | ee9a30fbc15286455d35b1cf2cec08a9b2d0aa86 | [] | no_license | jcletilei/Rescucitation_Vaupel_Yashin | 2ffe8fea2ed55d9ad68f9133bd9bddbfd4d268f4 | 32abaca65fc06e9876539a523743adc1b791ed02 | refs/heads/master | 2020-06-26T19:27:04.067900 | 2019-07-25T17:56:00 | 2019-07-25T17:56:00 | null | 0 | 0 | null | null | null | null | ISO-8859-10 | R | false | false | 65,658 | r | res_nor.R | library(MortalityLaws)
library(dplyr)
library(tidyr)
library(tidyverse)
library(lubridate)
library(plyr)
# Download HMD period life tables (interval "1x1": single ages, single
# calendar years) via MortalityLaws::ReadHMD, using the HMD country codes.
# NOTE(review): "NOR" appears twice in this vector, and HMD credentials are
# hard-coded in plain text -- consider moving them to environment variables.
cntr <- c('SWE', "ITA","BEL","NOR","FIN",
          "NOR", "CHE")
#reading in the life tables in single ages, by single-year periods
# females
LT_f<- ReadHMD(what = "LT_f",
               countries = cntr,
               interval = "1x1",
               username = "vdilego@gmail.com",
               password = "588672vitor",save = FALSE)
ls(LT_f)  # inspect the components of the returned object
LT_f
#males
LT_m<- ReadHMD(what = "LT_m",
               countries = cntr,
               interval = "1x1",
               username = "vdilego@gmail.com",
               password = "588672vitor",save = FALSE)
ls(LT_m)
LT_m
#############################################
#### Norway: data preparation ###############
#############################################
# preparing data
# females
lt_f<-LT_f$data
# retrieving the columns of interest (survivors lx by year and age)
nor_lt_f<-lt_f%>%
  filter(country %in% c("NOR"))%>%
  filter(Age<=100)%>%
  select(c("Year","Age","lx"))
View(nor_lt_f)
# transforming to wide format (one column per year) for calculations
nor_lt_f_surv <- nor_lt_f %>%
  spread(key=Year, value=lx)
View(nor_lt_f_surv)
#saving for later checking or performing simple tasks in excel
write.table(nor_lt_f_surv, file="nor_females_lx.csv", sep=",", row.names = F) #saving for using excel as well
# the same for life expectancy at birth (Age == 0) only
nor_lt_f_ex<-lt_f%>%
  filter(country %in% c("NOR"))%>%
  filter(Age %in% c("0"))%>%
  select(c("Year","Age","ex"))
View(nor_lt_f_ex)
nor_lt_f_ex_surv <- nor_lt_f_ex %>%
  spread(key=Year, value=ex)
View(nor_lt_f_ex_surv)
write.table(nor_lt_f_ex_surv, file="nor_females_ex.csv", sep=",", row.names = F) #saving for using excel as well
# males
# NOTE(review): unlike the female extraction above, no Age<=100 filter is
# applied here -- confirm whether ages above 100 should really be kept for males.
lt_m<-LT_m$data
nor_lt_m<-lt_m%>%
  filter(country %in% c("NOR"))%>%
  select(c("Year","Age","lx"))
View(nor_lt_m)
nor_lt_m_surv <- nor_lt_m %>%
  spread(key=Year, value=lx)
View(nor_lt_m_surv)
write.table(nor_lt_m_surv, file="nor_males_lx.csv", sep=",", row.names = F) #saving for using excel as well
#only ex
nor_lt_m_ex<-lt_m%>%
  filter(country %in% c("NOR"))%>%
  filter(Age %in% c("0"))%>%
  select(c("Year","Age","ex"))
View(nor_lt_m_ex)
nor_lt_m_ex_surv <- nor_lt_m_ex %>%
  spread(key=Year, value=ex)
View(nor_lt_m_ex_surv)
write.table(nor_lt_m_ex_surv, file="nor_males_ex.csv", sep=",", row.names = F) #saving for using excel as well
###############################################
### Implementing Vaupel's Approach : FEMALES ##
###############################################
# selecting years for the mortality regime comparison (roughly 50-year steps)
nor_lt_f<-lt_f%>%
  filter(country %in% c("NOR"))%>%
  filter(Age<=100)%>%
  filter(Year %in% c("1846","1851","1901","1951","2014"))%>%
  select(c("Year","Age","lx"))
View(nor_lt_f)
# plotting survival curves
library(ggplot2)
X11(width=7,height=7)
females_surv<-ggplot(nor_lt_f %>% filter(Year!="1846"), aes(x = Age, y = lx, group=factor(Year))) +
  geom_line(aes(linetype=factor(Year)))+ ylim(1,100000)+
  theme_bw()+scale_linetype_manual(name="Year", values = c(
    "1851"="solid",
    "1901"="dotdash",
    "1951"="dashed",
    "2014"="dotted"),
    labels=c("1851","1901","1951","2014"))+
  ggtitle("a.Females") +
  ylab("lx")+annotate("text", x =c(63,70,72,75), y = c(40000,52000,80000,90000), label = c("1851","1901","1951","2014"))
dev.off()
#wide format for estimation (columns: Age, 1846, 1851, 1901, 1951, 2014)
lx_nor<- nor_lt_f%>%
  spread(key=Year, value=lx)
View(lx_nor)
#only ex
nor_lt_f_ex<-lt_f%>%
  filter(country %in% c("NOR"))%>%
  filter(Year %in% c("1846","1851","1901","1951","2014"))%>%
  filter(Age %in% c("0"))%>%
  select(c("Year","Age","ex"))
View(nor_lt_f_ex)
#wide format for estimation
nor_lt_f_ex_surv <- nor_lt_f_ex %>%
  spread(key=Year, value=ex)
View(nor_lt_f_ex_surv)
# taking the first row out because we are not dealing with the radix of the lifetable
lx_nor<-lx_nor[-1,]
View(lx_nor)
# Creating the variables for estimating the resuscitated:
# hazard_k = log(lx_newer/lx_older) for consecutive regime pairs
# (columns 2..6 are the years 1846..2014), and change_k = exp(hazard_k)-1
# is the corresponding relative survivorship change.
lx_nor_full<-lx_nor %>%
  mutate(hazard_1=log(lx_nor[,3]/lx_nor[,2]),hazard_2=log(lx_nor[,4]/lx_nor[,3]),hazard_3=log(lx_nor[,5]/lx_nor[,4]),
         hazard_4=log(lx_nor[,6]/lx_nor[,5]),change_1=exp(hazard_1)-1,change_2=exp(hazard_2)-1,change_3=exp(hazard_3)-1,
         change_4=exp(hazard_4)-1)
View(lx_nor_full)
# keep only complete cases: beyond the highest age attained there are no
# survivors, so the log-ratios above generate NA's
lx_nor_full<-lx_nor_full[complete.cases(lx_nor_full), ]
# Creating a dataframe for the number of resuscitations -- we go up to ten
# resuscitations, following Vaupel & Yashin. Two mortality regimes are
# compared at a time; here the first pair of years.
# res_k = lx_old * hazard^k / k! is the Poisson-type count of persons saved
# exactly k times under the lower-mortality regime.
# 1. estimating life years spent in each resuscitation state for the first mortality regime comparison 1846/1851
lx_nor_res_1<-lx_nor_full %>%
  mutate(res_1=lx_nor_full[,2]*lx_nor_full[,7], res_2=(lx_nor_full[,2]*(lx_nor_full[,7]^2))/factorial(2), res_3=(lx_nor_full[,2]*(lx_nor_full[,7]^3))/factorial(3),
         res_4=(lx_nor_full[,2]*(lx_nor_full[,7]^4))/factorial(4), res_5=(lx_nor_full[,2]*(lx_nor_full[,7]^5))/factorial(5),res_6=(lx_nor_full[,2]*(lx_nor_full[,7]^6))/factorial(6),
         res_7=(lx_nor_full[,2]*(lx_nor_full[,7]^7))/factorial(7), res_8=(lx_nor_full[,2]*(lx_nor_full[,7]^8))/factorial(8),res_9=(lx_nor_full[,2]*(lx_nor_full[,7]^9))/factorial(9),
         res_10=(lx_nor_full[,2]*(lx_nor_full[,7]^10))/factorial(10))
View(lx_nor_res_1)
radix<-100000  # life-table radix l(0)
write.table(lx_nor_res_1, file="lx_nor.csv", sep=",", row.names = F) #saving for using excel as well)
# tau_k = person-years lived in the "saved exactly k times" state per
# initial individual (columns 25:34 below are tau_1..tau_10)
lx_nor_res_years<-lx_nor_res_1%>%
  mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
         tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
lx_nor_res_years<-lx_nor_res_years%>% mutate(total=sum(lx_nor_res_years[1,25:34]))
View(lx_nor_res_years)
#adding life expectancies, estimating differences
lx_nor_res_years$ex_old<-nor_lt_f_ex_surv$`1846`
lx_nor_res_years$ex_new<-nor_lt_f_ex_surv$`1851`
#mortality gap between regimes
lx_nor_res_years$ex_diff<-lx_nor_res_years$ex_new-lx_nor_res_years$ex_old
#if all females from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_res_years$ex_gap_res1<-lx_nor_res_years$ex_new-(lx_nor_res_years$ex_old+lx_nor_res_years$tau_1)
lx_nor_res_years$ex_gap_res_percent<-100-((lx_nor_res_years$ex_gap_res1/lx_nor_res_years$ex_diff)*100)
# Regimes 2-4 repeat the regime-1 computation above with the column indices
# shifted to the next pair of years ([,3]/[,8], [,4]/[,9], [,5]/[,10]).
# 2. estimating life years spent in each resuscitation state for the second mortality regime comparison 1851/1901
lx_nor_res_2<-lx_nor_full %>%
  mutate(res_1=lx_nor_full[,3]*lx_nor_full[,8], res_2=(lx_nor_full[,3]*(lx_nor_full[,8]^2))/factorial(2), res_3=(lx_nor_full[,3]*(lx_nor_full[,8]^3))/factorial(3),
         res_4=(lx_nor_full[,3]*(lx_nor_full[,8]^4))/factorial(4), res_5=(lx_nor_full[,3]*(lx_nor_full[,8]^5))/factorial(5),res_6=(lx_nor_full[,3]*(lx_nor_full[,8]^6))/factorial(6),
         res_7=(lx_nor_full[,3]*(lx_nor_full[,8]^7))/factorial(7), res_8=(lx_nor_full[,3]*(lx_nor_full[,8]^8))/factorial(8),res_9=(lx_nor_full[,3]*(lx_nor_full[,8]^9))/factorial(9),
         res_10=(lx_nor_full[,3]*(lx_nor_full[,8]^10))/factorial(10))
View(lx_nor_res_2)
lx_nor_res_years_2<-lx_nor_res_2%>%
  mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
         tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
lx_nor_res_years_2<-lx_nor_res_years_2%>% mutate(total=sum(lx_nor_res_years_2[1,25:34]))
View(lx_nor_res_years_2)
#adding life expectancies, estimating differences
lx_nor_res_years_2$ex_old2<-nor_lt_f_ex_surv$`1851`
lx_nor_res_years_2$ex_new2<-nor_lt_f_ex_surv$`1901`
#mortality gap between regimes
lx_nor_res_years_2$ex_diff2<-lx_nor_res_years_2$ex_new2-lx_nor_res_years_2$ex_old2
#if all females from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_res_years_2$ex_gap_res2<-lx_nor_res_years_2$ex_new2-(lx_nor_res_years_2$ex_old2+lx_nor_res_years_2$tau_1)
lx_nor_res_years_2$ex_gap_res_percent2<-100-((lx_nor_res_years_2$ex_gap_res2/lx_nor_res_years_2$ex_diff2)*100)
# 3. estimating life years spent in each resuscitation state for the third mortality regime comparison 1901/1951
lx_nor_res_3<-lx_nor_full %>%
  mutate(res_1=lx_nor_full[,4]*lx_nor_full[,9], res_2=(lx_nor_full[,4]*(lx_nor_full[,9]^2))/factorial(2), res_3=(lx_nor_full[,4]*(lx_nor_full[,9]^3))/factorial(3),
         res_4=(lx_nor_full[,4]*(lx_nor_full[,9]^4))/factorial(4), res_5=(lx_nor_full[,4]*(lx_nor_full[,9]^5))/factorial(5),res_6=(lx_nor_full[,4]*(lx_nor_full[,9]^6))/factorial(6),
         res_7=(lx_nor_full[,4]*(lx_nor_full[,9]^7))/factorial(7), res_8=(lx_nor_full[,4]*(lx_nor_full[,9]^8))/factorial(8),res_9=(lx_nor_full[,4]*(lx_nor_full[,9]^9))/factorial(9),
         res_10=(lx_nor_full[,4]*(lx_nor_full[,9]^10))/factorial(10))
View(lx_nor_res_3)
lx_nor_res_years_3<-lx_nor_res_3%>%
  mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
         tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
lx_nor_res_years_3<-lx_nor_res_years_3%>% mutate(total=sum(lx_nor_res_years_3[1,25:34]))
View(lx_nor_res_years_3)
#adding life expectancies, estimating differences
lx_nor_res_years_3$ex_old3<-nor_lt_f_ex_surv$`1901`
lx_nor_res_years_3$ex_new3<-nor_lt_f_ex_surv$`1951`
#mortality gap between regimes
lx_nor_res_years_3$ex_diff3<-lx_nor_res_years_3$ex_new3-lx_nor_res_years_3$ex_old3
#if all females from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_res_years_3$ex_gap_res3<-lx_nor_res_years_3$ex_new3-(lx_nor_res_years_3$ex_old3+lx_nor_res_years_3$tau_1)
lx_nor_res_years_3$ex_gap_res_percent3<-100-((lx_nor_res_years_3$ex_gap_res3/lx_nor_res_years_3$ex_diff3)*100)
# 4. estimating life years spent in each resuscitation state for the fourth mortality regime comparison - 1951/2014
lx_nor_res_4<-lx_nor_full %>%
  mutate(res_1=lx_nor_full[,5]*lx_nor_full[,10], res_2=(lx_nor_full[,5]*(lx_nor_full[,10]^2))/factorial(2), res_3=(lx_nor_full[,5]*(lx_nor_full[,10]^3))/factorial(3),
         res_4=(lx_nor_full[,5]*(lx_nor_full[,10]^4))/factorial(4), res_5=(lx_nor_full[,5]*(lx_nor_full[,10]^5))/factorial(5),res_6=(lx_nor_full[,5]*(lx_nor_full[,10]^6))/factorial(6),
         res_7=(lx_nor_full[,5]*(lx_nor_full[,10]^7))/factorial(7), res_8=(lx_nor_full[,5]*(lx_nor_full[,10]^8))/factorial(8),res_9=(lx_nor_full[,5]*(lx_nor_full[,10]^9))/factorial(9),
         res_10=(lx_nor_full[,5]*(lx_nor_full[,10]^10))/factorial(10))
View(lx_nor_res_4)
lx_nor_res_years_4<-lx_nor_res_4%>%
  mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
         tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
lx_nor_res_years_4<-lx_nor_res_years_4%>% mutate(total=sum(lx_nor_res_years_4[1,25:34]))
View(lx_nor_res_years_4)
#adding life expectancies, estimating differences
lx_nor_res_years_4$ex_old4<-nor_lt_f_ex_surv$`1951`
lx_nor_res_years_4$ex_new4<-nor_lt_f_ex_surv$`2014`
#mortality gap between regimes
lx_nor_res_years_4$ex_diff4<-lx_nor_res_years_4$ex_new4-lx_nor_res_years_4$ex_old4
#if all females from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_res_years_4$ex_gap_res4<-lx_nor_res_years_4$ex_new4-(lx_nor_res_years_4$ex_old4+lx_nor_res_years_4$tau_1)
lx_nor_res_years_4$ex_gap_res_percent4<-100-((lx_nor_res_years_4$ex_gap_res4/lx_nor_res_years_4$ex_diff4)*100)
### reshaping data: per-regime wide tables -> one long table with a
### Regime label and one row per (Age, number-of-resuscitations) pair
# 1. first regime
lx_nor_res_years$Regime<-c("1846-1851")
View(lx_nor_res_years)
lx_nor_females_1<-lx_nor_res_years%>%
  select(c(Age,hazard_1:hazard_4,res_1:Regime))
View(lx_nor_females_1)
lx_nor_females_1_long<- gather (lx_nor_females_1, key=Resuscitations,
                                value=Res_number, res_1:res_10)
View(lx_nor_females_1_long)
# harmonise column names across regimes so the tables can be row-bound below
colnames(lx_nor_females_1_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4",
                                      "tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
                                      "tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
                                      "total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
                                      "ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# 2. second regime
lx_nor_res_years_2$Regime<-c("1851-1901")
View(lx_nor_res_years_2)
lx_nor_females_2<-lx_nor_res_years_2%>%
  select(c(Age,hazard_1:hazard_4,res_1:Regime))
View(lx_nor_females_2)
lx_nor_females_2_long<- gather (lx_nor_females_2, key=Resuscitations,
                                value=Res_number, res_1:res_10)
View(lx_nor_females_2_long)
colnames(lx_nor_females_2_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4",
                                      "tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
                                      "tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
                                      "total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
                                      "ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# 3. third regime
lx_nor_res_years_3$Regime<-c("1901-1951")
View(lx_nor_res_years_3)
lx_nor_females_3<-lx_nor_res_years_3%>%
  select(c(Age,hazard_1:hazard_4,res_1:Regime))
View(lx_nor_females_3)
lx_nor_females_3_long<- gather (lx_nor_females_3, key=Resuscitations,
                                value=Res_number, res_1:res_10)
View(lx_nor_females_3_long)
colnames(lx_nor_females_3_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4",
                                      "tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
                                      "tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
                                      "total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
                                      "ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# 4. fourth regime
lx_nor_res_years_4$Regime<-c("1951-2014")
View(lx_nor_res_years_4)
lx_nor_females_4<-lx_nor_res_years_4%>%
  select(c(Age,hazard_1:hazard_4,res_1:Regime))
View(lx_nor_females_4)
lx_nor_females_4_long<- gather (lx_nor_females_4, key=Resuscitations,
                                value=Res_number, res_1:res_10)
View(lx_nor_females_4_long)
colnames(lx_nor_females_4_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4",
                                      "tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
                                      "tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
                                      "total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
                                      "ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# combining everything into a single long data frame, one block per regime
library(reshape)
library(reshape2)
resuscitated_females_nornor<- rbind(lx_nor_females_1_long,lx_nor_females_2_long,lx_nor_females_3_long,lx_nor_females_4_long)
View(resuscitated_females_nornor)
# plotting the lifesaving intensities (hazards) by age, one line per regime pair
X11(width=15,height=5.5)
par(mfrow=c(1,2))
plot(x=c(1:length(lx_nor_res_years_4$Age)), lx_nor_res_years_4$hazard_1, ylim=c(-1,3.38), type="l", axes=FALSE,
     ylab="Intensity of lifesaving ", xlab="Age", lwd=2,main="a. Females")
lines(x=c(1:length(lx_nor_res_years_4$Age)), lx_nor_res_years_4$hazard_2, lty=5,lwd=2)
lines(x=c(1:length(lx_nor_res_years_4$Age)), lx_nor_res_years_4$hazard_3, lty=3,lwd=2)
lines(x=c(1:length(lx_nor_res_years_4$Age)), lx_nor_res_years_4$hazard_4, lty=1,lwd=2, col="blue")
axis(1, seq(0,length(lx_nor_res_years_4$Age),5), las=1,cex.axis=.8, lwd=1.5)
axis(2, seq(-1, 4, 0.5),lwd=1.5,cex.axis=.8, las=1)
legend("topleft", legend=c("1846-1851","1851-1901","1901-1951","1951-2014"),
       lty=c(1,5,3,1),col=c("black","black","black","blue"), bty="n")
abline(h=0, col="grey", lwd=2)  # zero line: no net lifesaving
# probability of death qx for the selected years
nor_qx_f<-lt_f%>%
  filter(country %in% c("NOR"))%>%
  filter(Age<=100)%>%
  filter(Year %in% c("1846","1851","1901","1951","2014"))%>%
  select(c("Year","Age","qx"))
View(nor_qx_f)
nor_qx_f_wide<- nor_qx_f %>%
  spread(key=Year, value=qx)
pdf(file="prob_death_females_nornor.pdf",width=5.5,height=5.5)
plot(x=c(1:length(nor_qx_f_wide$Age)),nor_qx_f_wide$`1851`, ylim=c(0,1), type="l", axes=FALSE,
     ylab="Probability of death- qx", xlab="Age", lwd=1.5, main="a) Females")
lines(x=c(1:length(nor_qx_f_wide$Age)), nor_qx_f_wide$`1901`, lty=3,lwd=1.5)
lines(x=c(1:length(nor_qx_f_wide$Age)), nor_qx_f_wide$`1951`, lty=2,lwd=1.5)
lines(x=c(1:length(nor_qx_f_wide$Age)), nor_qx_f_wide$`2014`, lty=6,lwd=1.5)
axis(1, seq(0,length(nor_qx_f_wide$Age),5), las=1,cex.axis=.5, lwd=1.5)
axis(2, seq(0, 1, 0.1),lwd=1.5,cex.axis=.5, las=1)
# NOTE(review): the legend declares 5 line types for only 4 plotted years --
# verify the lty vector matches the series actually drawn.
legend("topleft", legend=c("1851","1901","1951","2014"),
       lty=c(1,5,3,2,6), bty="n", cex = .5)
dev.off()
#ggplot for number of resuscitations and the number of resuscitated, females, nornor
library(forcats)
# collapse 5+ resuscitations into one category for a readable plot
resuscitated_females_nornor$Resuscitations<-fct_collapse(resuscitated_females_nornor$Resuscitations,
res_1 = c("res_1"),
res_2 = c("res_2"),
res_3 = c("res_3"),
res_4 = c("res_4"),
res_5_plus=c("res_5","res_6","res_7","res_8","res_9","res_10")
)
resuscitated_females_nornor$Resuscitations <- factor(resuscitated_females_nornor$Resuscitations, ordered = TRUE,
levels = c("res_1", "res_2", "res_3","res_4","res_5_plus"))
# FIX: the original called pdf() and then X11(), so the plot was drawn on the
# X11 window and dev.off() closed that window, leaving the pdf device open and
# the file empty. Open only the pdf device and print() the ggplot explicitly
# so the figure is also written when this script is source()d.
pdf(file="res_females_nornor.pdf",width=15,height=5.5)
print(
ggplot(resuscitated_females_nornor %>% filter(Regime != "1846-1851") , aes(x = Age, y = Res_number, group=Resuscitations)) +
geom_line(aes(linetype=Resuscitations))+ facet_grid(.~Regime)+
theme_bw()+scale_linetype_manual(name="Number of \nResuscitations", values = c("res_1"="longdash",
"res_2"="solid",
"res_3"="dotdash",
"res_4"="dashed",
"res_5_plus"="dotted"),
labels=c("1", "2", "3","4","5+"))+
ylab("Number of resuscitated persons")+theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
)
dev.off()
# assessing differences in life expectancy and number of life years lived in each resuscitation state
# Each tau_fem_k takes row 1 of the corresponding regime table; columns 25:41
# are tau_1..tau_10, total, and the ex_* summary columns added earlier
# (position-based -- fragile if the upstream column layout changes).
View(lx_nor_res_years)
tau_fem_1<-lx_nor_res_years[1,25:41]
colnames(tau_fem_1)<-c("tau_1" , "tau_2","tau_3","tau_4","tau_5" , "tau_6","tau_7",
"tau_8","tau_9","tau_10", "total","ex_old","ex_new","ex_diff","ex_gap_res1","ex_gap_res_percent", "Regime")
tau_fem_2<-lx_nor_res_years_2[1,25:41]
colnames(tau_fem_2)<-c("tau_1" , "tau_2","tau_3","tau_4","tau_5" , "tau_6","tau_7",
"tau_8","tau_9","tau_10", "total","ex_old","ex_new","ex_diff","ex_gap_res1","ex_gap_res_percent", "Regime")
tau_fem_3<-lx_nor_res_years_3[1,25:41]
colnames(tau_fem_3)<-c("tau_1" , "tau_2","tau_3","tau_4","tau_5" , "tau_6","tau_7",
"tau_8","tau_9","tau_10", "total","ex_old","ex_new","ex_diff","ex_gap_res1","ex_gap_res_percent", "Regime")
tau_fem_4<-lx_nor_res_years_4[1,25:41]
colnames(tau_fem_4)<-c("tau_1" , "tau_2","tau_3","tau_4","tau_5" , "tau_6","tau_7",
"tau_8","tau_9","tau_10", "total","ex_old","ex_new","ex_diff","ex_gap_res1","ex_gap_res_percent", "Regime")
tau_fem<-rbind(tau_fem_1,tau_fem_2, tau_fem_3,tau_fem_4)
View(tau_fem)
# long format: one row per (Regime, tau_k) with life years in that state
tau_fem_long <- gather(tau_fem, tau, life_years_res, tau_1:tau_10)
View(tau_fem_long)
# share of total resuscitated life years contributed by each tau state
tau_fem_prop<-tau_fem_long %>%
group_by(tau)%>%
mutate(prop_res=(life_years_res/total)*100)
View(tau_fem_prop)
tau_fem_prop$tau<-fct_collapse(tau_fem_prop$tau,
tau_1 = c("tau_1"),
tau_2 = c("tau_2"),
tau_3 = c("tau_3"),
tau_4 = c("tau_4"),
tau_5_plus=c("tau_5","tau_6","tau_7","tau_8","tau_9","tau_10")
)
tau_fem_prop$tau <- factor(tau_fem_prop$tau, ordered = TRUE,
levels = c("tau_1", "tau_2", "tau_3","tau_4","tau_5_plus"))
library(latex2exp)
# exploratory on-screen plot of the proportions by regime
X11(width=10,height=10)
ggplot(data = tau_fem_prop %>% filter(Regime!="1846-1851"), aes(x = factor(tau),y = prop_res, group = factor(Regime) )) +
geom_line(aes(color=Regime), size=1)+theme_bw()
# add here the proportions
# if animation is interesting..
#p <- resuscitated_females_nornor %>%
# plot_ly(
# x = ~Age,
# y = ~Res_number,
# color = ~Resuscitations,
#  frame = ~Regime,
# text = ~Resuscitations,
# hoverinfo = "text",
# type = 'scatter',
# mode = 'markers'
# )
#p %>%
# animation_opts(1000, transition = 300,redraw = FALSE
# )
#p
###############################################
### Implementing Vaupel's Approach : MALES  ##
###############################################
# selecting years for mortality regime comparison (same years as the female
# analysis above); depends on the global male lifetable `lt_m`
nor_lt_m<-lt_m%>%
filter(country %in% c("NOR"))%>%
filter(Year %in% c("1846","1851","1901","1951","2014"))%>%
filter(Age<=100) %>%
select(c("Year","Age","lx"))
View(nor_lt_m)
# Male survival curves (lx) by year, annotated with year labels, then combined
# side by side with the female panel `females_surv` built earlier in this script.
X11(width=7,height=7)
male_surv<-ggplot(nor_lt_m %>% filter(Year!="1846"), aes(x = Age, y = lx, group=factor(Year))) +
geom_line(aes(linetype=factor(Year)))+ ylim(1,100000)+
theme_bw()+scale_linetype_manual(name="Year", values = c(
"1851"="solid",
"1901"="dotdash",
"1951"="dashed",
"2014"="dotted"),
labels=c("1851","1901","1951","2014"))+
ggtitle("b.Males") +
ylab("lx")+annotate("text", x = c(57,65,70,75), y = c(45000,52000,78000,90000), label = c("1851","1901","1951","2014"))
#if combining males and females in one plot
# FIX: library() instead of require() -- require() only warns when the
# package is missing and the script would fail later with a cryptic error
library(gridExtra)
X11(width=15,height=7)
grid.arrange(females_surv, male_surv, ncol=2)
# FIX: removed the dangling `dev.off` (no parentheses) -- it only printed the
# function definition and closed nothing. No file device is open at this
# point, so no dev.off() call is needed; the X11 window stays on screen.
#wide format for estimation: one lx column per year
nor_lt_m_surv <- nor_lt_m %>%
spread(key=Year, value=lx)
View(nor_lt_m_surv)
#only ex: life expectancy at birth (Age 0) for each selected year
nor_lt_m_ex<-lt_m%>%
filter(country %in% c("NOR"))%>%
filter(Year %in% c("1846","1851","1901","1951","2014"))%>%
filter(Age %in% c("0"))%>%
select(c("Year","Age","ex"))
View(nor_lt_m_ex)
nor_lt_m_ex_surv <- nor_lt_m_ex %>%
spread(key=Year, value=ex)
View(nor_lt_m_ex_surv)
# taking the first row out because we are not dealing with the radix of the lifetable
lx_nor_m<-nor_lt_m_surv[-1,]
View(lx_nor_m)
# creating the variables for estimating the resuscitated:
# hazard_k = log(lx_newer / lx_older) for consecutive year pairs; columns are
# addressed by position (2..6 = lx for 1846,1851,1901,1951,2014)
lx_nor_m_full<-lx_nor_m %>%
mutate(hazard_1=log(lx_nor_m[,3]/lx_nor_m[,2]),hazard_2=log(lx_nor_m[,4]/lx_nor_m[,3]),hazard_3=log(lx_nor_m[,5]/lx_nor_m[,4]),
hazard_4=log(lx_nor_m[,6]/lx_nor_m[,5]),change_1=exp(hazard_1)-1,change_2=exp(hazard_2)-1,change_3=exp(hazard_3)-1,
change_4=exp(hazard_4)-1)
View(lx_nor_m_full)
# keep complete cases only: no survivors at the oldest ages generate NAs
lx_nor_m_full<-lx_nor_m_full[complete.cases(lx_nor_m_full), ]
# creating a data frame for the number of resuscitations - we go until ten because that is what Vaupel does
#two mortality regimes at a time. Here the first two years.
# 1. estimating life years spent in each resuscitation state for the first mortality regime comparison 1846/1851
# res_k = lx * hazard^k / k!  (Poisson terms); [,2] = lx 1846, [,7] = hazard_1
lx_nor_m_res_1<-lx_nor_m_full %>%
mutate(res_1=lx_nor_m_full[,2]*lx_nor_m_full[,7], res_2=(lx_nor_m_full[,2]*(lx_nor_m_full[,7]^2))/factorial(2), res_3=(lx_nor_m_full[,2]*(lx_nor_m_full[,7]^3))/factorial(3),
res_4=(lx_nor_m_full[,2]*(lx_nor_m_full[,7]^4))/factorial(4), res_5=(lx_nor_m_full[,2]*(lx_nor_m_full[,7]^5))/factorial(5),res_6=(lx_nor_m_full[,2]*(lx_nor_m_full[,7]^6))/factorial(6),
res_7=(lx_nor_m_full[,2]*(lx_nor_m_full[,7]^7))/factorial(7), res_8=(lx_nor_m_full[,2]*(lx_nor_m_full[,7]^8))/factorial(8),res_9=(lx_nor_m_full[,2]*(lx_nor_m_full[,7]^9))/factorial(9),
res_10=(lx_nor_m_full[,2]*(lx_nor_m_full[,7]^10))/factorial(10))
View(lx_nor_m_res_1)
# lifetable radix: divide by it to express taus in person-years per person
radix<-100000
# tau_k = total life years lived in resuscitation state k (same value
# repeated on every row, since sum() is taken over the whole column)
lx_nor_m_res_years<-lx_nor_m_res_1%>%
mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
View(lx_nor_m_res_years)
# columns 25:34 are tau_1..tau_10 (position-based; fragile to layout changes)
lx_nor_m_res_years<-lx_nor_m_res_years%>% mutate(total=sum(lx_nor_m_res_years[1,25:34]))
View(lx_nor_m_res_years)
#adding life expectancies, estimating differences
lx_nor_m_res_years$ex_old_m<-nor_lt_m_ex_surv$`1846`
lx_nor_m_res_years$ex_new_m<-nor_lt_m_ex_surv$`1851`
#mortality gap between regimes
lx_nor_m_res_years$ex_diff<-lx_nor_m_res_years$ex_new_m-lx_nor_m_res_years$ex_old_m
#if all males from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_m_res_years$ex_gap_m_res1<-lx_nor_m_res_years$ex_new_m-(lx_nor_m_res_years$ex_old_m+lx_nor_m_res_years$tau_1)
lx_nor_m_res_years$ex_gap_m_res_percent<-100-((lx_nor_m_res_years$ex_gap_m_res1/lx_nor_m_res_years$ex_diff)*100)
View(lx_nor_m_res_years)
# 2. estimating life years spent in each resuscitation state for the second mortality regime comparison 1851/1901
# same Poisson-term construction as regime 1; [,3] = lx 1851, [,8] = hazard_2
lx_nor_m_res_2<-lx_nor_m_full %>%
mutate(res_1=lx_nor_m_full[,3]*lx_nor_m_full[,8], res_2=(lx_nor_m_full[,3]*(lx_nor_m_full[,8]^2))/factorial(2), res_3=(lx_nor_m_full[,3]*(lx_nor_m_full[,8]^3))/factorial(3),
res_4=(lx_nor_m_full[,3]*(lx_nor_m_full[,8]^4))/factorial(4), res_5=(lx_nor_m_full[,3]*(lx_nor_m_full[,8]^5))/factorial(5),res_6=(lx_nor_m_full[,3]*(lx_nor_m_full[,8]^6))/factorial(6),
res_7=(lx_nor_m_full[,3]*(lx_nor_m_full[,8]^7))/factorial(7), res_8=(lx_nor_m_full[,3]*(lx_nor_m_full[,8]^8))/factorial(8),res_9=(lx_nor_m_full[,3]*(lx_nor_m_full[,8]^9))/factorial(9),
res_10=(lx_nor_m_full[,3]*(lx_nor_m_full[,8]^10))/factorial(10))
View(lx_nor_m_res_2)
lx_nor_m_res_years_2<-lx_nor_m_res_2%>%
mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
lx_nor_m_res_years_2<-lx_nor_m_res_years_2%>% mutate(total=sum(lx_nor_m_res_years_2[1,25:34]))
View(lx_nor_m_res_years_2)
#adding life expectancies, estimating differences
lx_nor_m_res_years_2$ex_old2_m<-nor_lt_m_ex_surv$`1851`
lx_nor_m_res_years_2$ex_new2_m<-nor_lt_m_ex_surv$`1901`
#mortality gap between regimes
lx_nor_m_res_years_2$ex_diff2<-lx_nor_m_res_years_2$ex_new2_m-lx_nor_m_res_years_2$ex_old2_m
#if all males from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_m_res_years_2$ex_gap_m_res2<-lx_nor_m_res_years_2$ex_new2_m-(lx_nor_m_res_years_2$ex_old2_m+lx_nor_m_res_years_2$tau_1)
lx_nor_m_res_years_2$ex_gap_m_res_percent2<-100-((lx_nor_m_res_years_2$ex_gap_m_res2/lx_nor_m_res_years_2$ex_diff2)*100)
View(lx_nor_m_res_years_2)
# 3. estimating life years spent in each resuscitation state for the third mortality regime comparison 1901/1951
# [,4] = lx 1901, [,9] = hazard_3
lx_nor_m_res_3<-lx_nor_m_full %>%
mutate(res_1=lx_nor_m_full[,4]*lx_nor_m_full[,9], res_2=(lx_nor_m_full[,4]*(lx_nor_m_full[,9]^2))/factorial(2), res_3=(lx_nor_m_full[,4]*(lx_nor_m_full[,9]^3))/factorial(3),
res_4=(lx_nor_m_full[,4]*(lx_nor_m_full[,9]^4))/factorial(4), res_5=(lx_nor_m_full[,4]*(lx_nor_m_full[,9]^5))/factorial(5),res_6=(lx_nor_m_full[,4]*(lx_nor_m_full[,9]^6))/factorial(6),
res_7=(lx_nor_m_full[,4]*(lx_nor_m_full[,9]^7))/factorial(7), res_8=(lx_nor_m_full[,4]*(lx_nor_m_full[,9]^8))/factorial(8),res_9=(lx_nor_m_full[,4]*(lx_nor_m_full[,9]^9))/factorial(9),
res_10=(lx_nor_m_full[,4]*(lx_nor_m_full[,9]^10))/factorial(10))
View(lx_nor_m_res_3)
lx_nor_m_res_years_3<-lx_nor_m_res_3%>%
mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
lx_nor_m_res_years_3<-lx_nor_m_res_years_3%>% mutate(total=sum(lx_nor_m_res_years_3[1,25:34]))
View(lx_nor_m_res_years_3)
#adding life expectancies, estimating differences
lx_nor_m_res_years_3$ex_old3_m<-nor_lt_m_ex_surv$`1901`
lx_nor_m_res_years_3$ex_new3_m<-nor_lt_m_ex_surv$`1951`
#mortality gap between regimes
lx_nor_m_res_years_3$ex_diff3<-lx_nor_m_res_years_3$ex_new3_m-lx_nor_m_res_years_3$ex_old3_m
#if all males from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_m_res_years_3$ex_gap_m_res3<-lx_nor_m_res_years_3$ex_new3_m-(lx_nor_m_res_years_3$ex_old3_m+lx_nor_m_res_years_3$tau_1)
lx_nor_m_res_years_3$ex_gap_m_res_percent3<-100-((lx_nor_m_res_years_3$ex_gap_m_res3/lx_nor_m_res_years_3$ex_diff3)*100)
View(lx_nor_m_res_years_3)
# 4. estimating life years spent in each resuscitation state for the fourth mortality regime comparison - 1951/2014
# [,5] = lx 1951, [,10] = hazard_4
lx_nor_m_res_4<-lx_nor_m_full %>%
mutate(res_1=lx_nor_m_full[,5]*lx_nor_m_full[,10], res_2=(lx_nor_m_full[,5]*(lx_nor_m_full[,10]^2))/factorial(2), res_3=(lx_nor_m_full[,5]*(lx_nor_m_full[,10]^3))/factorial(3),
res_4=(lx_nor_m_full[,5]*(lx_nor_m_full[,10]^4))/factorial(4), res_5=(lx_nor_m_full[,5]*(lx_nor_m_full[,10]^5))/factorial(5),res_6=(lx_nor_m_full[,5]*(lx_nor_m_full[,10]^6))/factorial(6),
res_7=(lx_nor_m_full[,5]*(lx_nor_m_full[,10]^7))/factorial(7), res_8=(lx_nor_m_full[,5]*(lx_nor_m_full[,10]^8))/factorial(8),res_9=(lx_nor_m_full[,5]*(lx_nor_m_full[,10]^9))/factorial(9),
res_10=(lx_nor_m_full[,5]*(lx_nor_m_full[,10]^10))/factorial(10))
View(lx_nor_m_res_4)
lx_nor_m_res_years_4<-lx_nor_m_res_4%>%
mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
lx_nor_m_res_years_4<-lx_nor_m_res_years_4%>% mutate(total=sum(lx_nor_m_res_years_4[1,25:34]))
View(lx_nor_m_res_years_4)
#adding life expectancies, estimating differences
lx_nor_m_res_years_4$ex_old4_m<-nor_lt_m_ex_surv$`1951`
lx_nor_m_res_years_4$ex_new4_m<-nor_lt_m_ex_surv$`2014`
#mortality gap between regimes
lx_nor_m_res_years_4$ex_diff4<-lx_nor_m_res_years_4$ex_new4_m-lx_nor_m_res_years_4$ex_old4_m
#if all males from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_m_res_years_4$ex_gap_m_res4<-lx_nor_m_res_years_4$ex_new4_m-(lx_nor_m_res_years_4$ex_old4_m+lx_nor_m_res_years_4$tau_1)
lx_nor_m_res_years_4$ex_gap_m_res_percent4<-100-((lx_nor_m_res_years_4$ex_gap_m_res4/lx_nor_m_res_years_4$ex_diff4)*100)
View(lx_nor_m_res_years_4)
### reshaping data: for each regime, tag with a Regime label, keep Age,
### hazards, and res/tau columns, then gather res_1..res_10 into long format.
### The colnames() calls rename positionally -- order must match the select().
# 1. first only first regime
lx_nor_m_res_years$Regime<-c("1846-1851")
View(lx_nor_m_res_years)
lx_nor_males_1<-lx_nor_m_res_years%>%
select(c(Age,hazard_1:hazard_4,res_1:Regime))
View(lx_nor_males_1)
lx_nor_males_1_long<- gather (lx_nor_males_1, key=Resuscitations,
value=Res_number, res_1:res_10)
View(lx_nor_males_1_long)
colnames(lx_nor_males_1_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4",
"tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
"tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
"total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
"ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# 2. second regime
lx_nor_m_res_years_2$Regime<-c("1851-1901")
View(lx_nor_m_res_years_2)
lx_nor_males_2<-lx_nor_m_res_years_2%>%
select(c(Age,hazard_1:hazard_4,res_1:Regime))
View(lx_nor_males_2)
lx_nor_males_2_long<- gather (lx_nor_males_2, key=Resuscitations,
value=Res_number, res_1:res_10)
View(lx_nor_males_2_long)
colnames(lx_nor_males_2_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4",
"tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
"tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
"total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
"ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# 3. third regime
lx_nor_m_res_years_3$Regime<-c("1901-1951")
View(lx_nor_m_res_years_3)
lx_nor_males_3<-lx_nor_m_res_years_3%>%
select(c(Age,hazard_1:hazard_4,res_1:Regime))
View(lx_nor_males_3)
lx_nor_males_3_long<- gather (lx_nor_males_3, key=Resuscitations,
value=Res_number, res_1:res_10)
View(lx_nor_males_3_long)
colnames(lx_nor_males_3_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4",
"tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
"tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
"total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
"ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# 4. fourth regime
lx_nor_m_res_years_4$Regime<-c("1951-2014")
View(lx_nor_m_res_years_4)
lx_nor_males_4<-lx_nor_m_res_years_4%>%
select(c(Age,hazard_1:hazard_4,res_1:Regime))
View(lx_nor_males_4)
lx_nor_males_4_long<- gather (lx_nor_males_4, key=Resuscitations,
value=Res_number, res_1:res_10)
View(lx_nor_males_4_long)
colnames(lx_nor_males_4_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4",
"tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
"tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
"total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
"ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# combining everything: stack the four male regime-comparison long tables
library(reshape)
library(reshape2)
resuscitated_males_nornor<- rbind(lx_nor_males_1_long,lx_nor_males_2_long,lx_nor_males_3_long,lx_nor_males_4_long)
View(resuscitated_males_nornor)
# plotting the lifesaving hazards (one curve per regime comparison, males)
X11()
par(mfrow=c(1,2))
plot(x=c(1:length(lx_nor_m_res_years_4$Age)), lx_nor_m_res_years_4$hazard_1, ylim=c(-1,3.38), type="l", axes=FALSE,
ylab="Intensity of lifesaving ", xlab="Age", lwd=2, main="b. Males")
lines(x=c(1:length(lx_nor_m_res_years_4$Age)), lx_nor_m_res_years_4$hazard_2, lty=5,lwd=2)
lines(x=c(1:length(lx_nor_m_res_years_4$Age)), lx_nor_m_res_years_4$hazard_3, lty=3,lwd=2)
lines(x=c(1:length(lx_nor_m_res_years_4$Age)), lx_nor_m_res_years_4$hazard_4, lty=1,lwd=2, col="blue")
axis(1, seq(0,length(lx_nor_m_res_years_4$Age),5), las=1,cex.axis=.8, lwd=1.5)
axis(2, seq(-1, 4, 0.5),lwd=1.5,cex.axis=.8, las=1)
# legend line types/colours match the lines drawn above (1, 5, 3, 1-blue)
legend("topleft", legend=c("1846-1851","1851-1901","1901-1951","1951-2014"),
lty=c(1,5,3,1),col=c("black","black","black","blue"), bty="n")
abline(h=0,col="grey", lwd=2)
# force of mortality (males): probability of death qx by age for the selected
# years, written to prob_death_males_nornor.pdf. Depends on the global `lt_m`.
nor_qx_m<-lt_m%>%
filter(country %in% c("NOR"))%>%
filter(Age<=100)%>%
filter(Year %in% c("1846","1851","1901","1951","2014"))%>%
select(c("Year","Age","qx"))
View(nor_qx_m)
nor_qx_m_wide<- nor_qx_m %>%
spread(key=Year, value=qx)
# FIX: removed the stray X11() that opened an empty on-screen window right
# before the pdf device; all drawing goes to the pdf opened below (matches
# the equivalent female block).
pdf(file="prob_death_males_nornor.pdf",width=5.5,height=5.5)
plot(x=c(1:length(nor_qx_m_wide$Age)),nor_qx_m_wide$`1851`, ylim=c(0,1), type="l", axes=FALSE,
ylab="Probability of death- qx", xlab="Age", lwd=1.5, main="b. Males")
lines(x=c(1:length(nor_qx_m_wide$Age)), nor_qx_m_wide$`1901`, lty=3,lwd=1.5)
lines(x=c(1:length(nor_qx_m_wide$Age)), nor_qx_m_wide$`1951`, lty=2,lwd=1.5)
lines(x=c(1:length(nor_qx_m_wide$Age)), nor_qx_m_wide$`2014`, lty=6,lwd=1.5)
axis(1, seq(0,length(nor_qx_m_wide$Age),5), las=1,cex.axis=.8, lwd=1.5)
axis(2, seq(0, 1, 0.1),lwd=1.5,cex.axis=.8, las=1)
# FIX: legend line types now match the plotted series (1851=solid 1,
# 1901=dotted 3, 1951=dashed 2, 2014=twodash 6); the original passed five
# lty values c(1,5,3,2,6) for four labels, mislabelling every year after 1851.
legend("topleft", legend=c("1851","1901","1951","2014"),
lty=c(1,3,2,6), bty="n", cex = .8)
dev.off()
#ggplot for number of resuscitations and the number of resuscitated, males, nornor
library(forcats)
# collapse 5+ resuscitations into one category (same scheme as for females)
resuscitated_males_nornor$Resuscitations<-fct_collapse(resuscitated_males_nornor$Resuscitations,
res_1 = c("res_1"),
res_2 = c("res_2"),
res_3 = c("res_3"),
res_4 = c("res_4"),
res_5_plus=c("res_5","res_6","res_7","res_8","res_9","res_10"))
resuscitated_males_nornor$Resuscitations <- factor(resuscitated_males_nornor$Resuscitations, ordered = TRUE,
levels = c("res_1", "res_2", "res_3","res_4","res_5_plus"))
# combine both sexes into one table with a Sex factor for faceting
resuscitated_females_nornor$Sex<-factor("Females")
resuscitated_males_nornor$Sex<-factor("Males")
resuscitated_all<-rbind (resuscitated_females_nornor,resuscitated_males_nornor)
View(resuscitated_all)
# Faceted plot (Sex x Regime) of resuscitated persons by age, written to
# res_all_nor.pdf.
# FIX: the original called pdf() and then X11(), so the plot was drawn on the
# X11 window and dev.off() closed that window, leaving the pdf device open and
# the file empty. Open only the pdf device and print() the ggplot explicitly
# so the figure is also written when this script is source()d.
pdf(file="res_all_nor.pdf",width=15,height=8)
print(
ggplot(resuscitated_all %>% filter(Regime!="1846-1851"), aes(x = Age, y = Res_number, group=Resuscitations)) +
geom_line(aes(linetype=Resuscitations))+ facet_grid(Sex~ Regime)+
theme_bw()+scale_linetype_manual(name="Number of \nResuscitations", values = c("res_1"="longdash",
"res_2"="solid",
"res_3"="dotdash",
"res_4"="dashed",
"res_5_plus"="dotted"),
labels=c("1", "2", "3","4","5+"))+
ylab("Number of resuscitated persons")+theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
)
dev.off()
#################################################
#### comparing gender gaps ######################
#################################################
# 1. females versus male regime 1846
# creating the variables for estimating the resuscitated.
# Rename the female wide table (lx_nor, built earlier in this script) so its
# year columns are distinguishable after binding with the male table.
colnames (lx_nor )<- c( "Age","fem_1846","fem_1851","fem_1901","fem_1951","fem_2014")
lx_nor_compare<-cbind(lx_nor,lx_nor_m)
View(lx_nor_compare)
# drop column 7 = the duplicated Age column contributed by lx_nor_m
lx_nor_compare<-lx_nor_compare[,-c(7)]
# hazard_k = log(female lx / male lx) for each year; after dropping the
# duplicate Age, columns 2:6 are female lx and 7:11 are male lx
lx_nor_compare_full<- lx_nor_compare %>%
mutate(hazard_1=log(lx_nor_compare[,2]/lx_nor_compare[,7]),hazard_2=log(lx_nor_compare[,3]/lx_nor_compare[,8]),hazard_3=log(lx_nor_compare[,4]/lx_nor_compare[,9]),
hazard_4=log(lx_nor_compare[,5]/lx_nor_compare[,10]),hazard_5=log(lx_nor_compare[,6]/lx_nor_compare[,11]), change_1=exp(hazard_1)-1,change_2=exp(hazard_2)-1,change_3=exp(hazard_3)-1,
change_4=exp(hazard_4)-1,change_5=exp(hazard_5)-1)
View(lx_nor_compare_full)
# keep complete cases only: no survivors at the oldest ages generate NAs
lx_nor_compare_full<-lx_nor_compare_full[complete.cases(lx_nor_compare_full), ]
# creating a data frame for the number of resuscitations - we go until ten because that is what Vaupel does
#two mortality regimes at a time. Here the first two years.
# 1. estimating life years spent in each resuscitation state for the first mortality regime comparison between sex 1846/1846
# res_k = male lx * hazard^k / k!; [,7] = male lx 1846, [,12] = hazard_1
lx_nor_compare_res_1<-lx_nor_compare_full %>%
mutate(res_1=lx_nor_compare_full[,7]*lx_nor_compare_full[,12], res_2=(lx_nor_compare_full[,7]*(lx_nor_compare_full[,12]^2))/factorial(2), res_3=(lx_nor_compare_full[,7]*(lx_nor_compare_full[,12]^3))/factorial(3),
res_4=(lx_nor_compare_full[,7]*(lx_nor_compare_full[,12]^4))/factorial(4), res_5=(lx_nor_compare_full[,7]*(lx_nor_compare_full[,12]^5))/factorial(5),res_6=(lx_nor_compare_full[,7]*(lx_nor_compare_full[,12]^6))/factorial(6),
res_7=(lx_nor_compare_full[,7]*(lx_nor_compare_full[,12]^7))/factorial(7), res_8=(lx_nor_compare_full[,7]*(lx_nor_compare_full[,12]^8))/factorial(8),res_9=(lx_nor_compare_full[,7]*(lx_nor_compare_full[,12]^9))/factorial(9),
res_10=(lx_nor_compare_full[,7]*(lx_nor_compare_full[,12]^10))/factorial(10))
View(lx_nor_compare_res_1)
radix<-100000
lx_nor_compare_res_years<-lx_nor_compare_res_1%>%
mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
View(lx_nor_compare_res_years)
# columns 32:41 are tau_1..tau_10 in this wider comparison table
lx_nor_compare_res_years<-lx_nor_compare_res_years%>% mutate(total=sum(lx_nor_compare_res_years[1,32:41]))
View(lx_nor_compare_res_years)
#adding life expectancies, estimating differences
lx_nor_compare_res_years$ex_male<-nor_lt_m_ex_surv$`1846`
lx_nor_compare_res_years$ex_female<-nor_lt_f_ex_surv$`1846`
#mortality gap between regimes (here: female-male gap within the same year)
lx_nor_compare_res_years$ex_diff_compare<-lx_nor_compare_res_years$ex_female-lx_nor_compare_res_years$ex_male
#if all males from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_compare_res_years$ex_gap_compare_res1<-lx_nor_compare_res_years$ex_female-(lx_nor_compare_res_years$ex_male+lx_nor_compare_res_years$tau_1)
lx_nor_compare_res_years$ex_gap_compare_res_percent<-100-((lx_nor_compare_res_years$ex_gap_compare_res1/lx_nor_compare_res_years$ex_diff_compare)*100)
View(lx_nor_compare_res_years)
# 2. females versus male regime 1851
# [,8] = male lx 1851, [,13] = hazard_2
lx_nor_compare_res_2<-lx_nor_compare_full %>%
mutate(res_1=lx_nor_compare_full[,8]*lx_nor_compare_full[,13], res_2=(lx_nor_compare_full[,8]*(lx_nor_compare_full[,13]^2))/factorial(2), res_3=(lx_nor_compare_full[,8]*(lx_nor_compare_full[,13]^3))/factorial(3),
res_4=(lx_nor_compare_full[,8]*(lx_nor_compare_full[,13]^4))/factorial(4), res_5=(lx_nor_compare_full[,8]*(lx_nor_compare_full[,13]^5))/factorial(5),res_6=(lx_nor_compare_full[,8]*(lx_nor_compare_full[,13]^6))/factorial(6),
res_7=(lx_nor_compare_full[,8]*(lx_nor_compare_full[,13]^7))/factorial(7), res_8=(lx_nor_compare_full[,8]*(lx_nor_compare_full[,13]^8))/factorial(8),res_9=(lx_nor_compare_full[,8]*(lx_nor_compare_full[,13]^9))/factorial(9),
res_10=(lx_nor_compare_full[,8]*(lx_nor_compare_full[,13]^10))/factorial(10))
View(lx_nor_compare_res_2)
radix<-100000
lx_nor_compare_res_years2<-lx_nor_compare_res_2%>%
mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
View(lx_nor_compare_res_years2)
lx_nor_compare_res_years2<-lx_nor_compare_res_years2%>% mutate(total=sum(lx_nor_compare_res_years2[1,32:41]))
View(lx_nor_compare_res_years2)
#adding life expectancies, estimating differences
lx_nor_compare_res_years2$ex_male<-nor_lt_m_ex_surv$`1851`
lx_nor_compare_res_years2$ex_female<-nor_lt_f_ex_surv$`1851`
#mortality gap between regimes
lx_nor_compare_res_years2$ex_diff_compare<-lx_nor_compare_res_years2$ex_female-lx_nor_compare_res_years2$ex_male
#if all males from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_compare_res_years2$ex_gap_compare_res1<-lx_nor_compare_res_years2$ex_female-(lx_nor_compare_res_years2$ex_male+lx_nor_compare_res_years2$tau_1)
lx_nor_compare_res_years2$ex_gap_compare_res_percent<-100-((lx_nor_compare_res_years2$ex_gap_compare_res1/lx_nor_compare_res_years2$ex_diff_compare)*100)
View(lx_nor_compare_res_years2)
# 3. females versus male regime 1901
# [,9] = male lx 1901, [,14] = hazard_3
lx_nor_compare_res_3<-lx_nor_compare_full %>%
mutate(res_1=lx_nor_compare_full[,9]*lx_nor_compare_full[,14], res_2=(lx_nor_compare_full[,9]*(lx_nor_compare_full[,14]^2))/factorial(2), res_3=(lx_nor_compare_full[,9]*(lx_nor_compare_full[,14]^3))/factorial(3),
res_4=(lx_nor_compare_full[,9]*(lx_nor_compare_full[,14]^4))/factorial(4), res_5=(lx_nor_compare_full[,9]*(lx_nor_compare_full[,14]^5))/factorial(5),res_6=(lx_nor_compare_full[,9]*(lx_nor_compare_full[,14]^6))/factorial(6),
res_7=(lx_nor_compare_full[,9]*(lx_nor_compare_full[,14]^7))/factorial(7), res_8=(lx_nor_compare_full[,9]*(lx_nor_compare_full[,14]^8))/factorial(8),res_9=(lx_nor_compare_full[,9]*(lx_nor_compare_full[,14]^9))/factorial(9),
res_10=(lx_nor_compare_full[,9]*(lx_nor_compare_full[,14]^10))/factorial(10))
View(lx_nor_compare_res_3)
radix<-100000
lx_nor_compare_res_years3<-lx_nor_compare_res_3%>%
mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
View(lx_nor_compare_res_years3)
lx_nor_compare_res_years3<-lx_nor_compare_res_years3%>% mutate(total=sum(lx_nor_compare_res_years3[1,32:41]))
View(lx_nor_compare_res_years3)
#adding life expectancies, estimating differences
lx_nor_compare_res_years3$ex_male<-nor_lt_m_ex_surv$`1901`
lx_nor_compare_res_years3$ex_female<-nor_lt_f_ex_surv$`1901`
#mortality gap between regimes
lx_nor_compare_res_years3$ex_diff_compare<-lx_nor_compare_res_years3$ex_female-lx_nor_compare_res_years3$ex_male
#if all males from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_compare_res_years3$ex_gap_compare_res1<-lx_nor_compare_res_years3$ex_female-(lx_nor_compare_res_years3$ex_male+lx_nor_compare_res_years3$tau_1)
lx_nor_compare_res_years3$ex_gap_compare_res_percent<-100-((lx_nor_compare_res_years3$ex_gap_compare_res1/lx_nor_compare_res_years3$ex_diff_compare)*100)
View(lx_nor_compare_res_years3)
# 4. females versus male regime 1951 (comment number fixed; was duplicated "3.")
# [,10] = male lx 1951, [,15] = hazard_4
lx_nor_compare_res_4<-lx_nor_compare_full %>%
mutate(res_1=lx_nor_compare_full[,10]*lx_nor_compare_full[,15], res_2=(lx_nor_compare_full[,10]*(lx_nor_compare_full[,15]^2))/factorial(2), res_3=(lx_nor_compare_full[,10]*(lx_nor_compare_full[,15]^3))/factorial(3),
res_4=(lx_nor_compare_full[,10]*(lx_nor_compare_full[,15]^4))/factorial(4), res_5=(lx_nor_compare_full[,10]*(lx_nor_compare_full[,15]^5))/factorial(5),res_6=(lx_nor_compare_full[,10]*(lx_nor_compare_full[,15]^6))/factorial(6),
res_7=(lx_nor_compare_full[,10]*(lx_nor_compare_full[,15]^7))/factorial(7), res_8=(lx_nor_compare_full[,10]*(lx_nor_compare_full[,15]^8))/factorial(8),res_9=(lx_nor_compare_full[,10]*(lx_nor_compare_full[,15]^9))/factorial(9),
res_10=(lx_nor_compare_full[,10]*(lx_nor_compare_full[,15]^10))/factorial(10))
View(lx_nor_compare_res_4)
radix<-100000
lx_nor_compare_res_years4<-lx_nor_compare_res_4%>%
mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
View(lx_nor_compare_res_years4)
lx_nor_compare_res_years4<-lx_nor_compare_res_years4%>% mutate(total=sum(lx_nor_compare_res_years4[1,32:41]))
View(lx_nor_compare_res_years4)
#adding life expectancies, estimating differences
lx_nor_compare_res_years4$ex_male<-nor_lt_m_ex_surv$`1951`
lx_nor_compare_res_years4$ex_female<-nor_lt_f_ex_surv$`1951`
#mortality gap between regimes
lx_nor_compare_res_years4$ex_diff_compare<-lx_nor_compare_res_years4$ex_female-lx_nor_compare_res_years4$ex_male
#if all males from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_compare_res_years4$ex_gap_compare_res1<-lx_nor_compare_res_years4$ex_female-(lx_nor_compare_res_years4$ex_male+lx_nor_compare_res_years4$tau_1)
lx_nor_compare_res_years4$ex_gap_compare_res_percent<-100-((lx_nor_compare_res_years4$ex_gap_compare_res1/lx_nor_compare_res_years4$ex_diff_compare)*100)
View(lx_nor_compare_res_years4)
# 5. females versus male regime 2014 (comment number fixed; was "4.")
# [,11] = male lx 2014, [,16] = hazard_5
lx_nor_compare_res_5<-lx_nor_compare_full %>%
mutate(res_1=lx_nor_compare_full[,11]*lx_nor_compare_full[,16], res_2=(lx_nor_compare_full[,11]*(lx_nor_compare_full[,16]^2))/factorial(2), res_3=(lx_nor_compare_full[,11]*(lx_nor_compare_full[,16]^3))/factorial(3),
res_4=(lx_nor_compare_full[,11]*(lx_nor_compare_full[,16]^4))/factorial(4), res_5=(lx_nor_compare_full[,11]*(lx_nor_compare_full[,16]^5))/factorial(5),res_6=(lx_nor_compare_full[,11]*(lx_nor_compare_full[,16]^6))/factorial(6),
res_7=(lx_nor_compare_full[,11]*(lx_nor_compare_full[,16]^7))/factorial(7), res_8=(lx_nor_compare_full[,11]*(lx_nor_compare_full[,16]^8))/factorial(8),res_9=(lx_nor_compare_full[,11]*(lx_nor_compare_full[,16]^9))/factorial(9),
res_10=(lx_nor_compare_full[,11]*(lx_nor_compare_full[,16]^10))/factorial(10))
View(lx_nor_compare_res_5)
radix<-100000
lx_nor_compare_res_years5<-lx_nor_compare_res_5%>%
mutate(tau_1=sum(res_1/radix),tau_2=sum(res_2/radix),tau_3=sum(res_3/radix),tau_4=sum(res_4/radix),tau_5=sum(res_5/radix),tau_6=sum(res_6/radix),
tau_7=sum(res_7/radix),tau_8=sum(res_8/radix),tau_9=sum(res_9/radix),tau_10=sum(res_10/radix))
View(lx_nor_compare_res_years5)
lx_nor_compare_res_years5<-lx_nor_compare_res_years5%>% mutate(total=sum(lx_nor_compare_res_years5[1,32:41]))
View(lx_nor_compare_res_years5)
#adding life expectancies, estimating differences
lx_nor_compare_res_years5$ex_male<-nor_lt_m_ex_surv$`2014`
lx_nor_compare_res_years5$ex_female<-nor_lt_f_ex_surv$`2014`
#mortality gap between regimes
lx_nor_compare_res_years5$ex_diff_compare<-lx_nor_compare_res_years5$ex_female-lx_nor_compare_res_years5$ex_male
#if all males from the higher mortality regime had their lives saved once, the gap would be:
lx_nor_compare_res_years5$ex_gap_compare_res1<-lx_nor_compare_res_years5$ex_female-(lx_nor_compare_res_years5$ex_male+lx_nor_compare_res_years5$tau_1)
lx_nor_compare_res_years5$ex_gap_compare_res_percent<-100-((lx_nor_compare_res_years5$ex_gap_compare_res1/lx_nor_compare_res_years5$ex_diff_compare)*100)
### reshaping data
# 1. first only first regime
lx_nor_compare_res_years$Regime<-c("1846")
View(lx_nor_compare_res_years)
lx_nor_compare_1<-lx_nor_compare_res_years%>%
select(c(Age,hazard_1:hazard_5,res_1:Regime))
View(lx_nor_compare_1)
lx_nor_compare_1_long<- gather (lx_nor_compare_1, key=Resuscitations,
value=Res_number, res_1:res_10)
View(lx_nor_compare_1_long)
colnames(lx_nor_compare_1_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4","hazard_5" ,
"tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
"tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
"total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
"ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# 2. second regime
lx_nor_compare_res_years2$Regime<-c("1851")
View(lx_nor_compare_res_years2)
lx_nor_compare_2<-lx_nor_compare_res_years2%>%
select(c(Age,hazard_1:hazard_5,res_1:Regime))
View(lx_nor_compare_2)
lx_nor_compare_2_long<- gather (lx_nor_compare_2, key=Resuscitations,
value=Res_number, res_1:res_10)
View(lx_nor_compare_2_long)
colnames(lx_nor_compare_2_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" ,"hazard_5", "hazard_4",
"tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
"tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
"total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
"ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# 3. third regime
lx_nor_compare_res_years3$Regime<-c("1901")
lx_nor_compare_3<-lx_nor_compare_res_years3%>%
select(c(Age,hazard_1:hazard_5,res_1:Regime))
View(lx_nor_compare_3)
lx_nor_compare_3_long<- gather (lx_nor_compare_3, key=Resuscitations,
value=Res_number, res_1:res_10)
View(lx_nor_compare_3_long)
colnames(lx_nor_compare_3_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4","hazard_5",
"tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
"tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
"total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
"ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# 4. fourth regime
lx_nor_compare_res_years4$Regime<-c("1951")
View(lx_nor_compare_res_years4)
lx_nor_compare_4<-lx_nor_compare_res_years4%>%
select(c(Age,hazard_1:hazard_5,res_1:Regime))
View(lx_nor_compare_4)
lx_nor_compare_4_long<- gather (lx_nor_compare_4, key=Resuscitations,
value=Res_number, res_1:res_10)
View(lx_nor_compare_4_long)
colnames(lx_nor_compare_4_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4", "hazard_5",
"tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
"tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
"total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
"ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# 5. fifth regime
lx_nor_compare_res_years5$Regime<-c("2014")
View(lx_nor_compare_res_years5)
lx_nor_compare_5<-lx_nor_compare_res_years5%>%
select(c(Age,hazard_1:hazard_5,res_1:Regime))
View(lx_nor_compare_5)
lx_nor_compare_5_long<- gather (lx_nor_compare_5, key=Resuscitations,
value=Res_number, res_1:res_10)
View(lx_nor_compare_5_long)
colnames(lx_nor_compare_5_long) <- c( "Age" , "hazard_1" , "hazard_2" , "hazard_3" , "hazard_4", "hazard_5",
"tau_1" , "tau_2" , "tau_3" , "tau_4" , "tau_5" ,
"tau_6" , "tau_7" , "tau_8" , "tau_9" , "tau_10" ,
"total" , "ex_old", "ex_new" , "ex_diff" , "ex_gap_res1" ,
"ex_gap_res_percent", "Regime" , "Resuscitations" , "Res_number" )
# combining everything
library(reshape)
library(reshape2)
resuscitated_compare_nornor<- rbind(lx_nor_compare_2_long,lx_nor_compare_3_long,lx_nor_compare_4_long,lx_nor_compare_5_long)
View(resuscitated_compare_nornor)
# plotting the hazards and taus
X11()
par(mfrow=c(1,2))
plot(x=c(1:length(lx_nor_compare_res_years4$Age)), lx_nor_compare_res_years4$hazard_1, ylim=c(-1,3.38), type="l", axes=FALSE,
ylab="Intensity of lifesaving ", xlab="Age", lwd=2)
lines(x=c(1:length(lx_nor_compare_res_years4$Age)), lx_nor_compare_res_years4$hazard_2, lty=5,lwd=2)
lines(x=c(1:length(lx_nor_compare_res_years4$Age)), lx_nor_compare_res_years4$hazard_3, lty=3,lwd=2)
lines(x=c(1:length(lx_nor_compare_res_years4$Age)), lx_nor_compare_res_years4$hazard_4, lty=2,lwd=2)
lines(x=c(1:length(lx_nor_compare_res_years4$Age)), lx_nor_compare_res_years4$hazard_5, lty=1,lwd=2)
axis(1, seq(0,length(lx_nor_compare_res_years4$Age),5), las=1,cex.axis=.8, lwd=1.5)
axis(2, seq(-1, 4, 0.5),lwd=1.5,cex.axis=.8, las=1)
legend("topleft", legend=c("1846","1851","1901","1951","2014"),
lty=c(1,5,3,2,1), bty="n")
abline(h=0,col="grey", lwd=2)
#ggplot for number of resuscitations and the number of resuscitated, males, nornor
library(forcats)
resuscitated_compare_nornor$Resuscitations<-fct_collapse(resuscitated_compare_nornor$Resuscitations,
res_1 = c("res_1"),
res_2 = c("res_2"),
res_3 = c("res_3"),
res_4 = c("res_4"),
res_5_plus=c("res_5","res_6","res_7","res_8","res_9","res_10")
)
resuscitated_compare_nornor$Resuscitations <- factor(resuscitated_compare_nornor$Resuscitations, ordered = TRUE,
levels = c("res_1", "res_2", "res_3","res_4","res_5_plus"))
pdf(file="res_compare_nornor2.pdf",width=15,height=8)
X11(width=15,height=8)
ggplot(resuscitated_compare_nornor %>% filter(Regime!="1846"), aes(x = Age, y = Res_number, group=Resuscitations)) +
geom_line(aes(linetype=Resuscitations))+ facet_grid(~ Regime)+
theme_bw()+scale_linetype_manual(name="Number of \nResuscitations", values = c("res_1"="longdash",
"res_2"="solid",
"res_3"="dotdash",
"res_4"="dashed",
"res_5_plus"="dotted"),
labels=c("1", "2", "3","4","5+"))+
ylab("Number of resuscitated persons")+theme_bw()+
theme(panel.grid.major = element_blank(), panel.grid.minor = element_blank())
dev.off()
# assesing differences in life expectancy and number of life years lived in each resuscitation state
View(lx_nor_compare_res_years)
tau_compare_1<-lx_nor_compare_res_years[1,32:48]
colnames(tau_compare_1)<-c("tau_1" , "tau_2","tau_3","tau_4","tau_5" , "tau_6","tau_7",
"tau_8","tau_9","tau_10", "total","ex_old","ex_new","ex_diff","ex_gap_res1","ex_gap_res_percent", "Regime")
tau_compare_2<-lx_nor_compare_res_years2[1,32:48]
colnames(tau_compare_2)<-c("tau_1" , "tau_2","tau_3","tau_4","tau_5" , "tau_6","tau_7",
"tau_8","tau_9","tau_10", "total","ex_old","ex_new","ex_diff","ex_gap_res1","ex_gap_res_percent", "Regime")
tau_compare_3<-lx_nor_compare_res_years3[1,32:48]
colnames(tau_compare_3)<-c("tau_1" , "tau_2","tau_3","tau_4","tau_5" , "tau_6","tau_7",
"tau_8","tau_9","tau_10", "total","ex_old","ex_new","ex_diff","ex_gap_res1","ex_gap_res_percent", "Regime")
tau_compare_4<-lx_nor_compare_res_years4[1,32:48]
colnames(tau_compare_4)<-c("tau_1" , "tau_2","tau_3","tau_4","tau_5" , "tau_6","tau_7",
"tau_8","tau_9","tau_10", "total","ex_old","ex_new","ex_diff","ex_gap_res1","ex_gap_res_percent", "Regime")
tau_compare_5<-lx_nor_compare_res_years5[1,32:48]
colnames(tau_compare_5)<-c("tau_1" , "tau_2","tau_3","tau_4","tau_5" , "tau_6","tau_7",
"tau_8","tau_9","tau_10", "total","ex_old","ex_new","ex_diff","ex_gap_res1","ex_gap_res_percent", "Regime")
tau_compare<-rbind(tau_compare_1,tau_compare_2, tau_compare_3,tau_compare_4,tau_compare_5)
View(tau_compare)
write.table(tau_compare, file="tau_compare_nor.csv", sep=",", row.names = F) #saving for using excel as well
tau_compare_long <- gather(tau_compare, tau, life_years_res, tau_1:tau_10)
View(tau_compare_long)
tau_compare_prop<-tau_compare_long %>%
group_by(tau)%>%
mutate(prop_res=(life_years_res/total)*100)
View(tau_compare_prop)
tau_compare_prop$tau<-fct_collapse(tau_compare_prop$tau,
tau_1 = c("tau_1"),
tau_2 = c("tau_2"),
tau_3 = c("tau_3"),
tau_4 = c("tau_4"),
tau_5_plus=c("tau_5","tau_6","tau_7","tau_8","tau_9","tau_10")
)
tau_compare_prop$tau <- factor(tau_compare_prop$tau, ordered = TRUE,
levels = c("tau_1", "tau_2", "tau_3","tau_4","tau_5_plus"))
library(latex2exp)
X11(width=10,height=10)
ggplot(data = tau_compare_prop %>% filter(Regime!="1846"), aes(x = factor(tau),y = prop_res, group = factor(Regime) )) +
geom_line(aes(color=Regime), size=1)+theme_bw()
|
91b88e282b04e45727a4ed8004f8e4d32162751d | 38fbb4560f07db4f7436f09e9d929a5221f5ace8 | /wordcloud_ex.R | d236bea015bb24dea6526eeb044ae403d5ceb06d | [
"MIT"
] | permissive | Jeongryeon/ryun | 798ec921e2bccaba870cb62f6d57b6691e2ca240 | 736f78d082f4fefde1acedfe2433da924166b2ae | refs/heads/master | 2020-04-27T22:22:56.172428 | 2019-04-10T04:10:36 | 2019-04-10T04:10:36 | 174,734,991 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,298 | r | wordcloud_ex.R | install.packages("tuber")
install.packages("httpuv")
install.packages("dplyr")
install.packages("stringr")
install.packages("tibble")
install.packages("KoNLP")
install.packages("tm")
install.packages("wordcloud2")
install.packages("arules")
install.packages("igraph")
install.packages("combinat")
install.packages("SnowballC")
install.packages("RColorBrewer")
install.packages("wordcloud")
library(tuber)
library(httpuv)
library(dplyr)
library(stringr)
library(tibble)
library(tm)
library(KoNLP)
library(wordcloud2)
useNIADic()
library(arules)
library(igraph)
library(combinat)
library(SnowballC)
library(RColorBrewer)
library(wordcloud)
app_id <-"152593731585-3fu2n5m15ib2m2hpajjf7i78qvudsb9v.apps.googleusercontent.com"
app_secret<-"SYnhxexEduwYRLMjILt2vzFk"
yt_oauth(app_id,app_secret,token = "")
chanel_if<-yt_search(term="", type="video", channel_id = "UC-g0gSStENkYPXFRsKrlvyA")
# Accumulator for per-video comment data frames; rbind() with no args is NULL.
video_cmt_bind= rbind()
# From the comment data of the channel's videos, extract the original comment
# text, the video id, and the commenting user's display name into a data frame.
for (i in 1:5){
# Crawl all comments of video i in the channel and store them in a new variable
video_f_comments<-get_all_comments(chanel_if$video_id[i])
# Extract the fields below into a data frame and store it as list element i
video_cmt_bind[[i]]= data.frame(video_f_comments$authorDisplayName, video_f_comments$textOriginal, video_f_comments$videoId)
# Turn the list into a matrix via rbind
# NOTE(review): the steps below run on every loop iteration; the csv written
# on the final pass is the one that survives — consider moving them after the loop.
video_to_matrix<-rbind(video_cmt_bind)
# Convert to a data frame
video_to_dataframe<-data.frame(do.call(rbind,video_to_matrix))
# Rename the columns
names(video_to_dataframe) <- c("AuthorName", "Comments", "VideoId")
# Save as a csv file
write.csv(video_to_dataframe, file = "english_coach.csv")
}
#데이터 불러오기
english_txt<-read.csv(file = "english_coach.csv",header = T, stringsAsFactors = F )
english_txt
typeof(english_txt)
english_txt_ed<-as.vector(english_txt$Comments)
english_txt_ed
typeof(english_txt_ed)
english_ed_cor<- Corpus(VectorSource(english_txt_ed))
str(english_ed_cor)
#공백제거
english_ed_cor <- tm_map(english_ed_cor, stripWhitespace)
# Convert the text to lower case(소문자변경-사전에 있는 내용과 비교하기 위해)
english_ed_cor<- tm_map(english_ed_cor, tolower)
# Remove numbers(숫자제거)
english_ed_cor <- tm_map(english_ed_cor, removeNumbers)
# Remove english common stopwords(뛰어쓰기와 시제 제거 )
english_ed_cor <- tm_map(english_ed_cor, removeWords, stopwords("english"))
# Remove punctuations(구두점제거)
english_ed_cor <- tm_map(english_ed_cor, removePunctuation)
# Text stemming(어근만 추출한다)
english_ed_cor <- tm_map(english_ed_cor, stemDocument)
#정제된 데이터 TermDocumentMatrix를 사용
english_tdm <- TermDocumentMatrix(english_ed_cor)
#문장 별 단어 빈도수
english_tdm_ma <- as.matrix(english_tdm)
english_tdm
english_tdm_ma
write.csv(english_tdm_ma, file = "english_coach_tdm_ma.csv")
#term document matrix의 결과를 합해서 내림차순으로 정렬
english_sort<- sort(rowSums(english_tdm_ma),decreasing=TRUE)
english_df <- data.frame(word = names(english_sort),freq=english_sort)
head(english_sort, 10)
head(english_df, 10)
typeof(english_sort)
typeof(english_df)
#워드클라우드 생성
wordcloud(words = english_df$word,
freq = english_df$freq,
min.freq = 1,
max.words=200,
random.order=FALSE,
rot.per=0.35,
colors=brewer.pal(8, "Dark2"))
#TF-IDF 시작 (초기버젼 문장 별 IDF 구하기)
#문장 별 단어 빈도수 행의 합 구하기
english_tdm_ma_rowSum<-rowSums(english_tdm_ma)
write.csv(english_tdm_ma_rowSum, file = "english_tdm_ma_rowSum.csv")
typeof(english_tdm_ma)
english_tdm_ma_df<-as.data.frame(english_tdm_ma)
english_tdm_ma_df
write.csv(english_tdm_ma_df, file = "english_tdm_ma_df.csv")
english_tdm_ma_df[1,]
write.csv(english_tdm_ma_df[1,], file = "english_tdm_ma_df[1,].csv")
english_tdm_ma_df[19,]
zxc<-duplicated(english_tdm_ma_df)
write.csv(zxc, file = "zxc.csv")
|
c9f1a99e7f1ed12e1e89a1d1733a186f4a732f96 | 8b3ad0b9645fd35b739805c762adbaaa71b95be8 | /man/check_gene_filter1.Rd | 0e33ead733834d465d638d3061ffea9fd398c073 | [] | no_license | wikiselev/clustools | 51d33502e91fcca854664ebbfd1e20417f4e1b22 | 800d0bf479e2e4679f7d14264cd9e0e7f917b164 | refs/heads/master | 2021-01-10T02:20:14.157018 | 2016-01-21T11:07:20 | 2016-01-21T11:07:20 | 36,796,775 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,123 | rd | check_gene_filter1.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/machine-learning.R
\name{check_gene_filter1}
\alias{check_gene_filter1}
\title{Check the first filtering step.}
\usage{
check_gene_filter1(d, min.cells, max.cells, min.reads, n.dim)
}
\arguments{
\item{d}{Expression matrix with rows as genes and columns as cells. Column
names of d should represent the ground truth clustering indices.}
\item{min.cells}{Minimum number of cells in which a given gene is expressed.}
\item{max.cells}{Maximum number of cells in which a given gene is expressed.}
\item{min.reads}{Minimum number of reads per gene per cell.}
\item{n.dim}{Number of dimensions of the transformed distance matrix used
in k-means clustering.}
}
\value{
Adjusted Rand index of the clustering.
}
\description{
Evaluate Adjusted Rand index based on gene_filter1() parameters. NOTE that
gene_filter2 is set to "none", distance is set to "spearman" and transformation
is set to "spectral". These parameters were chosen because they provide the
best clustering index.
}
\examples{
check_gene_filter1(quake, 3, 3, 2, 4)
}
|
59de6940608002b4407093ea62d0ed85081009e0 | c4cb920902a96270eabe14349daada9269dad185 | /R/repository.R | 9a2a9208dfe6fa6c804f379d5341596d50bd5e35 | [] | no_license | ashbaldry/vstsr | d224187bde2f3694cb63ce8dd1bf5a3b535a54dd | 76111d35ddb7650eb6d602b6acc60f95925cbb03 | refs/heads/main | 2023-08-30T21:38:13.389355 | 2023-08-20T10:18:21 | 2023-08-20T10:18:21 | 117,109,966 | 4 | 2 | null | 2023-08-20T10:15:49 | 2018-01-11T14:19:14 | R | UTF-8 | R | false | false | 3,162 | r | repository.R | #' Azure DevOps Project Repositories
#'
#' @description
#' These functions will allow you to scrape project information from Azure DevOps.
#'
#' @details
#' For more information about repository API calls check
#' \url{https://docs.microsoft.com/en-us/rest/api/azure/devops/git}.
#'
#' @param domain The name of the Azure DevOps organization
#' @param project Project ID or project name
#' @param repo the name of the repository in \code{project} to look at. Leave as \code{""} to get all repositories
#' within all projects of the domain
#' @param auth_key authentication key generated by using \code{\link{vsts_auth_key}}
#' @param quiet logical whether want general running information from printing. Any issue with the API call will
#' still show up if set to \code{TRUE}
#'
#' @examples
#' \dontrun{
#' # Add in own details to get a non-NULL output
#' auth_key <- vsts_auth_key("<username>", "<password>")
#'
#' # Get repo list
#' vsts_get_repos("domain", "project", auth_key)
#'
#' # Create new repo
#' vsts_create_repo("domain", "project", "repo", auth_key)
#'
#' # Delete existing repo
#' vsts_delete_repo("domain", "project", "repo", auth_key)
#' }
#'
#' @rdname vsts_repo
#' @export
# List the Git repositories of an Azure DevOps project via the REST API
# (api-version 6.0). AZURE_HOME_URL and send_failure_message() are
# package-level objects defined elsewhere in this package.
# Returns the parsed "value" payload (simplified to a data frame), or NULL
# when the API call does not come back with HTTP 200.
vsts_get_repos <- function(domain, project, auth_key, quiet = FALSE) {
uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/git/repositories?api-version=6.0")
response <- httr::GET(uri, httr::add_headers(Authorization = auth_key))
# Anything other than HTTP 200 is treated as failure: report and bail out.
if (httr::status_code(response) != 200) {
send_failure_message(response, "get repos list")
return(NULL)
}
# The repositories live under the "value" key of the JSON response.
content <- httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE)$value
if (!quiet) cat("Available repositories:", paste(content$name, collapse = ", "), "\n")
content
}
#' @rdname vsts_repo
#' @export
# Create a new Git repository named `repo` in an Azure DevOps project via the
# REST API (api-version 6.0). Returns the parsed response describing the new
# repository, or NULL when the API does not answer with HTTP 201 (Created).
vsts_create_repo <- function(domain, project, repo, auth_key, quiet = FALSE) {
uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/git/repositories?api-version=6.0")
response <- httr::POST(
uri,
httr::add_headers(Authorization = auth_key),
httr::content_type_json(),
# The endpoint expects a JSON body of the form {"name": "<repo>"};
# auto_unbox keeps the single string from being serialized as an array.
body = jsonlite::toJSON(list(name = repo), auto_unbox = TRUE)
)
# 201 Created is the success code for this endpoint.
if (httr::status_code(response) != 201) {
send_failure_message(response, "create repository")
return(NULL)
}
if (!quiet) cat(repo, "repository has been created in", project, "\n")
httr::content(response, encoding = "UTF-8", simplifyDataFrame = TRUE)
}
#' @rdname vsts_repo
#' @export
# Delete a Git repository from an Azure DevOps project via the REST API
# (api-version 6.0). The delete endpoint is keyed by repository ID, so the
# name is first resolved with vsts_get_repos().
# Returns TRUE on success, NULL when the repository cannot be found or any
# API call fails.
vsts_delete_repo <- function(domain, project, repo, auth_key, quiet = FALSE) {
  repos <- vsts_get_repos(domain, project, auth_key, quiet = TRUE)
  # Guard: vsts_get_repos() returns NULL when the listing call fails;
  # subsetting NULL with two indices below would raise an error, so stop here.
  if (is.null(repos)) {
    cat("Unable to retrieve the repository list for", project, "\n")
    return(NULL)
  }
  repo_id <- repos[repos$name == repo, "id"]
  if (is.null(repo_id) || length(repo_id) == 0) {
    cat("Unable to find", repo, "in", project, "\n")
    return(NULL)
  }
  uri <- file.path(AZURE_HOME_URL, domain, project, "_apis/git/repositories", paste0(repo_id, "?api-version=6.0"))
  response <- httr::DELETE(uri, httr::add_headers(Authorization = auth_key))
  # 204 No Content is the success code for the delete endpoint.
  if (httr::status_code(response) != 204) {
    send_failure_message(response, "delete repository")
    return(NULL)
  }
  if (!quiet) cat(repo, "repository has been deleted from", project, "\n")
  return(TRUE)
}
|
c4bc903e44d212a0400641e18240722366eac1cf | c12b995bfa4db368286eddfacda70f0920e589e0 | /scRNAseq_mir29_targeting_signature_per_cluster.R | 93444d9d4853bf55d888a395faca48ecaf626028 | [] | no_license | hongyaz/mir29Project | b6ba1f7fff5246ba75f4be1c0f806fee1ed743a9 | ff8511810a7f1bcef3f80b9e43ebf8d055ced6db | refs/heads/main | 2023-04-11T19:03:08.033368 | 2021-09-15T00:27:15 | 2021-09-15T00:27:15 | 405,442,084 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,129 | r | scRNAseq_mir29_targeting_signature_per_cluster.R | # mir29ko is an Seurat object containing data from all 3 samples, generated in scRNAseq_harmony_integration.R
library(Seurat)
adult_ctrl = subset(mir29ko, subset = state == "Cre-")
adult_flox = subset(mir29ko, subset = state == "Cre+")
neo = subset(mir29ko, subset = state == "Neo")
actrl_norm = as.data.frame(as.matrix(GetAssayData(adult_ctrl, 'data')))
aflox_norm = as.data.frame(as.matrix(GetAssayData(adult_flox, 'data')))
neo_norm = as.data.frame(as.matrix(GetAssayData(neo, 'data')))
# for all miRNAs
# find target genes (names here) - context score < -0.2
# find the target genes for miRNAs ~50MB file
targets_mouse <- read.table("Targetscan_mouse_filtered_genes.txt")
colnames(targets_mouse) <- c("transcript_id_ver", "gene_name","seed", "mirna_name")
targets_mouse$transcript_id <- substr(targets_mouse$transcript_id_ver, 1, 18)
# find weak targets of miRNAs, used to exclude in background genes - context score >= -0.2
weak_targets_all <- read.table("Targetscan_mouse_weak_targets_genes_names.txt")
colnames(weak_targets_all) <- c("transcript_id_ver", "gene_name","seed", "mirna_name")
weak_targets_all$transcript_id <- substr(weak_targets_all$transcript_id_ver, 1, 18)
mir_family_info <- read.table("miR_Family_Info.txt", fill = T, header = T, sep = "\t")
mmu_mir_family_info <- mir_family_info[substr(mir_family_info[,'MiRBase.ID'] ,1 ,3) == "mmu", ]
mmu_con_mir_family_info <- mmu_mir_family_info[mmu_mir_family_info[,'Family.Conservation.'] >= 2, ]
seed_con <- mmu_con_mir_family_info[,c('Seed.m8', 'MiRBase.ID')]
library(plyr)
mirna_seeds_con <- ddply(seed_con, .(Seed.m8), summarize,
miRNA=paste(MiRBase.ID,collapse=";"))
colnames(mirna_seeds_con) <- c("seed", "miRNA")
targets_mouse_con_sites <- read.table("/home/hz543/data/targetscan/mouse/Targetscan_mouse_filtered_genes.txt")
colnames(targets_mouse_con_sites) <- c("transcript_id_ver", "gene_name","seed", "mirna_name")
targets_mouse_con_sites$transcript_id <- substr(targets_mouse_con_sites$transcript_id_ver, 1, 18)
targets_mouse_seed <- targets_mouse[targets_mouse_con_sites$'seed' %in% seed_con$Seed.m8,]
gene_w_con_mirna_sites <- targets_mouse_seed$gene_name
# Per-cluster miRNA targeting-signature test (adult Cre- vs Cre+ cells).
# For one Seurat cluster, compares the log2 fold change (ctrl / flox) of the
# miRNA's strong targets against a background of genes that carry conserved
# miRNA sites but are neither strong nor weak targets of the given seed,
# using a two-sample Wilcoxon rank-sum test.
# Relies on globals defined above in this script: targets_mouse,
# weak_targets_all, gene_w_con_mirna_sites, adult_ctrl / adult_flox (Seurat
# objects) and actrl_norm / aflox_norm (normalized expression data frames
# with genes as rows and cells as columns).
#
# cluster: Seurat cluster id, matched against <object>$seurat_clusters.
# seed: 7-mer miRNA seed (case-insensitive), e.g. "AGCACCA" for miR-29.
# low_counts_cutoff: genes with mean normalized expression at or below this
#   value in either condition are dropped before the fold-change computation.
# Returns a signed Wilcoxon p-value (sign = direction of the median shift of
# targets relative to background), or the string "No enough targets" when
# fewer than 5 strong targets exist for the seed.
targeting_cluster_bg_adult <- function(cluster, seed, low_counts_cutoff){
seed <- toupper(seed)
# Strong (context score < -0.2) target gene names for this seed.
targets_name <- targets_mouse[targets_mouse[,'seed'] == toString(seed),]$gene_name
# Restrict each condition's expression matrix to the cells of this cluster.
actrl_norm_subset = actrl_norm[, names(adult_ctrl$seurat_clusters)[adult_ctrl$seurat_clusters == cluster]]
aflox_norm_subset = aflox_norm[, names(adult_flox$seurat_clusters)[adult_flox$seurat_clusters == cluster]]
# Drop lowly-expressed genes per condition, then keep only genes passing the
# filter in BOTH conditions so the fold change is defined for every row.
actrl_norm_filter <- actrl_norm_subset[rowMeans(actrl_norm_subset) > low_counts_cutoff,]
aflox_norm_filter <- aflox_norm_subset[rowMeans(aflox_norm_subset) > low_counts_cutoff,]
actrl_norm_both <- actrl_norm_filter[rownames(actrl_norm_filter) %in% rownames(aflox_norm_filter),]
aflox_norm_both <- aflox_norm_filter[rownames(aflox_norm_filter) %in% rownames(actrl_norm_filter),]
# log2 fold change of mean expression, control over flox.
fold_change = log2(rowMeans(actrl_norm_both) / rowMeans(aflox_norm_both))
# Only genes with conserved miRNA sites enter the test (target or background).
fold_change_sites = fold_change[names(fold_change) %in% gene_w_con_mirna_sites]
# Progress/diagnostic output: number of genes surviving both filters.
print(nrow(actrl_norm_both))
if (length(targets_name) < 5){
return("No enough targets")
} else {
# Weak targets (context score >= -0.2) are excluded from the background so
# the comparison is targets vs clearly-non-targeted genes.
targets_weak <- weak_targets_all[weak_targets_all[,'seed'] == toString(seed),]$gene_name
exp <- fold_change_sites[names(fold_change_sites) %in% targets_name]
exp_background <- fold_change_sites[!names(fold_change_sites) %in% targets_name]
exp_background_no_weak <- exp_background[!names(exp_background) %in% targets_weak]
# wilcox test
wilcox_test <- wilcox.test(exp, exp_background_no_weak)
p_value <- wilcox_test$p.value
# Sign encodes the direction of the shift: positive when targets have a
# higher median fold change than the background.
p_value_sign = sign(median(exp) - median(exp_background_no_weak)) * p_value
return (p_value_sign)
}
}
# finding targeting signature for miR-29 (seed: AGCACCA)
cluster_target_pvals_bg = c()
for (i in c(0:8)){
print(i)
cluster_target_pvals_bg = c(cluster_target_pvals_bg, targeting_cluster_bg_adult(i, 'AGCACCA', 0.01))
}
|
749ecc3a97b3519707b3602a5b13edc1a4fedaf4 | 07914bee98711467ea03263fdf2978aacda3a63a | /cachematrix.R | 2524cbd0e3827a18a880f65623946d9b2ce510d0 | [] | no_license | roger8144/ProgrammingAssignment2 | 5e0f202aee53b3905c851f6b3a4993735b5869e6 | d7f9864c477e9298f76dc172825332cbfe2b48fb | refs/heads/master | 2021-01-17T12:17:50.372712 | 2014-12-19T04:04:46 | 2014-12-19T04:04:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,501 | r | cachematrix.R | ## Coursera R Programming Assignment 2 - Lexial Scoping. Write an R functio
## that is able to cache potentially time consuming computations. Here, we
## cache the inverse of a matrix.
## The makeCacheMatrix function creates a special "matrix" that stores a
## matrix and cache its inverse. This object is really a list containing a
## function to
## 1. set the value of the matrix
## 2. get the value of the matrix
## 3. set the value of the inverse of the matrix
## 4. get the value of the inverse of the matrix
# Build a cache-aware "matrix" object: a list of four closures sharing one
# environment that holds the matrix `x` and its (lazily computed) inverse.
# - set(y): replace the stored matrix and invalidate the cached inverse
# - get(): return the stored matrix
# - setInverse(inverse): store a computed inverse in the cache
# - getInverse(): return the cached inverse, or NULL if not yet computed
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  list(
    set = function(y) {
      x <<- y
      # A new matrix invalidates any previously cached inverse.
      cached_inverse <<- NULL
    },
    get = function() x,
    setInverse = function(inverse) cached_inverse <<- inverse,
    getInverse = function() cached_inverse
  )
}
## The cacheSolve function calculates the inverse of the special "matrix"
## created with the makeCacheMatrix function. However, it first checks to see
## if the inverse has already been computed or not. If so, it gets the inverse
## from the cache and skips the computation. Otherwise, it calculates the
## mean of the data and sets the value of the mean in the cache via the
## setInverse function
# Return the inverse of the special "matrix" `x` built by makeCacheMatrix().
# A previously cached inverse is reused (with a message); otherwise the
# inverse is computed with solve(), stored in the cache, and returned.
# Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getInverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }
  result <- solve(x$get(), ...)
  x$setInverse(result)
  result
}
|
0e6cfbaf18f86d5f702db69165b4c04c6a2da548 | 1bb196a4f1e7bd159b8c8ba54cdcdd1101fc1ca8 | /R/cor_mat.R | d3f9d75001658d317d4c61bc7bfd2cc99127bf2f | [] | no_license | slagtermaarten/maartenutils | c9cda2738ac0e4b449167e4e4da416bf1f99747d | 35771546f62389c3879158b0319c1e3295c9a870 | refs/heads/master | 2023-04-14T09:02:54.144968 | 2023-04-07T13:45:28 | 2023-04-07T13:45:56 | 104,318,595 | 0 | 1 | null | 2018-09-11T15:01:20 | 2017-09-21T07:56:35 | R | UTF-8 | R | false | false | 2,362 | r | cor_mat.R | get_lower_tri<-function(cormat){
cormat[upper.tri(cormat)] <- NA
return(cormat)
}
get_upper_tri <- function(cormat){
cormat[lower.tri(cormat)]<- NA
return(cormat)
}
reorder_cormat <- function(cormat){
cormat
dd <- as.dist((1-cormat)/2)
which(is.na(dd))
hc <- hclust(dd)
cormat <-cormat[hc$order, hc$order]
}
#' Prefilter data for informative features before doing a correlation analysis
#'
#' Drops columns whose variance is (numerically) zero, i.e. constant
#' features that carry no information for a correlation analysis. Uses the
#' package helper \code{eps()} for the tolerance comparison.
#'
#' NOTE(review): \code{setDT()} converts the caller's object to a data.table
#' by reference - confirm this side effect on the input is intended.
#'
#' @param dtf A data.frame (converted in place to a data.table).
#' @param epsilon Tolerance: columns whose variance is within \code{epsilon}
#'   of 0 are removed.
#' @return A data.table containing only the columns with non-zero variance.
prefilter_correlation_data <- function(dtf, epsilon = 1e-5) {
setDT(dtf)
dtf <-
dtf[, !eps(apply(dtf, 2, var, na.rm = T), 0, epsilon = epsilon), with = F]
return(dtf)
}
#' Compute coefficient of variation
#'
#' @param x Numeric vector
#' Compute coefficient of variation
#'
#' The coefficient of variation is sd(x) / mean(x); NA values are ignored.
#' Fixes: removed a leftover \code{browser()} debug comment and a
#' commented-out alternative return, and replaced \code{T} with \code{TRUE}.
#'
#' @param x Numeric vector
#' @return \code{NA_real_} for degenerate input (NULL, length <= 1, or all
#'   NA); 0 when the standard deviation is numerically zero (per the package
#'   helper \code{eps()}); otherwise \code{sd(x) / mean(x)}.
coef_variation <- function(x) {
  # CV is undefined for fewer than two observations.
  if (is.null(x) || length(x) <= 1 || all(is.na(x))) {
    return(NA_real_)
  }
  std_dev <- sd(x, na.rm = TRUE)
  X_bar <- mean(x, na.rm = TRUE)
  # Zero spread means a CV of exactly 0, regardless of the mean (and avoids
  # 0/0 when the mean is also zero).
  if (eps(std_dev, 0, eps = 1e-23)) {
    return(0)
  }
  std_dev / X_bar
}
#' Create a correlation plot
#'
#' Draws a ggplot2 heatmap of the upper triangle of a correlation matrix,
#' with variables reordered by hierarchical clustering (reorder_cormat) and
#' a blue-white-red gradient centered at 0. Uses the package theme
#' \code{theme_ms()}.
#'
#' @param cormat Square correlation matrix (values in [-1, 1]).
#' @param base_size Base font size passed to the plot theme.
#' @param print_coefs If TRUE, print the rounded correlation coefficients
#'   inside the tiles.
#' @return A ggplot object.
create_corplot <- function(cormat, base_size = 6, print_coefs = F) {
# Cluster-order the variables, then keep only the upper triangle so each
# pair is drawn once; the NA lower triangle is dropped by melt(na.rm = TRUE).
cormat <- reorder_cormat(cormat)
upper_tri <- get_upper_tri(cormat)
# Melt the correlation matrix
melted_cormat <- melt(upper_tri, na.rm = TRUE)
# Create a ggheatmap
ggheatmap <- ggplot(melted_cormat, aes(Var2, Var1, fill = value)) +
geom_tile(color = 'white') +
scale_fill_gradient2(low = 'blue', high = 'red', mid = 'white',
midpoint = 0, limit = c(-1,1), space = 'Lab',
name='Spearman\nCorrelation') +
theme_ms(base_size = base_size) +
theme(axis.text.x = element_text(angle = 45, vjust = 1, hjust = 1)) +
coord_fixed() +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank(),
legend.justification = c(0, 1),
legend.position = c(0.1, 0.9),
legend.direction = 'horizontal')+
guides(fill = guide_colorbar(#barwidth = 7, barheight = 1,
title.position = 'top', title.hjust = 0.5))
# Optionally overlay the numeric coefficients, scaled with the base size.
if (print_coefs) {
ggheatmap <- ggheatmap +
geom_text(aes(Var2, Var1, label = round(value, 3)), color = 'black',
size = .8 * sqrt(base_size))
}
return(ggheatmap)
}
|
68a46775ea4e39a625e957e08526449148ce15b8 | 0f0b5f45facc7619d3a6f6b15418a244cbc9711c | /REJECTION.R | ae4e7e369c0828cc37476010f79b184d5cd40a48 | [
"MIT"
] | permissive | aurielfournier/aurielfournier.github.io | fab8b6161b42688d9bec4e1753082bc3a83307ca | 15131ccdd7502bb02e08185a3584503ccc9f53f8 | refs/heads/master | 2023-08-11T01:32:56.934562 | 2023-08-04T15:30:21 | 2023-08-04T15:30:21 | 31,795,800 | 0 | 5 | null | null | null | null | UTF-8 | R | false | false | 8,130 | r | REJECTION.R | library(googlesheets4)
library(ggplot2)
library(cowplot)
library(RColorBrewer)
library(rgdal)
library(maptools)
library(tidyr)
library(dplyr)
library(grid)
library(auriel)
gs4_deauth()
dat <- read_sheet("https://docs.google.com/spreadsheets/d/1HyhVgsRINRbu6vRYJJzSe7omQOK_41jtqyZmvv_iXjE/edit?usp=sharing") %>%
filter(published_yet!="NA")
m_by_rejects <- ggplot(data=dat,
aes(x=months_between, y=rejects,
group=published_yet))+
geom_point(size=2, aes(color=published_yet, shape=published_yet))+
ylim(0,6)+
xlab("Months from first submission")+
theme(legend.position="non",
legend.background = element_rect(colour = 'black', fill = 'white', linetype='solid'),
axis.title.x=element_text(size=10))+
scale_color_manual(values=c("#1f78b4","#b2df8a"),
name="Paper Status")+
scale_x_continuous(breaks=c(0,12,24,36,48,60))+
theme_fournier()
m_by_desk <- ggplot(data=dat,
aes(x=months_between, y=desk_rejects,
group=published_yet))+
geom_point(size=2, aes(color=published_yet, shape=published_yet))+
ylim(0,6)+
xlab("Months from first submission ")+
ylab("Desk Rejects")+
theme(legend.position=c(0.75,0.8),
legend.background = element_rect(colour = 'black',
fill = 'white', linetype='solid'),
legend.title = element_text(),
axis.title.x=element_text(size=10))+
scale_color_manual(values=c("#1f78b4","#b2df8a"),)+
scale_x_continuous(breaks=c(0,12,24,36,48,60))+
guides(color=guide_legend(ncol=1, title="Paper Status"),
shape=guide_legend(ncol=1, title="Paper Status"))+
theme_fournier()
rejects_hist <- ggplot(data=dat,
aes(x=rejects))+
geom_histogram()+
annotate("text", label=paste0("Last Updated ", Sys.Date()),
x=3, y=12)+
scale_x_continuous(breaks=0:6)+
scale_y_continuous(breaks=seq(0,14, by=2))+
theme(axis.title.x=element_text(size=10))+
theme_fournier()+
ylab("Number of Papers")+
xlab("Rejections")
desk_hist <- ggplot(data=dat,
aes(x=desk_rejects))+
geom_histogram()+
theme(axis.title.x=element_text(size=10))+
scale_y_continuous(breaks=seq(0,21, by=3))+
theme_fournier()+
ylab("Number of Papers")+
xlab("Desk rejections \n
(rejection without review by editor)")
a <- plot_grid(m_by_rejects, m_by_desk,
rejects_hist, desk_hist, nrow=2, align="hv")
ggsave(a, file="./images/papers.jpeg", width=20, height=15, units="cm", dpi=300)
desk<-ggplot(data=dat,
aes(x=year_submitted_first_time, y=months_between, color=published_yet))+
geom_jitter(size=3)+
ylab("Months from first sub to publication")+
theme_fournier()+
scale_color_manual(values=c("#1f78b4","#b2df8a"),)+
xlab("Year first submitted")+
theme(legend.position = c(0.2,0.8))+
guides(color=guide_legend(ncol=1, title="Paper Status"),
shape=guide_legend(ncol=1, title="Paper Status"))+
scale_x_continuous(breaks=c(2012,2014,2016,2018,2020,2022))
ggsave(desk, file="./images/pubtime_over_time.jpeg", width=15, height=15, units="cm", dpi=300)
#
#
# sumdat <- dat %>%
# group_by(year_submitted_first_time, published_yet) %>%
# summarize(total = sum(desk_rejects,na.rm=TRUE))
#
# desk<-ggplot(data=sumdat,
# aes(x=year_submitted_first_time, y=total, color=published_yet))+
# geom_jitter(size=3)+
# ylab("desk rejects")+
# theme_fournier()+
# scale_color_manual(values=c("#1f78b4","#b2df8a"),)
#
# sumdat <- dat %>%
# group_by(year_submitted_first_time, published_yet) %>%
# summarize(total = sum(review_rejects,na.rm=TRUE))
#
# review<-ggplot(data=sumdat,
# aes(x=year_submitted_first_time, y=total, color=published_yet))+
# geom_jitter(size=3)+
# ylab("review rejects")+
# theme_fournier()+
# scale_color_manual(values=c("#1f78b4","#b2df8a"),)
#
# sumdat <- dat %>%
# group_by(year_submitted_first_time, published_yet) %>%
# summarize(total = sum(rejects,na.rm=TRUE))
#
# all<- ggplot(data=sumdat,
# aes(x=year_submitted_first_time, y=total, color=published_yet))+
# geom_jitter(size=3)+
# ylab("all rejects")+
# theme_fournier()+
# scale_color_manual(values=c("#1f78b4","#b2df8a"),)
#
# a <- plot_grid(desk, review, all, nrow=3, align="hv")
#
#
# ggsave(a, file="./images/papers_over_time.jpeg", width=15, height=30, units="cm", dpi=300)
#
## Grants
gs4_deauth()
dat <- read_sheet("https://docs.google.com/spreadsheets/d/1MnEXtnXcgntgvLBmL_VNV1oRK0LTjvG0hmZzdUBu_vs/edit?usp=sharing")
datdat <- dat %>%
mutate(year=factor(year)) %>%
group_by(year, rejected_y_n) %>%
summarize(count=n())
a <- ggplot(data=datdat,
aes(x=year, y=count, fill=rejected_y_n))+
geom_bar( stat="identity", color="black",
position = position_dodge2(width = 0.9, preserve = "single"))+
scale_fill_manual(values=c("#1f78b4","#1b9e77","#b2df8a"),
name="")+
theme_fournier()
ggsave(a, file="./images/grants.jpeg", width=20, height=15, units="cm", dpi=300)
#
#
# ## JOBS
#
#
# dat <- gs_title("data_on_job_MSU_result")
#
# datdat <- gs_read(dat) %>%
# group_by(offer, interview) %>%
# summarize(count =n())
#
# a <- ggplot(data=datdat,
# aes(x=interview, y=count, fill=offer, group=offer))+
# geom_bar(position="dodge", stat="identity")+
# scale_fill_manual(values=c("#7570b3","#d95f02"),
# name="Received offer?")+
# xlab("Was interviewed?")
#
#
# ggsave(a, file="./images/postdoc_jobs.jpeg", width=20, height=15, units="cm", dpi=300)
#
# dat <- gs_title("job_search_INHS_director")
#
# datdat <- gs_read(dat) %>%
# mutate(interview = ifelse(is.na(date_interview),"no response",
# ifelse(date_interview=="N","no","yes")),
# offer = ifelse(is.na(offer),"accepted before decision", offer)) %>%
# group_by(interview, offer) %>%
# summarize(count =n())
#
# a <- ggplot(data=datdat,
# aes(x=interview, y=count, fill=offer, group=offer))+
# geom_bar(position="dodge", stat="identity")+
# scale_fill_manual(values=c("#7570b3","#d95f02","#1b9e77"),
# name="Received offer?")+
# xlab("Was interviewed?")
#
# ggsave(a, file="./images/INHS_jobs.jpeg", width=20, height=15, units="cm", dpi=300)
#
# dat <- gs_title("job_search_INHS_director")
#
# datdat <- gs_read(dat) %>%
# group_by(type) %>%
# summarize(count = n())
#
# a <- ggplot(data=datdat,
# aes(x=type, y=count, fill=type))+
# geom_bar(position="dodge", stat="identity")+
# scale_fill_manual(values=c("#7570b3","#d95f02","#1b9e77","#e7298a"))+
# xlab("Position Type")+
# theme(legend.position = "none")
#
#
# ggsave(a, file="./images/INHS_jobs_types.jpeg", width=20, height=15, units="cm", dpi=300)
#
#
# dat <- gs_title("job_search_INHS_director")
#
#
# datdat <- gs_read(dat) %>%
# group_by(state) %>%
# summarize(countn = n())
#
# ms <- usa[usa$NAME_1 %in% datdat$state,]
#
# usa <- readRDS("~/GBNERR_wintermarshbirds/gis_data/USA_adm1.rds")
# can <- readRDS("~/GBNERR_wintermarshbirds/gis_data/CAN_adm1.rds")
# mex <- readRDS("~/GBNERR_wintermarshbirds/gis_data/MEX_adm1.rds")
#
# us <- map_data("state")
# us <- dplyr::filter(us, region=="michigan"|region=="wisconsin")
#
#
# a <- ggplot()+
# geom_polygon(data=can,aes(x=long,y=lat,group=group), col="black", fill="white")+
# geom_polygon(data=mex,aes(x=long,y=lat,group=group), col="black", fill="white")+
# geom_polygon(data=usa,aes(x=long,y=lat,group=group), col="black", fill="white")+
# coord_map("albers",lat0=25, lat1=60,xlim=c(-125,-70),ylim=c(25,57))+
# geom_polygon(data=ms, aes(x=long,y=lat,
# group=group),
# color="black",fill="#7570b3")+
# geom_polygon(data=us,aes(x=long,y=lat,group=group), col="black", fill=NA)
#
# ggsave(a, file="aurielfournier.github.io/images/INHS_jobs_geography.jpeg", width=20, height=15, units="cm", dpi=300)
#
|
e95ca3b22ecf8807e1126aeb34d2747e0fec5f08 | 412d7ac8d78ee6eac43787b8a8f1883ebcffd6da | /R/4a2_hpin_nav.R | 526d30bd9fc63ea52d171580bdefb21a58aefc96 | [] | no_license | fhaertner/GeometricAnalysisDMs | 76669248c750ce7a919f545d046854c75d559140 | d0c23d650a3bfa7f695df94e04689c07b59e8a27 | refs/heads/master | 2020-03-10T02:02:20.435952 | 2019-03-12T21:05:32 | 2019-03-12T21:05:32 | 129,126,049 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,281 | r | 4a2_hpin_nav.R |
library(NetHypGeom)
library(dplyr)
load("../data/hPIN_150k.RData")
load("../data/coords_hPIN_150k.RData")
load("../data/protein_classes.RData")
epochs <- 100 #Number of experiments to run
st <- 500 #Number of source-target pairs
N <- vcount(hPIN) #Number of proteins in the network
#All TFs and Receptors
#Vertex indices of proteins annotated as TFs / receptors in protein.classes.
all.tf <- sort(which(V(hPIN)$symbol %in% protein.classes$symbol[protein.classes$tf]))
all.rec <- sort(which(V(hPIN)$symbol %in% protein.classes$symbol[protein.classes$rec]))
#Proteins annotated as BOTH receptor and TF are moved to the end of all.rec
#and the start of all.tf, so that every pair whose two members are both
#rec+tf falls in one contiguous corner of the pair matrix built below.
overlap <- intersect(all.rec, all.tf)
all.rec <- c(all.rec[!(all.rec %in% overlap)], overlap)
all.tf <- c(overlap, all.tf[!(all.tf %in% overlap)])
#To form a non-redundant pool of TF-Rec pairs, we use zero-based row-order indexing of a rectangular matrix
n <- length(all.rec)
m <- length(all.tf)
pool.real <- 0:(n*m - 1)
rec.matrix <- matrix(pool.real, byrow = T, nrow = n, ncol = m, dimnames = list(0:(n-1), 0:(m-1)))
#The overlap x overlap sub-matrix holds the pairs where both members are
#rec+tf proteins; its lower triangle (incl. the diagonal of self-pairs and
#one ordering of each duplicated pair) is removed from the pool.
overlap.size <- length(overlap)
unwanted <- rec.matrix[as.character((n-overlap.size):(n-1)), as.character(0:(overlap.size-1))]
unwanted <- unwanted[lower.tri(unwanted, diag = T)]
pool.real <- pool.real[-(unwanted+1)]
#Proteins that are neither TFs nor Receptors but with degree similar to them
#(control pool pre-computed elsewhere; provides not.rec and not.tf).
#Same non-redundant pair-pool construction as above.
load("../results/nonTF_nonRec_pool.RData")
not.rec <- sort(not.rec)
not.tf <- sort(not.tf)
overlap <- intersect(not.rec, not.tf)
not.rec <- c(not.rec[!(not.rec %in% overlap)], overlap)
not.tf <- c(overlap, not.tf[!(not.tf %in% overlap)])
nn <- length(not.rec)
mm <- length(not.tf)
pool.ctl <- 0:(nn*mm - 1)
rec.matrix <- matrix(pool.ctl, byrow = T, nrow = nn, ncol = mm, dimnames = list(0:(nn-1), 0:(mm-1)))
overlap.size <- length(overlap)
unwanted <- rec.matrix[as.character((nn-overlap.size):(nn-1)), as.character(0:(overlap.size-1))]
unwanted <- unwanted[lower.tri(unwanted, diag = T)]
pool.ctl <- pool.ctl[-(unwanted+1)]
rm(rec.matrix) #free the large index matrix; only the pools are needed below
## Per-epoch greedy-routing results (one list entry per epoch).
## NOTE: the original file initialised these three lists twice in a row with
## identical statements; the duplicate block was redundant (each line simply
## overwrote an identical empty list) and has been removed.
hops.h2 <- vector("list", length = epochs) #Packet delivery using hyperbolic distances
hops.rt <- vector("list", length = epochs) #Packet delivery between Receptors and TF
hops.nrt <- vector("list", length = epochs) #Packet delivery between proteins with degrees similar to Rec and TF
## Main experiment: in each epoch draw st pairs of each kind and route one
## packet per pair with greedy_route_packets(); each hops.* entry stores the
## per-pair hop counts (values <= 0 are treated as undelivered packets,
## judging by the sum(x > 0) usage further below).
for(ep in 1:epochs){
#Sampling of non-redundant src-trg pairs
#Draw st linear indices out of the N*(N-1)/2 unordered vertex pairs, then
#map each zero-based upper-triangular index k back to its (i, j) pair.
k <- sample(N*(N-1)/2, st) - 1 #We subtract 1, because the formulae to go from linear upper diagonal indexing to (i,j) are zero-based
sources <- N - 2 - floor(sqrt(-8*k + 4*N*(N-1)-7)/2.0 - 0.5)
targets <- k + sources + 1 - N*(N-1)/2 + (N-sources)*((N-sources)-1)/2
#We sum 1 to go back to 1-based indexing
sources <- sources + 1
targets <- targets + 1
#Sampling of non.redundant Rec-TF pairs
#k is a zero-based row-major index into the n x m pair matrix built above:
#row i selects a receptor, column j selects a TF.
k <- sample(pool.real, st)
i <- floor(k/m)
j <- k - (i*m)
rec <- all.rec[i + 1]
tf <- all.tf[j + 1]
#Sampling of non.redundant nonRec-nonTF pairs (degree-matched control)
k <- sample(pool.ctl, st)
i <- floor(k/mm)
j <- k - (i*mm)
nrec <- not.rec[i + 1]
ntf <- not.tf[j + 1]
hops.h2[[ep]] <- greedy_route_packets(hPIN, coords, sources, targets)
hops.rt[[ep]] <- greedy_route_packets(hPIN, coords, rec, tf)
hops.nrt[[ep]] <- greedy_route_packets(hPIN, coords, nrec, ntf)
}
## Long-format results table: for each epoch and pair type, the fraction of
## delivered packets (GR efficiency, hops > 0) and the mean hop count among
## delivered packets (hop stretch).  Row order matches the case/exp labels:
## 2*epochs rows per case (epochs of efficiency, then epochs of stretch).
res <- tibble(case = factor(rep(c("Rnd. src-trg pairs", "Rec-TF", "Control"),
each = epochs*2),
levels = c("Rnd. src-trg pairs", "Rec-TF", "Control"),
ordered = TRUE),
exp = rep(rep(c("GR efficiency", "Hop stretch"), each = epochs), 3),
value = c(sapply(hops.h2, function(x) sum(x > 0)/st),
sapply(hops.h2, function(x) mean(x[x > 0])),
sapply(hops.rt, function(x) sum(x > 0)/st),
sapply(hops.rt, function(x) mean(x[x > 0])),
sapply(hops.nrt, function(x) sum(x > 0)/st),
sapply(hops.nrt, function(x) mean(x[x > 0]))))
## One-sided Wilcoxon rank-sum tests: is greedy routing between Rec-TF pairs
## MORE efficient ("greater") and does it have SMALLER hop stretch ("less")
## than routing between random pairs and than the degree-matched control?
## (rt/rnd/ctrl are reused for both metrics; p-values are saved below.)
rt <- filter(res, case == "Rec-TF" & exp == "GR efficiency")$value
rnd <- filter(res, case == "Rnd. src-trg pairs" & exp == "GR efficiency")$value
ctrl <- filter(res, case == "Control" & exp == "GR efficiency")$value
pval_RTvsRnd_eff <- wilcox.test(rt, rnd, alternative = "greater")$p.value
pval_RTvsCtl_eff <- wilcox.test(rt, ctrl, alternative = "greater")$p.value
rt <- filter(res, case == "Rec-TF" & exp == "Hop stretch")$value
rnd <- filter(res, case == "Rnd. src-trg pairs" & exp == "Hop stretch")$value
ctrl <- filter(res, case == "Control" & exp == "Hop stretch")$value
pval_RTvsRnd_hs <- wilcox.test(rt, rnd, alternative = "less")$p.value
pval_RTvsCtl_hs <- wilcox.test(rt, ctrl, alternative = "less")$p.value
## Boxplots of both metrics per pair type, legend anchored to the right edge
## inside the panel.  NOTE(review): ggplot2 is not attached explicitly in
## this script - presumably it comes in via a dependency; confirm.
p.nav <- ggplot(res, aes(case, value, fill = exp)) + geom_boxplot(width = 0.7) +
labs(x = "", y = "") + theme_bw() +
theme(legend.title = element_blank(), legend.background = element_blank(),
legend.justification = c(1,0.5), legend.position = c(1,0.5))
## Persist raw hop lists, tidy results, the plot and all p-values.
save(hops.h2, hops.rt, hops.nrt, res, p.nav, pval_RTvsCtl_eff, pval_RTvsCtl_hs,
pval_RTvsRnd_eff, pval_RTvsRnd_hs, file = "../results/nav_hPIN.RData")
|
11449308675498282423bf5e892ca73649054667 | 753e449d707e106e6b345c222b7277ac8fc39858 | /Mapping spt.R | fa27028f72ddef04955aa26acd7364cd8b5d290d | [] | no_license | vamsikrishna97/IBMproject | 2fe4160d1baf234e03499efa36f6383c73f79be7 | 85e8ff1cd9ea01bb5af1868abde22047c55bcf1b | refs/heads/master | 2020-08-01T12:23:58.201243 | 2017-02-17T20:12:09 | 2017-02-17T20:12:09 | 73,575,782 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 785 | r | Mapping spt.R | q<-read.csv("StudentperT.csv")
## Drop the national aggregate row; keep only the individual States/UTs.
q <- q[q$India..State..UTs != "India", ]
## Assign each State/UT a numeric id starting at 1287.  The 1286 offset is
## presumably chosen to line up with the polygon ids in `indup.df` joined
## below - TODO confirm against the map data.
## (Fixes: unique() was recomputed on every loop iteration, and 1:length()
## misbehaves on an empty vector - seq_along() is safe.)
states <- unique(q$India..State..UTs)
for (i in seq_along(states)) {
q[q$India..State..UTs == states[i], "id"] <- i + 1286
}
## Reshape the ten per-year columns (3:12) to long format (Year, Number).
q.clean <- gather(q, Year, Number, 3:12)
Type <- "Primary School"
## Attach the students-per-teacher numbers to the map polygons by id.
ind.df <- join(indup.df, q.clean, by = "id")
## Render an animated choropleth of students-per-teacher for one school type
## (matched against the School.Type column of the global `ind.df`), one
## frame per Year, and encode the result as a GIF with gg_animate().
Mapper<- function(Type){
subset.df <- ind.df[ind.df$School.Type == Type, ]
title.sub <- paste0("Type=", Type)
gif.plot <- ggplot(subset.df) +
aes(long, lat, group = group, fill = Number, frame = Year) +
ggtitle("Student Per Teacher in the year:", subtitle = title.sub) +
geom_polygon() +
scale_fill_gradient(low = "green",
high = "red1", guide_legend("No of students per Teacher")) +
geom_path(color = "grey") +
coord_equal()
print("Making GIF")
gg_animate(gif.plot, interval = 1.3)
}
|
81f5468f0f7a7f2c714e5a3f0fb00d5b277cb52e | 3f680c621d68cd817097e1a83915ceaead162e12 | /tests/testthat/test-approximatelyUninformative8.R | a2621ed47e35a6f51bc94e6ddc12ab416177564a | [] | no_license | rohan-shah/mpMap2 | 46273875750e7a564a17156f34439a4d93260d6c | c43bb51b348bdf6937e1b11298b9cdfe7a85e001 | refs/heads/master | 2021-05-23T20:34:59.327670 | 2020-07-19T10:24:09 | 2020-07-19T10:24:09 | 32,772,885 | 10 | 6 | null | null | null | null | UTF-8 | R | false | false | 2,292 | r | test-approximatelyUninformative8.R | context("uninformative 8-parent markers")
test_that("Check that the eight-parent uninformative marker combination gives an RF estimate of NA in the right cases",
{
testInfiniteSelfing <- function(pedigree)
{
pedigree@selfing <- "infinite"
map <- qtl::sim.map(len = 10, n.mar = 2, anchor.tel=TRUE, include.x=FALSE, sex.sp=FALSE, eq.spacing=TRUE)
cross <- simulateMPCross(pedigree = pedigree, map = map, mapFunction = haldane)
firstColumnFunction <- function(x)
{
if(x %in% c(1, 4, 5, 7, 8)) return(1)
if(x %in% c(2, 3, 6)) return(0)
return(NA)
}
secondColumnFunction <- function(x)
{
if(x %in% c(3, 7, 8)) return(1)
if(x %in% c(1, 2, 4, 5, 6)) return(0)
return(NA)
}
cross@geneticData[[1]]@founders[,1] <- sapply(cross@geneticData[[1]]@founders[,1], firstColumnFunction)
cross@geneticData[[1]]@finals[,1] <- sapply(cross@geneticData[[1]]@finals[,1], firstColumnFunction)
cross@geneticData[[1]]@founders[,2] <- sapply(cross@geneticData[[1]]@founders[,2], secondColumnFunction)
cross@geneticData[[1]]@finals[,2] <- sapply(cross@geneticData[[1]]@finals[,2], secondColumnFunction)
cross@geneticData[[1]]@hetData[[1]] <- cross@geneticData[[1]]@hetData[[2]] <- rbind(c(0,0,0), c(1,1,1))
validObject(cross)
return(estimateRF(cross))
}
#Infinite selfing, with or without intercrossing, is more uninformative for the single funnel design than for the random funnel design
for(intercrossingGenerations in 0:1)
{
pedigree <- eightParentPedigreeSingleFunnel(initialPopulationSize = 100, selfingGenerations = 5, intercrossingGenerations = intercrossingGenerations, nSeeds = 1)
rf <- testInfiniteSelfing(pedigree)
expect_true(is.na(rf@rf@theta[1,2]))
}
intercrossingGenerations <- 0
pedigree <- eightParentPedigreeRandomFunnels(initialPopulationSize = 100, selfingGenerations = 5, intercrossingGenerations = intercrossingGenerations, nSeeds = 1)
rf <- testInfiniteSelfing(pedigree)
expect_true(!is.na(rf@rf@theta[1,2]))
intercrossingGenerations <- 1
pedigree <- eightParentPedigreeRandomFunnels(initialPopulationSize = 100, selfingGenerations = 5, intercrossingGenerations = intercrossingGenerations, nSeeds = 1)
rf <- testInfiniteSelfing(pedigree)
expect_true(is.na(rf@rf@theta[1,2]))
})
|
a90018379446463e92943a4af1fa949ed98b8743 | 72d8202bea19a62e973ab035d1644c5737b93abf | /00-apply missing data and ordination.R | c62f2046de30ef2334d1a87b143ff99bfc365a49 | [] | no_license | Astahlke/MissingData_GEA | d35259933212cef6d49b57a5df5a3e2540fb83a4 | f30e3150783ccb3e31232e87350839669f8c0a95 | refs/heads/master | 2021-01-18T18:07:28.926059 | 2017-03-10T03:27:56 | 2017-03-10T03:27:56 | 84,361,022 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,162 | r | 00-apply missing data and ordination.R | ## This code will loop through all replicates for a set of 24 simulations for the discrete landscapes(H1sims, H5sims, H9sims) with 1 missing data input file. Keep the same file structure as when you download the files from Dryad (i.e. one folder per sim, and 10 replicate files in each folder named "R0.csv" to "R9.csv").
## Need to change input file after each run.
## NOTE(review): rm(list=ls()) and a hard-coded setwd() are fragile script
## practices - they wipe the caller's workspace and assume one machine's
## directory layout.  Kept as-is because the rest of the script relies on
## relative paths from this working directory.
rm(list=ls())
library(raster)
library(vegan)
###set working directory to the folder that stores the 240 sim folders that you are running
setwd("/path/to/H9sims") # change for each H sim
sim <- dir() #[-c(1,15,18,21)]#for H5 sims; one folder per simulation
repl <- c(0:9) #replicate file suffixes: R0.csv ... R9.csv
##########################
### Loop starts here
##########################
## Per-simulation / per-replicate analysis:
##  1. read the landscape raster and SNP table; build the environmental
##     predictors (x, y, habitat) and the genotype matrix;
##  2. inject NAs at the indices listed in the chosen missing-data file,
##     drop unusable individuals, remove monomorphic and low-MAF loci, and
##     mean-impute the remaining NAs;
##  3. run RDA and dbRDA, flag loci whose loadings lie more than 3 SD from
##     the mean on any of the first three constrained axes, correlate those
##     loci with each predictor, and stash the results per (sim, rep) with
##     assign() for collection after the loop.
for (s in 1:length(sim)) {
for (r in 1:length(repl)) {
###read in the habitat files
###R1 goes with sim rep0
###Change H9, H5, H1
fname <- paste("/path/to/landscape surfaces/L10H9R",r,"_aa.asc", sep="")
qrule <- raster(fname)
###read in the snp dataset
###Change directory for H9, H5, H1
###NOTE(review): `data` shadows base::data() for the rest of this block.
fname1 <- paste("path/to/H9sims/",sim[s],"/R",repl[r],".csv", sep="")
data <- read.csv(fname1)
###extract coordinates and the habitat value at each coordinate
coord <- data[,2:3]
habitat <- extract(qrule,coord)
###convert UTM - subtract corner coordinate and /1000 to convert from m to km
xvar <- (data[,2] - 477895.2)/1000
yvar <- (data[,3] - 4905702)/1000
###create the full data set
fulldata <- cbind(xvar, yvar, habitat, data[,8:207])
###subset individuals from the subsample30, subsample100, or sample500.csv files
samp <- read.csv("/path/to/subsample30.csv", header=F)
samp <- samp[,1]
sampdata <- fulldata[samp,]
###extract the environmental variables alone
env <- sampdata[,1:3]
env <- scale(env, center=T, scale=T)
env <- data.matrix(env)
colnames(env)<-c("xvar","yvar","habitat")
###extract the snps alone; every 2nd column of the 200 genotype columns
###(100 loci), renamed with zero-based locus ids 0..99.
snps <- sampdata[,4:203]
snps <- snps[,seq(1, ncol(snps), 2)]
colnames(snps) <- seq(0, 99, by=1)
snps <- data.matrix(snps)
#enter NAs
##in glob2rx(), "_30", "_100" or "_500" for sample sizes
missing_files <- list.files("/path/to/Input Files/", pattern = glob2rx("random_*_30.csv"), full.names = F, recursive = F)
###change missing_files[x] to [1] to [6] for each percentage of missing data
missing <- read.csv(paste0("/path/to/Input Files/", missing_files[1]),header=FALSE)
###`missing` holds linear (column-major) indices into the snps matrix.
missing <- as.numeric(missing[,1])
snps[missing] <- NA
###remove NA rows (individuals) from both snp and env datasets; there are a few replicates with NA individuals so there will be 499 instead of 500 individuals
###(a row with a single unique value across all loci is dropped).
subsetNA <- apply(snps,1,function(x) length(unique(x))>1)
snps <- snps[subsetNA,]
env <- env[subsetNA,]
###remove monomorphic snps
subsetMM <- apply(snps,2,function(x) length(unique(x))>1)
snps <- snps[,subsetMM]
###MAF filtering - genotypes are coded 0/1/2 copies of an allele.
geno <- as.matrix(snps)
## calc_n
n0 <- apply(geno==0,2,sum,na.rm=T)
n1 <- apply(geno==1,2,sum,na.rm=T)
n2 <- apply(geno==2,2,sum,na.rm=T)
n <- n0 + n1 + n2
## calculate allele frequencies
p <- ((2*n0)+n1)/(2*n)
q <- 1 - p
maf <- pmin(p, q)
## KEEP LOCI WITH MAF > 3%
## NOTE(review): a locus with MAF exactly 0.03 is dropped from snps (not
## > 0.03) yet not counted in tempname (< 0.03) - confirm this boundary.
subsetMAF <- maf[maf>0.03]
tempname <- maf[maf<0.03]
nameMAF <- match(names(subsetMAF), colnames(snps))
newsnps <- snps ##keep the pre-MAF-filter matrix for outlier follow-up below
snps <- snps[, c(nameMAF)]
###To check which loci have been removed & final # individuals, this will create output files further down so we can just see how many individuals/snps are remaining after removing NAinds, maf < 3%, and to double check the number of NA snps
dims <- dim(snps)
ind_missing <- cbind(sim[s], repl[r], dims[1], dims[2], length(tempname))
fname <- paste("NAinds_", sim[s], repl[r], sep="")
assign(fname, ind_missing)
snps_missing <- cbind(sim[s],repl[r],length(which(is.na(snps)==TRUE)))
fname <- paste("NAsnps_", sim[s], repl[r], sep="")
assign(fname, snps_missing)
## You shouldn't have to change anything inside the loop from here; but you will have to change the output file names outside of the loop
## Impute remaining NAs with the column (per-locus) mean genotype, in both
## the filtered and the full snp matrices.
cM <- colMeans(snps, na.rm =TRUE)
indx <- which(is.na(snps), arr.ind=TRUE)
snps[indx] <- cM[indx[,2]]
cM2 <- colMeans(newsnps, na.rm=TRUE)
indx2 <- which(is.na(newsnps), arr.ind=TRUE)
newsnps[indx2] <- cM2[indx2[,2]]
###format the snp data
snps.scale <- scale(snps, center=T, scale=T) ## scale and center for PCA/RDA
snps.bray <- vegdist(snps, method="bray") ## bray-curtis distance for PCoA and dbRDA
############
## RDA ##
#############
## Loadings ("species" scores) of each locus on the first 3 constrained axes.
snp.rda <- rda(snps.scale, env, scale=F)
snp.rda.sum <- summary(snp.rda)
snpload.rda <- snp.rda.sum$species[,1:3]
##############
## dbRDA ##
##############
snp.dbrda <- capscale(snps.bray ~ env, comm=snps) # RDA on a PCoA
snp.dbrda.sum <- summary(snp.dbrda)
snpload.dbrda <- snp.dbrda.sum$species[,1:3]
rownames(snpload.dbrda) <- colnames(snps)
#####################
## detect outliers ##
#####################
tests <- list(snpload.rda, snpload.dbrda)
test.names <- c("rda","dbrda")
for (i in 1:length(tests)) {
for (j in 1:3) {
## tests[i] is a one-element list; unlist + matrix rebuilds the 3-column
## loadings matrix with zero-based locus ids as row names.
x <- tests[i] #change back to i
x <- matrix(unlist(x), ncol = 3)
rownames(x) <- colnames(snps)
x <- x[,j] # change back to j
## Outlier window: mean +/- 3 SD of the loadings on axis j.
lims <- mean(x) + c(-1, 1) * 3 * sd(x)
out <- x[x < lims[1] | x > lims[2]]
if (length(out) > 0) {
## `names` (shadowing base::names) holds the zero-based locus ids of the
## outliers; correlate each outlier locus with x, y and habitat.
names <- as.numeric(names(out))
axis <- rep(j, length(out))
outdata <- t(rbind(axis, names, as.numeric(out)))
snpcors <- matrix(NA, nrow=length(names), ncol=6)
for (k in 1:length(names)) {
outlocus <- names[k] + 1 # to access correct column in snp dataframe
if (ncol(snps) == 100) {
outsnp <- snps[,outlocus]
} else if (ncol(snps) < 100) {
outsnp <- newsnps[,outlocus] # use the full snp matrix including the MAF < 3% locus
outsnp <- outsnp[!is.na(outsnp)] # remove NA individuals, if any
}
corr.x <- cor.test(outsnp, env[,1])
snpcors[k,1] <- corr.x$estimate
snpcors[k,2] <- corr.x$p.value
corr.y <- cor.test(outsnp, env[,2])
snpcors[k,3] <- corr.y$estimate
snpcors[k,4] <- corr.y$p.value
corr.h <- cor.test(outsnp, env[,3])
snpcors[k,5] <- corr.h$estimate
snpcors[k,6] <- corr.h$p.value
}
outdata <- cbind(outdata, snpcors)
## One object per ordination x axis ("rda1".."dbrda3"), combined below.
fname <- paste(test.names[i],j, sep="")
assign(fname, outdata)
}
else if (length(out) == 0) {
## NA sentinel so rbind() below still works; dropped by complete.cases().
fname <- paste(test.names[i],j, sep="")
assign(fname, NA)
}
}
}
## Stack the three per-axis tables for each ordination, label them with the
## ordination name, sim and replicate, and drop the NA sentinel rows.
out.rda <- rbind(rda1, rda2, rda3)
out.rda <- as.data.frame(out.rda)
label0 <- rep("RDA", nrow(out.rda))
label1 <- rep(sim[s], nrow(out.rda))
label2 <- rep(repl[r], nrow(out.rda))
out.rda <- cbind(label0,label1,label2,out.rda)
out.rda <- out.rda[complete.cases(out.rda),]
out.dbrda <- rbind(dbrda1, dbrda2, dbrda3)
out.dbrda <- as.data.frame(out.dbrda)
label0 <- rep("dbRDA", nrow(out.dbrda))
label1 <- rep(sim[s], nrow(out.dbrda))
label2 <- rep(repl[r], nrow(out.dbrda))
out.dbrda <- cbind(label0,label1,label2,out.dbrda)
out.dbrda <- out.dbrda[complete.cases(out.dbrda),]
outs <- rbind(out.rda, out.dbrda)
## Keep only replicates where at least one outlier was detected; one
## "out_<sim>_R<rep>" object per replicate, collected after the loop.
if (nrow(outs) > 0) {
colnames(outs) <- c("ord","sim","rep", "axis","locus","loading", "x-corr", "x-pval", "y-corr", "y-pval", "h-corr", "h-pval")
fname <- paste("out_", sim[s],"_R",repl[r], sep="")
assign(fname,outs)
}
}
}
########################
##### Loop ends here
##########################
## save output; there will be 5 output files in total
# 1. Raw Data
# 2. True Positives
# 3. Summary file
# 4. Number of individuals after removing NA individuals
# 5. Number of snps converted to NAs
## Follow this output file naming : "_random_5percent_30ind_RawData.csv" with the conditions that were run
## Collect all per-(sim, rep) outlier tables created with assign() in the
## loop above ("out_<sim>_R<rep>") into one data frame and write it out.
## NOTE(review): `save` shadows base::save(); bar is grown row-by-row with
## rbind() (quadratic), seeded with NA so the junk row can be dropped after
## the loop.  If no out_ objects exist, 1:length(save) is 1:0 and get()
## would error on the empty name.
save <- ls()[grep("out_", ls())]
bar = NA
for (l in 1:length(save)) {
foo <- get(save[l])
bar <- rbind(foo, bar)
}
bar <- bar[-nrow(bar),] #drop the NA row used to seed bar
### CHANGE OUTPUT FILE NAME:
## NOTE(review): sim[s] is the leftover loop index (the last sim processed)
## even though bar spans all sims - confirm this is the intended file label.
fname <- paste("/path/to/output folder/", sim[s], "_random_5percent_30ind_RawData.csv", sep="")
write.csv(bar, file=fname)
## true positives:
## Locus 0 is counted as the true positive - presumably the locus under
## selection in these simulations; confirm against the simulation design.
tp <- bar[bar$locus == 0,]
### CHANGE OUTPUT FILE NAME:
fname <- paste("/path/to/output folder/", sim[s], "_random_5percent_30ind_TruePos.csv", sep="")
write.csv(tp, file=fname)
# summary of true and false positives
## For every sim x ordination method: tp = number of replicates in which
## locus 0 was detected (deduplicated by replicate), fp = total count of
## false-positive detections over the 10 replicates (deduplicated by locus
## within a replicate, so a locus flagged on several axes counts once), and
## fp.sd = SD of the per-replicate false-positive counts.
## NOTE(review): `summary` shadows base::summary(), and `bar` is reused as a
## scratch variable here, clobbering the raw-data table built above.
fp <- bar[bar$locus != 0,]
ord <- c("RDA","dbRDA")
summary <- matrix(NA, nrow=length(sim)*2, ncol=5)
colnames(summary) <- c("sim", "ord", "tp", "fp", "fp.sd")
summary[,1] <- as.vector(sapply(sim, function(x) rep(x,2)))
summary[,2] <- as.vector(rep(ord,length(sim)))
for (i in 1:length(sim)) {
foo <- tp[tp$sim == sim[i],]
baz <- fp[fp$sim == sim[i],]
for (j in 1:length(ord)) {
# true positives
bar <- foo[foo$ord == ord[j],]
bar1 <- bar[!duplicated(bar$rep),]
rowindex <- (i-1)*2 #two output rows (RDA, dbRDA) per sim
summary[j+rowindex,3] <- nrow(bar1)
#false positives
qux <- baz[baz$ord == ord[j],]
temp <- vector(mode="integer", length=10) #per-replicate FP counts
for (k in 1:length(repl)) {
rux <- qux[qux$rep == repl[k],]
rux1 <- rux[!duplicated(rux$locus),] # removes any loci detected on >1 axis in same replicate
temp[k] <- nrow(rux1)
}
summary[j+rowindex,4] <- sum(temp)
summary[j+rowindex,5] <- sd(temp)
}
}
### CHANGE OUTPUT FILE NAME:
fname <- paste("/path/to/output folder/", sim[s], "_random_5percent_30ind_Summary.csv", sep="")
write.csv(summary, file=fname)
## save file telling how many NA individuals removed
## Collect the per-(sim, rep) "NAinds_<sim><rep>" objects assigned in the
## main loop into one table; gtools::smartbind() row-binds data frames,
## aligning columns by name.
library(gtools)
save <- ls()[grep("NAinds_", ls())]
bar = data.frame()
for (l in 1:length(save)) {
foo <- get(save[l])
foo <- as.data.frame(foo)
bar <- smartbind(foo, bar)
}
## BUG FIX: the original executed `bar <- bar[-nrow(bar),]` here, copied
## from the raw-data block above where bar was seeded with an NA junk row.
## Here bar is seeded with an EMPTY data.frame, so that line silently
## discarded one real (sim, rep) record; it has been removed.
colnames(bar) <- c("sim","repl","num inds", "loci","num loci MAF < 3%")
rownames(bar) <- seq(1,nrow(bar),1)
### CHANGE OUTPUT FILE NAME:
fname <- paste("/path/to/output folder/", sim[s], "_random_5percent_30ind_NAinds.csv", sep="")
write.csv(bar, file=fname)
## save file telling how many snps are NA
## Same collection pattern as above, for the "NAsnps_<sim><rep>" objects.
save <- ls()[grep("NAsnps_", ls())]
bar = data.frame()
for (l in 1:length(save)) {
foo <- get(save[l])
foo <- as.data.frame(foo)
bar <- smartbind(foo, bar)
}
## BUG FIX: removed `bar <- bar[-nrow(bar),]` - bar is seeded with an empty
## data.frame (not an NA row), so the original line dropped one real record.
colnames(bar) <- c("sim","repl","num NA snps")
rownames(bar) <- seq(1, nrow(bar), 1)
### CHANGE OUTPUT FILE NAME:
fname <- paste("/path/to/output folder/", sim[s], "_random_5percent_30ind_NAsnps.csv", sep="")
write.csv(bar, file=fname)
|
aca8657419566b539f9731f417f6437ab1b281fa | 1cd3929bdf604552846fbf6aa487800ce6115424 | /man/ker.eq.Rd | 921b8224b4c827087985651ed81807e1a810e930 | [] | no_license | cran/SNSequate | ef8b2ade5cfc458e02838a5b1e091cbb795ea506 | 16e0b6aee24a6737c9726ba0bfb054ad2a64e625 | refs/heads/master | 2023-01-12T04:36:19.272414 | 2022-12-20T16:30:09 | 2022-12-20T16:30:09 | 17,693,580 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,354 | rd | ker.eq.Rd | \name{ker.eq}
\alias{ker.eq}
\alias{ker.eq.default}
\title{The Kernel method of test equating}
\description{This function implements the kernel method of test equating as described in Holland and Thayer (1989),
and Von Davier et al. (2004). Nonstandard kernels others than the gaussian are available. Associated standard error
of equating are also provided.
}
\usage{
ker.eq(scores, kert, hx = NULL, hy = NULL, degree, design, Kp = 1, scores2,
degreeXA, degreeYA, J, K, L, wx, wy, w, gapsX, gapsY, gapsA, lumpX, lumpY,
lumpA, alpha, h.adap)
}
\arguments{
{ Note that depending on the specified equating design, not all arguments are necessary
as detailed below.}
\item{scores}{ If the "EG" design is specified, a two column matrix containing the raw sample frequencies
coming from the two groups of scores to be equated. It is assumed that the data in the first
and second columns come from tests \eqn{X}{X} and \eqn{Y}{Y}, respectively.
If the "SG" design is specified, a matrix containing the (joint) bivariate sample
frequencies for \eqn{X}{X} (raws) and \eqn{Y}{Y} (columns).
If the "CB" design is specified, a two column matrix containing the observed scores
of the sample taking test \eqn{X}{X} first, followed by test \eqn{Y}{Y}. The \code{scores2}
argument is then used for the scores of the sample taking test Y first followed by test
\eqn{X}{X}.
If either the "NEAT_CB" or "NEAT_PSE" design is selected, a two column matrix containing
the observed scores on test \eqn{X}{X} (first column) and the observed scores on the anchor
test \eqn{A}{A} (second column). The \code{scores2} argument is then used for the observed
scores on test \eqn{Y}{Y}.}
\item{kert}{ A character string giving the type of kernel to be used for continuization.
Current options include "\code{gauss}", "\code{logis}", "\code{uniform}", "\code{epan}" and "\code{adap}"
for the Gaussian, logistic, uniform, Epanechnikov and adaptive kernels, respectively}
\item{hx}{ An integer indicating the value of the bandwidth parameter to be used for kernel continuization
of \eqn{F(x)}{F(x)}. If not provided (Default), this value is automatically calculated (see
details).}
\item{hy}{ An integer indicating the value of the bandwidth parameter to be used for kernel continuization
of \eqn{G(y)}{G(y)}. If not provided (Default), this value is automatically calculated (see
details).}
\item{degree}{ A vector indicating the number of power moments to be fitted to the marginal distributions
("EG" design), and/or the number or cross moments to be fitted to the joint distributions
(see Details).}
\item{design}{ A character string indicating the equating design (one of "EG", "SG", "CB", "NEAT_CE",
"NEAT_PSE")}
\item{Kp}{ A number which acts as a weight for the second term in the combined penalization function used
to obtain \code{h} (see details).}
\item{scores2}{ Only used for the "CB", "NEAT_CE" and "NEAT_PSE" designs. See the description of
\code{scores}.}
\item{degreeXA}{ A vector indicating the number of power moments to be fitted to the marginal distributions
\eqn{X}{X} and \eqn{A}{A}, and the number or cross moments to be fitted to the joint
distribution \eqn{(X,A)}{(X,A)} (see details). Only used for the "NEAT_CE" and "NEAT_PSE" designs.}
\item{degreeYA}{ Only used for the "NEAT_CE" and "NEAT_PSE" designs (see the description for
\code{degreeXA})}
\item{J}{ The number of possible \eqn{X}{X} scores. Only needed for "CB", "NEAT_CB" and "NEAT_PSE" designs}
\item{K}{ The number of possible \eqn{Y}{Y} scores. Only needed for "CB", "NEAT_CB" and "NEAT_PSE" designs}
\item{L}{ The number of possible \eqn{A}{A} scores. Needed for "NEAT_CB" and "NEAT_PSE" designs}
\item{wx}{ A number that satisfies \eqn{0\leq w_X\leq 1}{0<=w_x<=1} indicating the weight put on the data
that is not subject to order effects. Only used for the "CB" design.}
\item{wy}{ A number that satisfies \eqn{0\leq w_Y\leq 1}{0<=w_y<=1} indicating the weight put on the data
that is not subject to order effects. Only used for the "CB" design.}
\item{w}{ A number that satisfies \eqn{0\leq w\leq 1}{0<=w<=1} indicating the weight given to
population \eqn{P}{P}. Only used for the "NEAT" design.}
\item{gapsX}{ A \emph{list} object containing:
\describe{
\item{\code{index}}{A vector of indices between \eqn{0} and \eqn{J} to smooth "gaps", usually occurring at regular intervals due to scores rounded to integer values and other methodological factors. }
\item{\code{degree}}{An integer indicating the maximum degree of the moments fitted by the log-linear model.}
}
Only used for the "NEAT" design.
}
\item{gapsY}{ A \emph{list} object containing:
\describe{
\item{\code{index}}{A vector of indices between \eqn{0} and \eqn{K}.}
\item{\code{degree}}{An integer indicating the maximum degree of the moments fitted.}
}
Only used for the "NEAT" design.
}
\item{gapsA}{ A \emph{list} object containing:
\describe{
\item{\code{index}}{A vector of indices between \eqn{0} and \eqn{L}. }
\item{\code{degree}}{An integer indicating the maximum degree of the moments fitted.}
}
Only used for the "NEAT" design.
}
\item{lumpX}{An integer to represent the index where an artificial "lump" is created in the marginal distribution of frequencies for \eqn{X} due to recording of negative rounded formulas or any other methodological artifact.}
\item{lumpY}{An integer to represent the index where an artificial "lump" is created in the marginal distribution of frequencies for \eqn{Y}.}
\item{lumpA}{An integer to represent the index where an artificial "lump" is created in the marginal distribution of frequencies for \eqn{A}.}
\item{alpha}{Only for the adaptive kernel. Sensitivity parameter.}
\item{h.adap}{Only for the adaptive kernel. A list(hx, hy) containing the bandwidths of the adaptive kernel for each form.}
}
\details{This is a generic function that implements the kernel method of test equating as described in Von Davier et al.
(2004). Given test scores \eqn{X}{X} and \eqn{Y}{Y}, the functions calculates
\deqn{\hat{e}_Y(x)=G_{h_{Y}}^{-1}(F_{h_{X}}(x;\hat{r}),\hat{s})}{\hat{e}_Y(x)=G_{hy}^{-1}(F_{hx}(x;\hat{r}),\hat{s})}
where \eqn{\hat{r}}{\hat{r}} and \eqn{\hat{s}}{\hat{s}} are estimated score probabilities obtained via loglinear
smoothing (see \code{\link{loglin.smooth}}). The value of \eqn{h_X}{h_X} and \eqn{h_Y}{h_Y} can either be specified
by the user or left unspecified (default) in which case they are automatically calculated. For instance, one can
specifies large values of \eqn{h_X}{h_X} and \eqn{h_Y}{h_Y}, so that the \eqn{\hat{e}_Y(x)}{\hat{e}_Y(x)} tends to the
linear equating function (see Theorem 4.5 in Von Davier et al, 2004 for more details).
}
\value{ An object of class \code{ker.eq} representing the kernel equating process. Generic functions such as
\code{print} and \code{summary} have methods to show the results of the equating. The results include
summary statistics, equated values, standard errors of equating, and others.
The function \code{\link{SEED}} can be used to obtain standard error of equating differences (SEED) of two
objects of class \code{ker.eq}. The function \code{\link{PREp}} can be used on a \code{ker.eq} object to
obtain the percentage relative error measure (see Von Davier et al, 2004).
\item{Scores}{The possible values of \eqn{x_j}{xj} and \eqn{y_k}{yk}}
\item{eqYx }{The equated values of test \eqn{X}{X} in test \eqn{Y}{Y} scale}
\item{eqXy }{The equated values of test \eqn{Y}{Y} in test \eqn{X}{X} scale}
\item{SEEYx}{The standard error of equating for equating \eqn{X}{X} to \eqn{Y}{Y}}
\item{SEEXy}{The standard error of equating for equating \eqn{Y}{Y} to \eqn{X}{X}}
}
\references{
Gonzalez, J. (2014). SNSequate: Standard and Nonstandard Statistical Models and Methods for Test
Equating. \emph{Journal of Statistical Software, 59(7),} 1-30.
Holland, P. and Thayer, D. (1989). The kernel method of equating score distributions.
(Technical Report No 89-84). Princeton, NJ: Educational Testing Service.
Holland, P., King, B. and Thayer, D. (1989). The standard error of equating for the kernel method
of equating score distributions (Tech. Rep. No. 89-83). Princeton, NJ: Educational Testing Service.
Von Davier, A., Holland, P., and Thayer, D. (2004). \emph{The Kernel Method of Test Equating}.
New York, NY: Springer-Verlag.
}
\author{Jorge Gonzalez \email{jorge.gonzalez@mat.uc.cl}}
\seealso{\code{\link{loglin.smooth}}, \code{\link{SEED}}, \code{\link{PREp}}
}
\examples{
#Kernel equating under the "EG" design
data(Math20EG)
mod<-ker.eq(scores=Math20EG,kert="gauss",hx=NULL,hy=NULL,degree=c(2,3),design="EG")
summary(mod)
#Reproducing Table 7.6 in Von Davier et al, (2004)
scores<-0:20
SEEXy<-mod$SEEXy
SEEYx<-mod$SEEYx
Table7.6<-cbind(scores,SEEXy,SEEYx)
Table7.6
#Other nonstandard kernels. Table 10.3 in Von Davier (2011).
mod.logis<-ker.eq(scores=Math20EG,kert="logis",hx=NULL,hy=NULL,degree=c(2,3),design="EG")
mod.unif<-ker.eq(scores=Math20EG,kert="unif",hx=NULL,hy=NULL,degree=c(2,3),design="EG")
mod.gauss<-ker.eq(scores=Math20EG,kert="gauss",hx=NULL,hy=NULL,degree=c(2,3),design="EG")
XtoY<-cbind(mod.logis$eqYx,mod.unif$eqYx,mod.gauss$eqYx)
YtoX<-cbind(mod.logis$eqXy,mod.unif$eqXy,mod.gauss$eqXy)
Table10.3<-cbind(XtoY,YtoX)
Table10.3
## Examples using Adaptive and Epanechnikov kernels
x_sim = c(1,2,3,4,5,6,7,8,9,10,11,10,9,8,7,6,5,4,3,2,1)
prob_sim = x_sim/sum(x_sim)
set.seed(1)
sim = rmultinom(1, p = prob_sim, size = 1000)
x_asimD = c(1,7,13,18,22,24,25,24,20,18,16,15,13,9,5,3,2.5,1.5,1.5,1,1)
probas_asimD = x_asimD/sum(x_asimD)
set.seed(1)
asim = rmultinom(1, p = probas_asimD, size = 1000)
scores = cbind(asim,sim)
mod.adap = ker.eq(scores,degree=c(2,2),design="EG",kert="adap")
mod.epan = ker.eq(scores,degree=c(2,2),design="EG",kert="epan")
}
\concept{kernel equating}
|
f63ac8ce3dc0ffe760e42354682125cc0065e53f | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/webshot/examples/resize.Rd.R | ce21b06631a687eaf9f9d8145b272a956ae7fc71 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 377 | r | resize.Rd.R | library(webshot)
### Name: resize
### Title: Resize an image
### Aliases: resize

### ** Examples

# Guarded so the example only runs in an interactive session: webshot()
# performs network I/O and needs an external rendering engine (PhantomJS).
if (interactive()) {
# Can be chained with webshot() or appshot()
webshot("https://www.r-project.org/", "r-small-1.png") %>%
  resize("75%")
# Generate image that is 400 pixels wide
webshot("https://www.r-project.org/", "r-small-2.png") %>%
  resize("400x")
}
|
18204cd6a21bdc1bd6fe6543b9c4a1d70771dec9 | 0441a11b7d74d68d17c67a023268c97afcbd4627 | /BAMSandAllen/rCode/PartialMantelTestsForNomenclature.r | 7756c16a7661bd3235f765dfd03361cfe1ada18c | [] | no_license | leonfrench/ABAMS | fbe847d8c4e21388ca9150ad55d2b4508883ebcf | a4d65e9c9a8a437db9326828ebe7bdaefd9a81ce | refs/heads/master | 2020-08-05T21:09:21.769589 | 2017-07-04T21:36:16 | 2017-07-04T21:36:16 | 7,511,456 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 1,415 | r | PartialMantelTestsForNomenclature.r | library(vegan)
# NOTE(review): mantel.correlog() is a function in the vegan package, not a
# package of its own; this library() call likely fails -- confirm.
library(mantel.correlog)
#### last ran from /grp/java/workspace/BAMSandAllen/data/rankedGenes/near final ammon/mantel.correlog

# Pairwise brain-region matrices read from the working directory: gene
# expression correlation, connectivity correlation, spatial (Euclidean)
# distance, nomenclature similarity, and distance-corrected residual versions.
energies <- as.matrix(read.table("ConnectivityAndAllenPartialExpressionMatrixPair.NewEnergies.Correlation.txt"))
connections <- as.matrix(read.table("ConnectivityAndAllenPartialExpressionMatrixPair.Connectivity.Correlation.txt"))
distance <- as.matrix(read.table("ConnectivityAndAllenPartialExpressionMatrixPair.EuclidianDistance.Explain.Adjacency.txt"))
nomen <- as.matrix(read.table("ConnectivityAndAllenNomenclaturePair.Nomenclature.Correlation.txt"))
energiesLogCorrected <- as.matrix(read.table("ConnectivityAndAllenPartialExpressionMatrixPair.NewEnergies.Residuals(LogEuclidianDistance).Correlation.txt"))
connectionsLogCorrected <- as.matrix(read.table("ConnectivityAndAllenPartialExpressionMatrixPair.Connectivity.Residuals(LogEuclidianDistance).Correlation.txt"))

# Sanity check that row ordering agrees across the matrices: rank() on the
# character row names ranks them alphabetically, so each correlation below
# should be 1 when the matrices share the same row order.
nnames <- row.names(nomen)
dnames <-row.names(distance)
cnames <- row.names(connections)
enames <- row.names(energies)
cor(rank(nnames), rank(dnames))
cor(rank(nnames), rank(cnames))
cor(rank(nnames), rank(enames))

# Partial Mantel tests: association between two matrices while controlling
# for log-transformed spatial distance.
mantel.partial(energies, connections, log(distance))
mantel.partial(energies, nomen, log(distance))
mantel.partial(energies, -1*nomen, log(distance))
mantel.partial(connections, nomen, log(distance))

# Plain Mantel tests on the residual (already distance-corrected) matrices.
mantel(energiesLogCorrected, -1*nomen)
mantel(connectionsLogCorrected, nomen)
b0826637677f5ac8397cc201474b08de3c316919 | 6bc11a98ae9ce811134ea764f20d1a3bd7bdc28e | /assignment 2.R | da62d91b88f8e52156c79b2a26c232e599440e8c | [] | no_license | prinzz1208/acad-DS-LinearRegression | cade5ffae6867dc52e0cbf214d834b15dff37b59 | 0715cebebbfdbe5b6ef6e8c9e4cb935989696975 | refs/heads/master | 2022-06-02T21:44:22.695882 | 2020-04-29T13:45:34 | 2020-04-29T13:45:34 | 255,600,668 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 905 | r | assignment 2.R | mdeaths2 = matrix(mdeaths,nrow = 6,ncol = 12)
# Analyse monthly deaths from lung disease in UK males (datasets::mdeaths,
# Jan 1974 - Dec 1979): tabulate deaths by year x month, plot them, and
# compute the year-on-year percentage change in total deaths.
#
# FIX: mdeaths is stored in time order (Jan 1974, Feb 1974, ...), i.e. one
# YEAR per 12 consecutive values, so the 6x12 matrix must be filled row by
# row (byrow = TRUE).  The original filled column-wise, which scattered each
# year's values across rows and mislabelled the whole table.
mdeaths2 = matrix(mdeaths, nrow = 6, ncol = 12, byrow = TRUE)
colnames(mdeaths2) = month.abb
rownames(mdeaths2) = seq(1974, 1979)

# VISUALIZATION OF DATASET: grouped bars, one colour per year.
bar.color = c('maroon','red','orange','yellow','magenta','pink')
barplot(mdeaths2, ylim = c(0, 4000), xlab = 'Month', ylab = 'Number of Deaths',
        col = bar.color, beside = TRUE, names.arg = month.abb, cex.names = 0.8)
legend("top", legend = 1974:1979, fill = bar.color, horiz = TRUE)

# CALCULATION OF RATE: percentage change between consecutive annual totals.
# Vectorized with diff() instead of growing a vector with append() in a loop;
# labels such as "1974-1975" are built in one vectorized paste() call.
sumByRow = unname(rowSums(mdeaths2))
rate = diff(sumByRow) / head(sumByRow, -1) * 100
names(rate) = paste(1974:1978, 1975:1979, sep = "-")

# VISUALISATON OF RATE
barplot(rate, xlab = 'Rate of deaths per year', ylab = 'Percentage', col = 'red')
legend("top", legend = 'Rate', fill = 'red')
|
b9d63f1596208830fd55e60f1d48a72abed2a057 | fa3bacaf97c83832b8a2baaf53cce3859c104f2c | /cachematrix.R | 2c5d795e79f0878f25721dcec24b5f93a168f7b6 | [] | no_license | css281/ProgrammingAssignment2 | 0f0df491722c29e62ab4f26a97d87335f01e779d | 6774ed96843d70451f0e1017b2186f83fb35529d | refs/heads/master | 2021-01-18T10:20:45.295934 | 2015-08-23T21:48:29 | 2015-08-23T21:48:29 | 41,223,776 | 0 | 0 | null | 2015-08-22T20:47:25 | 2015-08-22T20:47:24 | null | UTF-8 | R | false | false | 3,148 | r | cachematrix.R | ## Computionally intensive operations in R may potentially benefit from caching
## the result of processing input dataset. Retrieving result from cache avoids
## re-processing when the input is unchanged and the same operation is performed again.
## Performance benefits and resource usage optimization could be very significant.
## As an example, matrix inversion is one such computation and the two functions below
## illustrate this concept where a square invertible matrix is the input and the inverse
## is computed once and stored in cache for subsequent retrievals.
## 1. makeCacheMatrix(): This function creates a special "matrix" object that can cache its inverse.
## 2. cacheSolve(): This function computes the inverse of the special "matrix" returned by
## makeCacheMatrix above. If the inverse has already been calculated (and the matrix has not changed),
## then the cachesolve should retrieve the inverse from the cache.
## Sample test run log with comments have been included in the file TEST_RUNS.md
makeCacheMatrix <- function(x = matrix()) {
  # Build a special "matrix" wrapper that can cache its inverse.
  #
  # x: a square invertible matrix (defaults to a 1x1 NA matrix).
  # Returns a list of four accessor closures, all sharing this call's
  # environment so they see the same `x` and cached inverse:
  #   set(y)       -- replace the matrix and invalidate the cache
  #   get()        -- return the current matrix
  #   setinv(inv)  -- store a computed inverse in the cache
  #   getinv()     -- return the cached inverse, or NULL if not yet computed

  # Cached inverse; NULL means "not computed yet".
  inverseCache <- NULL

  list(
    set = function(y) {
      # `<<-` assigns in the enclosing environment, so every closure in the
      # returned list observes the new matrix and the cleared cache.
      x <<- y
      inverseCache <<- NULL
    },
    get = function() x,
    setinv = function(inverse) inverseCache <<- inverse,
    getinv = function() inverseCache
  )
}
## cacheSolve(): computes the inverse of the matrix returned by makeCacheMatrix().
## If the inverse has already been calculated and the matrix has not changed,
## it retrieves the inverse from the cache.
cacheSolve <- function(x, ...) {
  # Return the inverse of the special "matrix" produced by makeCacheMatrix().
  #
  # x:   the list of accessor closures returned by makeCacheMatrix()
  # ...: extra arguments forwarded to solve()
  # On a cache hit a message is emitted and the stored inverse is returned;
  # otherwise the inverse is computed with solve(), cached via x$setinv(),
  # and returned.
  cached <- x$getinv()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and remember it for next time.
    cached <- solve(x$get(), ...)
    x$setinv(cached)
  } else {
    message("Retrieving cached inverse")
  }
  cached
}
|
3b93c451258c2bcc6b3f5481239757a1fb3c56ba | 17d4404e41c3f730912a446a596de59b0d3c50b1 | /R/diagnostics.R | cefc244ff390f6757f21627cda4d39365a88edbb | [] | no_license | JamesSul/analytics4managers | 453713a6181840d4bbe0bcf461884c4e28298a93 | 1e98a3b490b58492b9863d89410e66c5f53415e6 | refs/heads/master | 2020-04-29T11:48:25.732283 | 2019-03-20T00:42:09 | 2019-03-20T00:42:09 | 176,113,586 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,361 | r | diagnostics.R | # Hello, world!
#
# This is an example function named 'hello'
# which prints 'Hello, world!'.
#
# You can learn more about package authoring with RStudio at:
#
# http://r-pkgs.had.co.nz/
#
# Some useful keyboard shortcuts for package authoring:
#
# Build and Reload Package: 'Cmd + Shift + B'
# Check Package: 'Cmd + Shift + E'
# Test Package: 'Cmd + Shift + T'
resid.vs.fitted <- function(model) {
  # Diagnostic scatter of residuals against fitted values for a fitted model,
  # with a blue LOESS trend line (a roughly flat trend suggests the linearity
  # assumption holds).  Returns a ggplot object.
  diag_df <- augment(model)
  ggplot(diag_df, aes(x = .fitted, y = .resid)) +
    geom_point(size = 2) +
    geom_smooth(method = "loess", color = "blue", se = FALSE) +
    labs(title = "Residuals vs. Fitted",
         x = "Fitted values",
         y = "Residuals") +
    theme_bw()
}
# NOTE(review): this two-argument lin_compare() is immediately shadowed by the
# three-argument definition that follows it in this file, so it is dead code.
# It differs from its replacement in two visible ways: points are drawn at the
# default size and the title reads "Linear/LOESS comparison".
lin_compare <- function(df, name_x, name_y) {
  # Capture the unevaluated column names the caller typed, as strings (NSE).
  c1 <- deparse(substitute(name_x))
  c2 <- deparse(substitute(name_y))
  # eval(parse(text = ...)) re-evaluates those strings inside aes(); this
  # works for bare column names but is the fragile eval/parse anti-pattern.
  df %>% ggplot(aes(x = eval(parse(text = c1)), y = eval(parse(text = c2)))) +
    geom_point() +
    geom_smooth(method = "lm", se = FALSE, color = "red") +
    geom_smooth(method = "loess", se = FALSE, color = "blue") +
    xlab(c1) + ylab(c2) + ggtitle("Linear/LOESS comparison") +
    theme_bw()
}
# Scatter of name_y vs name_x with both a linear (red) and a LOESS (blue)
# fit, optionally coloured by a third column name_z.  Column names are passed
# unquoted (non-standard evaluation); name_z = FALSE means "no colour column".
#
# NOTE(review): eval(parse(text = ...)) inside aes() is the classic
# eval/parse anti-pattern; with ggplot2 >= 3.0 the .data[[c1]] pronoun is the
# supported way to map a string-named column.  Also note that mapping name_z
# to colour implicitly GROUPS the smooths, so one red lm line and one blue
# loess line are drawn per level of name_z.
lin_compare <- function(df, name_x, name_y, name_z = FALSE) {
  # Capture the unevaluated argument expressions as strings.
  c1 <- deparse(substitute(name_x))
  c2 <- deparse(substitute(name_y))
  c3 <- deparse(substitute(name_z))
  # The FALSE default deparses to the literal string "FALSE", which is the
  # sentinel for "no colour variable requested".
  if(c3 == "FALSE") {
    df %>% ggplot(aes(x = eval(parse(text = c1)), y = eval(parse(text = c2)))) +
      geom_point(size = 2) +
      geom_smooth(method = "lm", se = FALSE, color = "red") +
      geom_smooth(method = "loess", se = FALSE, color = "blue") +
      xlab(c1) + ylab(c2) + ggtitle("Linear comparison") +
      theme_bw()
  }
  else {
    df %>% ggplot(aes(x = eval(parse(text = c1)), y = eval(parse(text = c2)),
                      color = eval(parse(text = c3)))) +
      geom_point(size = 2) +
      geom_smooth(method = "lm", se = FALSE, color = "red") +
      geom_smooth(method = "loess", se = FALSE, color = "blue") +
      xlab(c1) + ylab(c2) + ggtitle("Linear comparison") +
      labs(color = c3) +
      theme_bw()
  }
}
model_compare <- function(...) {
  # Summarise one or more fitted models side by side: one row per model,
  # labelled by its formula (mname) and the key fit statistics reported by
  # broom::glance().
  #
  # FIX: the original hard-coded `for(i in 2:length(model_list))`, which
  # evaluates to c(2, 1) when a single model is supplied and crashes with a
  # subscript-out-of-bounds error; it also grew the result with rbind()
  # inside the loop.  Both are replaced with lapply() + do.call(rbind, ...).
  model_list <- list(...)
  if (length(model_list) == 0L) {
    stop("supply at least one fitted model", call. = FALSE)
  }
  glance_row <- function(mdl) {
    g <- glance(mdl)
    # Model formula: second element of the stored call, collapsed to one
    # string (deparse may split long formulas across several lines).
    g$mname <- Reduce(paste, deparse(mdl$call[[2]]))
    g %>% select(mname, everything())
  }
  df <- do.call(rbind, lapply(model_list, glance_row))
  df %>% select(mname, adj.r.squared, sigma, statistic, p.value, AIC, BIC)
}
# Grade a Boston-housing model for assignment 1: score it on a held-out test
# set fetched over HTTP, enforce the assignment's five-predictor limit, and
# plot predicted vs. observed medv with the fit statistics as the plot title.
assign1_test <- function(mdl) {
  # Held-out test data (network I/O; requires internet access).
  test <- read_csv("http://www.jamessuleiman.com/teaching/datasets/boston_test.csv",
                   col_types = cols(chas = col_integer(), rad = col_integer()))
  # Add a `pred` column of model predictions (modelr::add_predictions).
  test2 <- test %>%
    add_predictions(mdl)
  # Mean squared error of the predictions on the held-out set.
  mse <- test2 %>%
    summarize(MSE = mean((medv - pred)^2))
  # A caret::train object wraps the underlying lm; unwrap it so glance() and
  # the coefficient count below operate on the final fitted model.
  if(class(mdl)[[1]] == "train")
    mdl = mdl$finalModel
  # Intercept plus at most five predictors => at most six coefficients.
  if(length(mdl$coefficients) > 6)
    stop('You must choose five or fewer predictors as stated in the assignment.')
  df <- glance(mdl)
  df <- df %>% select(adj.r.squared, BIC)
  df <- cbind(df, mse)
  title <- paste("mse: ", round(df$MSE, digits = 4), ", adjusted.r.squared",
                 round(df$adj.r.squared, digits = 4), ", BIC: ",
                 round(df$BIC, digits = 2))
  # Predicted-vs-observed scatter (lin_compare defined earlier in this file),
  # retitled with the summary statistics.
  lin_compare(test2, pred, medv) +
    ggtitle(paste0(title))
}
|
7c98eb6003a1f2cb758db7aaa5a5730399680298 | 3a5fa834091a8fd9d9749fcd6cb2a0bfea46ac62 | /nearly_there.R | ce822e39984dbc302b0dc013ece038c823615f78 | [] | no_license | foundinblank/study1adults | 47100eb05ef9237ef452bb8e6e5497a6a17241e2 | 16239dbdb1a2b88678a072eb398a6d1a025c7d9c | refs/heads/master | 2022-08-03T17:41:14.491830 | 2020-05-23T12:11:23 | 2020-05-23T12:11:23 | 102,015,531 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,437 | r | nearly_there.R | # building a crqa loop
# Do averaged of fw bears
# Fixed CRQA settings used throughout this script; radius is the one
# parameter searched over below.
# NOTE(review): these three variables are never referenced again -- the same
# values are hard-coded inside run_crqa_rr()/run_crqa() further down.
radius_fixed <- 34
embed_fixed <- 14
delay_fixed <- 117

# `data_lists` is created by an earlier script in this project (apparently
# one row per participant/story with list-columns of time series) -- TODO
# confirm against the upstream pipeline.
target <- data_lists %>%
  filter(name == "Adam" & story == "bears")

# Example eye-gaze and right-hand y-coordinate series: the first row's series.
eye_y_target <- data_lists %>%
  pull(eye_y) %>%
  pluck(1)

rhand_y_target <- data_lists %>%
  pull(rhand_y) %>%
  pluck(1)

# Candidate radii to search, wrapped in a list for add_column()/unnest() below.
test_radius <- list(seq(10,50))
run_crqa_rr <- function(x, y, z) {
  # Run a cross-recurrence quantification analysis on the two series x and y
  # and return only the recurrence rate (RR).  Every CRQA setting except the
  # radius z is fixed to the values used throughout this script (delay 117,
  # embedding dimension 14, z-score rescaling and normalization).
  crqa_out <- crqa(x, y,
                   delay = 117,
                   embed = 14,
                   rescale = 2,
                   radius = z,
                   normalize = 2,
                   mindiagline = 2,
                   minvertline = 2,
                   tw = 0,
                   whiteline = FALSE,
                   recpt = FALSE,
                   side = 'both')
  crqa_out[["RR"]]
}
radius_loop <- function(x, y, z) {
  # Run the full CRQA once per candidate radius in z and return the list of
  # results (one crqa result per radius, in the order of z).
  #
  # FIX: the original looped `for(i in 1:length(z))` but passed the loop
  # INDEX i -- not the radius z[i] -- to run_crqa, overwrote its single
  # result variable on every iteration, and returned nothing.
  lapply(z, function(r) run_crqa(x, y, r))
}
# Exploratory single runs.  pmap_dbl() iterates over length-1 list elements,
# so run_crqa_rr is called once with the WHOLE radius vector as z -- TODO
# confirm that is intended rather than one call per radius.
l <- list(list(eye_y_target), list(rhand_y_target), test_radius)
pmap_dbl(l, run_crqa_rr)
# NOTE(review): run_crqa is not defined until later in this file; running the
# script top-to-bottom fails here unless run_crqa already exists in the
# session.  Consider moving its definition above this line.
run_crqa(eye_y_target, rhand_y_target, 37)

# All participants for the "bears" story.
bears <- data_lists %>%
  filter(story == 'bears')

# Radius search: one row per participant x candidate radius, with the
# recurrence rate computed in parallel (furrr::future_pmap_dbl).
looping <- bears %>%
  add_column(test_radius) %>%
  unnest(test_radius, .drop = F) %>%
  group_by(name) %>%
  mutate(rec_values = future_pmap_dbl(list(eye_y, rhand_y, test_radius), run_crqa_rr))

# For each participant keep the radius whose RR is closest to the 5% target
# (ties keep all tied rows because filter(diff == min(diff)) is used).
target_radius <- looping %>%
  select(name, test_radius, rec_values) %>%
  mutate(diff = abs(5 - rec_values)) %>%
  group_by(name) %>%
  filter(diff == min(diff)) %>%
  ungroup() %>%
  arrange(rec_values)

# Per-participant radius column, renamed for the join below.
target_radius_to_join <- target_radius %>%
  select(name, test_radius) %>%
  rename(radius = test_radius)
run_crqa <- function(x, y, z) {
  # Full cross-recurrence quantification analysis of series x and y at
  # radius z, returning the complete crqa result object.  All other settings
  # match run_crqa_rr() (delay 117, embedding dimension 14, z-score
  # rescaling and normalization).
  crqa_args <- list(x, y,
                    delay = 117, embed = 14, rescale = 2, radius = z,
                    normalize = 2, mindiagline = 2, minvertline = 2,
                    tw = 0, whiteline = FALSE, recpt = FALSE, side = 'both')
  do.call(crqa, crqa_args)
}
# Run the full CRQA for each participant at their selected radius (right
# hand vs. eye gaze), in parallel; the whole result object is stored in a
# list-column.
crqas <- bears %>%
  left_join(target_radius_to_join, by = "name") %>%
  mutate(rhand = future_pmap(list(rhand_y, eye_y, radius), run_crqa))

# Pull the recurrence rate (RR) and determinism (DET) out of each result.
crqa_results <- crqas %>%
  mutate(rhand_rr = map_dbl(rhand, pluck, "RR"),
         rhand_det = map_dbl(rhand, pluck, "DET")) %>%
  select(name, maingroup, story, direction, radius, rhand_rr, rhand_det)

# Between-group comparison plot of determinism (ggstatsplot), with
# Holm-adjusted pairwise p-values annotated.
crqa_results %>%
  ggbetweenstats(x = maingroup,
                 y = rhand_det,
                 pairwise.comparisons = TRUE,
                 pairwise.annotation = "p.value",
                 p.adjust.method = "holm")

# Mixed model: determinism by group with a random intercept per direction.
model1 <- lmer(data = crqa_results, rhand_det ~ maingroup + (1|direction))
summary(model1)
|
495707ae9d9122f51a3297d4313a6ca64b22f752 | 35d3e7eda3e80df5d1f7648091faa8cf8fe0c6c9 | /ui.R | ecab3f14212280abedb714393e647c8cbefe2ae2 | [] | no_license | aephidayatuloh/aida | af0ad3621dd77ff07ac81dfa3a3d76b634dddb59 | 240ebc7a106181c550f1e0f40d2d45d65ec1d4fa | refs/heads/master | 2021-09-10T07:09:04.645376 | 2018-03-22T04:42:23 | 2018-03-22T04:42:23 | 123,667,898 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,407 | r | ui.R | library(shiny)
library(shinyjs)
library(caret)
library(randomForest)
library(doSNOW)
library(ggplot2)
# library(RSQLite)
library(ROAuth)
library(twitteR)
library(stringr)
library(wordcloud2)
library(readr)
library(dplyr)
library(tidytext)
library(katadasaR)
# TwitterAuth.R appears to provide twitter.connect() used below -- TODO
# confirm what else it defines.
source("TwitterAuth.R")
# Where-On-Earth IDs (country/area names) used to populate the
# trending-topics location inputs in the UI.
woeid <- read_csv("data/woeid.csv")
# Authenticate against the Twitter API once, when this UI file is sourced.
twitter.connect()
# NOTE(review): leftover debug output -- prints "1" to the console twice at
# app start-up; safe to remove.
cat(1)
print(1)
# Header title markup, injected into the page via HTML() below.
PAGE_TITLE <- "Application for<br/>Integrated Data Analytic"
# Single-page UI: custom CSS, a header row with title and logos, an intro
# paragraph, and four top-level pill tabs (Predictive, Twitter Mining, and
# three "Under Construction" placeholders).
shinyUI(
  fluidPage(
    tags$head(
      # Custom CSS for the large "Predict Now!" button (class .predbutt).
      tags$style(type='text/css', ".predbutt { vertical-align: middle; height: 50px; width: 15%; font-size: 20px; align: middle; display: flex; justify-content: center;}")
      # tags$style(type="text/css", "label{ display: table-cell; text-align: center; vertical-align: middle; } .form-group { display: table-row;}")
    ),
    # Header row: HTML page title on the left, two logo images on the right.
    fluidRow(titlePanel(windowTitle = "AIDAnalytic",
      title =
        div(column(8,
          h1(HTML(PAGE_TITLE), style = "font-size: 230%; font-weight: bold; color: darkblue;")
        ),
        column(4,
          img(
            src = "images/aidanalytics2.png",
            # height = 70,
            width = 310 #,
            # style = "margin:10px 10px; color: blue;"
          ),
          img(
            src = "images/marketing-data-analytics_chart.png",
            # height = 70,
            width = 300,
            style = "margin:10px 10px; color: blue;"
          )
        )
        )
    )
    ),
    br(),
    p("Finding an accurate machine learning is not the end of the project.
In this post you will discover how to finalize your machine learning model in R including: making predictions on unseen data, re-building the model from scratch and saving your model for later use.
Let’s get started."),
    # ---- Tab 1: Predictive modelling (Titanic demo) ----
    tabsetPanel(type = "pills",
      tabPanel("Predictive",
        h1("Predictive Modeling", style = "text-transformation: bold;"),
        # Left column: download buttons for the Titanic train/test data.
        column(3,
          wellPanel("If you want to try to build model (Model Development) on this app, you need to download the training data first.",
            br(),
            downloadButton("dwnTrain", "Training"),
            br(),
            br(),
            "If you want to try to predict using built model (Predictive) on this app, you need to download the test data first.",
            br(),
            downloadButton("dwnTest", "Test"),
            style = "background: #347bbc; color: white; text-transformation: bold;")
        ),
        # Right column: nested tabs for the modelling workflow.
        column(9,
          tabsetPanel(type = "pills",
            tabPanel("Home",
              br(),
              fluidRow(
                br(),
                column(6,
                  HTML('<iframe width="900" height="490" src="https://www.youtube.com/embed/z8PRU46I3NY" frameborder="0" allow="autoplay; encrypted-media" allowfullscreen></iframe>')
                )
              ),
              p(HTML("<strong>DISCLAIMER</strong>"), "This is not the model builder web app platform like H2O or Microsoft Azure Machine Learning Studio. This app is built only using Titanic dataset from Kaggle Titanic machine learning competition. So, use other datasets would not fit this app and would be error.")
            ),
            # Model Development: upload training data, inspect, preprocess, fit.
            tabPanel("Model Development",
              h1("Build Predictive Model"),
              tabsetPanel(
                tabPanel("Data",
                  br(),
                  h3("How To Use"),
                  p("You must click the", HTML("<strong>Browse...</strong>"), "button and upload data you want to use as train data."),
                  column(9,
                    fileInput("datatrain", "Upload Data File"),
                    checkboxInput("strToFactor1", "String as factor")
                  )
                ),
                tabPanel("Preview",
                  br(),
                  column(12,
                    verbatimTextOutput("structure")
                  ),
                  column(12,
                    verbatimTextOutput("summarize")
                  ),
                  uiOutput("univariateTrain"),
                  verbatimTextOutput("uniSummary"),
                  p("Is there any missing value or blank?"),
                  verbatimTextOutput("isNA"),
                  plotOutput("uniplotTrain"),
                  dataTableOutput("prevDevTrain")),
                tabPanel("Preprocess",
                  br(),
                  actionButton("preprocDev", "Auto Pre-processing")
                ),
                tabPanel("Modeling",
                  br(),
                  actionButton("modDev", "Develop Model")
                ),
                tabPanel("Metrics",
                  verbatimTextOutput("metric"))
              )
            ),
            # Predictive: upload unseen data and score it with the built model.
            tabPanel("Predictive",
              h1("Use Predictive Model"),
              tabsetPanel(
                tabPanel("Data",
                  br(),
                  h3("How To Use"),
                  p("You must click the", HTML("<strong>Browse...</strong>"), "button and upload data you want to predict."),
                  fileInput("datapred", "Upload Data File"),
                  checkboxInput("strToFactor2", "String as factor")
                ),
                tabPanel("Preview",
                  br(),
                  uiOutput("univariateTest"),
                  verbatimTextOutput("predSummary"),
                  verbatimTextOutput("predisNA"),
                  plotOutput("uniplotTest")
                ),
                tabPanel("Preprocess",
                  br(),
                  actionButton("preprocPred", "Auto Pre-processing")),
                tabPanel("Predict",
                  br(),
                  div(class = "predbutt",
                    actionButton("procPred", "Predict Now!")
                  ),
                  br(),
                  br(),
                  br()
                ),
                tabPanel("Results",
                  br(),
                  p("Download the predicted result as CSV file. The file consists of 2 (two) columns (PassengerId, Survived)."),
                  br(),
                  downloadButton("dwnPred", "Predicted"),
                  br(),
                  br(),
                  br()
                )
              )
            )
          )
        )
      ),
      # ---- Tab 2: Twitter mining (trends, wordcloud, web scraping) ----
      tabPanel("Twitter Mining",
        h1("Extract Insight From Twitter"),
        tabsetPanel(type = "pills",
          tabPanel("Trending Topics",
            h1("Trending Topic Today"),
            column(3,
              selectInput("woecountry", "Country",
                choices = woeid$country),
              selectInput("woearea", "Location/Area",
                choices = woeid$name),
              # NOTE(review): icon("hostory") looks like a typo for "history".
              actionButton("trendBut", "Refresh", icon = icon("hostory"))
            ),
            column(9,
              tableOutput("trends"),
              br()
            )
          ),
          tabPanel("wordcloud",
            column(3,
              br(),
              textInput("searchterm", "Search Keyword"),
              numericInput("ntweets", "Max. Number of tweets", min = 0, max = 1000, value = 500),
              br(),
              br(),
              actionButton("searchtw", "Process", icon = icon("searchengine"))
            ),
            column(9,
              wordcloud2Output("wordcld")
            )
          ),
          tabPanel("Web Scrapping",
            h1("Extract Contents From A Web"),
            # column(1, h5("URL: ")),
            column(8, textInput("urlscrap", label = NULL, placeholder = "URL: (e.g https://www.microsoft.com/)", width = "100%")),
            column(4, actionButton("scrapBut", "Process")),
            # br(),
            column(12,
              h3("Information"),
              # NOTE(review): renderWordcloud2() is a server-side function; a
              # UI file would normally use wordcloud2Output() here -- confirm.
              renderWordcloud2("webcloud")
            )
          )
        )
      ),
      # ---- Remaining top-level tabs are placeholders ----
      tabPanel("Stat Learn",
        h1("Under Construction")
      ),
      tabPanel("Graphical Learn",
        h1("Under Construction")
      ),
      tabPanel("Database Learn",
        h1("Under Construction")
      )
    ),
    br(),
    br(),
    br(),
    br(),
    h5("Copyright 2018 Aep Hidayatuloh", style = "text-align: center; font-weight: bold;")
  )
)
0dd1c66945417819d54a1f348052a7b02ea6a15d | 836d7f80fc7a1a3af06da912ece15cf5c5faed4a | /pkg/GEARStools/R/ImageCollection.rasterEngine.R | e51c6444c552dc7265e53d476b6c56dba1567ca5 | [] | no_license | gearslaboratory/gearstools | 528c93cff9e42c7ada370c3c1070228aaead688c | 6fe38246d036ba54b496a03baf16bb5abe7b0083 | refs/heads/master | 2020-03-16T03:20:55.261482 | 2018-11-25T21:51:37 | 2018-11-25T21:51:37 | 132,485,668 | 0 | 4 | null | 2018-11-25T21:51:38 | 2018-05-07T16:09:53 | R | UTF-8 | R | false | false | 5,137 | r | ImageCollection.rasterEngine.R | #' @export
# Apply a rasterEngine function to every Image in an ImageCollection,
# optionally filtering the collection first, and dispatch the per-image work
# through batchtools.  One output file per image is written to outdirectory,
# named <image basename><filesuffix>.
ImageCollection.rasterEngine <- function(
  ImageCollection,
  # rasterEngine stuff:
  fun,args=NULL,
  outdirectory,filesuffix, # Where to store files + suffix
  chunk_format="data.frame",
  blocksize=NULL,
  # Image stuff:
  retrieve_stack,
  RasterStacks_names=NULL,
  overwrite=F,
  # Filter stuff:
  filterDate=NULL,filterDOY=NULL,filterMonths=NULL,
  filterBounds=NULL,
  filterImageNums=NULL,
  filtered_ImageCollection_fname=tempfile(fileext=".Rdata"),
  # Parallel options:
  parallel_engine="batchtools",
  # Batchtools options:
  batchtools_reg=NULL,
  batchtools_resources=list(ncpus=1),
  batchtools_chunk.size=1,
  # batchtools_cluster.functions=NULL,
  debugmode=F,verbose=F)
{
  # TODO: ... to filter and other...
  # Save the parameters to a list that will be preserved in the ImageCollection:
  ImageCollection.rasterEngine_parameters <- mget(ls())

  # A character ImageCollection is treated as a path to a saved .Rdata file;
  # load() is assumed to restore an object named ImageCollection into this
  # frame -- TODO confirm that holds for all saved collections.
  if(is.character(ImageCollection))
  {
    if(file.exists(ImageCollection)) load(ImageCollection) else stop("ImageCollection was not found...")
  }

  # Only filter when at least one filter argument was supplied.
  if(!is.null(filterDate) || !is.null(filterDOY) || !is.null(filterMonths) ||
      !is.null(filterBounds) || !is.null(filterImageNums))
  {
    # Filter the imagecollection
    ImageCollection <- ImageCollection.filter(
      ImageCollection=ImageCollection,
      filterDate=filterDate,filterDOY=filterDOY,filterMonths=filterMonths,
      filterBounds=filterBounds,
      # filterRawMetadata,
      filterImageNums=filterImageNums,
      ImageCollection_fname=filtered_ImageCollection_fname)
  }

  decompressed_dirs <- ImageCollection$buildparameters$decompressed_dirs

  # These will be loaded into the cluster:
  ImageCollection.rasterEngine_params_objects <- c(
    # rasterEngine stuff:
    "fun","args",
    # "outdirectory","filesuffix",
    "chunk_format","blocksize",
    # Image stuff:
    "decompressed_dirs","retrieve_stack",
    "RasterStacks_names",
    "overwrite",
    # Other parameters:
    "verbose"
  )

  # Worker executed once per image on the cluster: rebuild the Image object
  # there, then either reuse an existing output file (when overwrite = FALSE
  # and outname exists) or run Image.rasterEngine() and write outname.
  ImageCollection.rasterEngine_function <- function(
    # Image parameters:
    fname,driver,decompressed_dirs,
    retrieve_stack,RasterStacks_names,
    # rasterEngine parameters:
    fun,args,
    # outdirectory,filesuffix, # Where to store files + suffix
    outname,
    chunk_format,blocksize,
    # Other parameters:
    verbose,
    # rslurm_objects_file,
    overwrite
  )
  {
    require("GEARStools")
    # print(rslurm_objects_file)
    # Hack:
    # if(!missing(rslurm_objects_file)) { load(rslurm_objects_file) }
    # Create an image object:
    Image_retrieved <- Image(
      fname=fname,
      driver=driver,
      retrieve_metadata=F,
      retrieve_stack = retrieve_stack,
      decompressed_dir = decompressed_dirs,
      verbose=verbose, overwrite=overwrite
    )
    # NEED TO FIX IMAGECOLLECTION TO CREATE BASENAMES:
    # Image_retrieved$metadata$basename <- sub('\\.tar.gz$', '',basename(Image_retrieved$metadata$fname))
    # filename <- file.path(outdirectory,paste(Image_retrieved$metadata$basename,filesuffix,sep=""))
    if(!is.null(RasterStacks_names))
    {
      names(Image_retrieved$RasterStacks) <- RasterStacks_names
    }
    if(!overwrite && file.exists(outname))
    {
      # Existing output reused as-is.
      Image_rasterEngine <- brick(outname)
    } else
    {
      # Now apply function to this image:
      Image_rasterEngine <- Image.rasterEngine(
        Image=Image_retrieved,
        fun=fun,
        args=args,
        chunk_format=chunk_format,
        verbose=verbose,
        output_fname=outname,
        # RasterStacks_names=RasterStacks_names,
        blocksize=blocksize)
    }
    return(Image_rasterEngine)
  }

  if(parallel_engine=="batchtools")
    ### BATCHTOOLS IMPLEMENTATION
  {
    if(verbose) message("Using batchtools for processing...")
    # Need github stuff here:
    if(!require("batchtools")) install.packages("batchtools")
    # This should be moved higher up.
    if(is.null(batchtools_reg))
      stop("Please create a batchtools registry first, and pass it to this function.")
    else setDefaultRegistry(batchtools_reg)

    # One job row per image: input file, driver, and output filename.
    ImageCollection.rasterEngine_params <- data.frame(
      fname=sapply(ImageCollection$Images,function(X)
      { return(X$metadata$fname) } ),
      driver=sapply(ImageCollection$Images,function(X)
      { return(X$metadata$driver) } ),
      outname=sapply(ImageCollection$Images,function(X)
      { return(file.path(outdirectory,paste(X$metadata$basename,filesuffix,sep=""))) } ),
      stringsAsFactors=F)
    # add output filenames here:
    # filename <- file.path(outdirectory,paste(Image_retrieved$metadata$basename,filesuffix,sep=""))

    # Capture the shared (non-per-image) arguments by name for batchMap().
    ImageCollection.rasterEngine_params_objects_get <- lapply(ImageCollection.rasterEngine_params_objects,FUN=function(X) get(X))
    names(ImageCollection.rasterEngine_params_objects_get) <- ImageCollection.rasterEngine_params_objects

    ids = batchMap(fun=ImageCollection.rasterEngine_function,
      args=ImageCollection.rasterEngine_params,
      more.args=ImageCollection.rasterEngine_params_objects_get)
    ids$chunk = chunk(x=seq(nrow(ids)),chunk.size=batchtools_chunk.size)
    submitJobs(ids=ids,resources = batchtools_resources)
  }

  # NOTE(review): leftover debugging call -- browser() drops into the
  # interactive debugger every time this function runs in an interactive
  # session; remove before release.
  browser()
  # Build the imagecollection here
  # NOTE(review): the function looks unfinished from here on: fnames is
  # computed but never used, the submitted batchtools jobs are never waited
  # on or collected, and the function ends by invisibly returning the
  # captured parameter list from the final assignment.
  fnames <- ImageCollection.rasterEngine_params$outname
  buildparameters <- ImageCollection.rasterEngine_parameters
}
333c751984c1d335ed88247dec0924d9171b7cdc | 99c7b0f9c6882ea90b3960f144e80c4eb77159e3 | /23_pull_nominal_SNPs_for_Eric.R | e3b8eca4442885a34757a5e703ee6f7b87851d01 | [] | no_license | WheelerLab/ThePlatinumStudyGWAS | 8a5ca9edcad3aff0b0b6e0b4f228965f9b5fa36d | 44d341141a8193f01ad4e938012476997ea28b18 | refs/heads/master | 2021-01-22T17:33:35.864643 | 2016-07-07T15:07:54 | 2016-07-07T15:07:54 | 62,816,038 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,044 | r | 23_pull_nominal_SNPs_for_Eric.R | ##pull nominal SNPs (P<0.05) for Eric
# Pull nominally significant SNPs (P < 0.05) from each sorted GWAS result
# file and write them back out as <file>.P_0.05 for Eric.
library(dplyr)
library(tidyr)
library(ggplot2)
"%&%" = function(a,b) paste(a,b,sep="")
my.dir = "/Volumes/dolan-lab/hwheeler/ThePlatinumStudy/GWAS/GWAS_results/"

# Read one GWAS result file, drop rows with any missing value, keep
# nominally significant SNPs, and write them to <gwasfile>.P_0.05.
#
# max_info: upper bound on the imputation INFO score; NA skips the INFO
# filter (the ordinal-regression file has no INFO column to filter on).
#
# This replaces three copy-pasted read/filter/write stanzas with one helper.
filter_nominal <- function(gwasfile, max_info = NA) {
  gwas <- read.table(gwasfile, header = TRUE)
  gwas <- gwas[complete.cases(gwas), ] # rm NAs
  gwas <- if (is.na(max_info)) {
    dplyr::filter(gwas, P < 0.05)
  } else {
    dplyr::filter(gwas, INFO < max_info, P < 0.05)
  }
  write.table(gwas, file = gwasfile %&% ".P_0.05", row.names = FALSE, quote = FALSE)
  invisible(gwas)
}

filter_nominal(my.dir %&% 'N88.imputed_rnGM412_age.cisp.10PCs_chr1-22.assoc.dosage.sorted',
               max_info = 1.05)
filter_nominal(my.dir %&% 'N88.imputed_dosegroup_10PCs_chr1-22.assoc.dosage.sorted',
               max_info = 1.05)
filter_nominal(my.dir %&% 'N88.imputed_ord3CIPN8_agediagnosis_chr1-22.ordreg.assoc.dosage.sorted')
|
30195df10c9e2225f0e327ded2e185ad8c0c3c1d | 2a4b68a32d433e2005d65a856cb0b0ec610a98f9 | /man/GQ.Rd | 3c83674df8dec5e4a4527953260b6392fe28a9c8 | [] | no_license | cran/reports | ea418a523b13ff10bc83021a6cd4a3dbfc33689a | 503622c5d8b136e74ab6075e2bc3b7d3c1f20f80 | refs/heads/master | 2021-01-25T12:24:12.337179 | 2013-02-18T00:00:00 | 2013-02-18T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,270 | rd | GQ.Rd | \name{GQ}
\alias{GQ}
\title{Format Quotes}
\usage{
GQ(quotes = TRUE, block = TRUE, text = "clipboard",
copy2clip = TRUE)
}
\arguments{
\item{quotes}{logical or c(\code{l}, \code{r}, \code{L},
\code{R}, \code{left} or \code{right}). If \code{TRUE}
LaTeX style quotes (2 backticks and two single quotes)
are wrapped around the text. If (\code{l}, \code{L} or
\code{left}) left ticks only are used. If (\code{r},
\code{R} or \code{right}) right ticks only are used.}
\item{block}{If \code{TRUE} LaTeX block quote code tags
are used instead of the backticks and single quotes.}
\item{text}{character vector or text copied to the
clipboard. Default is to read from the clipboard.}
\item{copy2clip}{logical. If \code{TRUE} attempts to
copy the output to the clipboard.}
}
\value{
Returns a character vector with LaTeX formatted text.
}
\description{
Tool to format text taken from articles for LaTeX.
Combines multiple strings of text into one string. Removes
non-ASCII characters and hyphens.
}
\details{
This function formats text for use with LaTeX documents.
}
\section{Warning}{
Ligatures are assumed to be "fi", however, these elements
may be "ff", "fi", "fl", "ffi" or "ffl".
}
|
b74984a9cbee3adff3c8b1a7b30939340213d76b | eff92f5dc6f811110609754e0accf176892b5af5 | /analysis/reconstruct_ancestral_ovary_morphology_type.R | b6eb930f189fd0a0a77bb9a0751e58fe547bee4b | [] | no_license | shchurch/insect_ovariole_number_evolution_2020 | 8c742a910e2d9e1ca233de8148f9f153559e15f1 | 22109637a1db53cbb06e3c64cda1f8773dc21d50 | refs/heads/master | 2023-06-18T23:06:42.385948 | 2021-07-08T17:27:42 | 2021-07-08T17:27:42 | 274,758,878 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,174 | r | reconstruct_ancestral_ovary_morphology_type.R | # This script was written by SHC in 2019.
# Its main purpose is to reconstruct the ancestral mode of oogenesis
library(OUwie)
library(corHMM)
library(parallel)
library(geiger)
# Seed before the row shuffle below so the (arbitrary) representative genus
# chosen per family/order by slice(1L) is reproducible.
set.seed(12345)

# Expected to provide `tree` and the tidyverse helpers used below -- TODO
# confirm what get_ovariole_number_data.R defines.
source("analysis/get_ovariole_number_data.R")
egg_database <- read.delim("analysis/datafiles/egg_database_final_values_July2019.tsv",header=T,stringsAsFactors=F)

### These commands are used to estimate the evolutionary shifts in ovary type
# read in table of taxonomic groups, labeled by ovary type, as recorded in Buning 1994
ovary_table <- read.delim("analysis/datafiles/mode_of_oogensis_Buning.tsv",header=T,stringsAsFactors=F)

# The Buning tables are listed by genus, but not all genera are in the final dataset
# However, ovary type is consistent across families within Bunings set
# So when a genus is not present, for phylogenetic purposes we can choose an alternate genus from the same family
tree_dataset <- egg_database[sample(nrow(egg_database)),] %>% #start with the largest dataset of genera, families, and orders
  filter(genus %in% tree$tip.label) #use only tips in the tree

# Find one-to-one genus matches
ovary_table_genera_match <- ovary_table %>% filter(genus %in% tree_dataset$genus)
# Find families without one-to-one genus match
ovary_table_family_match <- ovary_table %>% filter(!(family %in% ovary_table_genera_match$family)) %>%
  filter(family %in% tree_dataset$family)
# Find orders without genus or family match
ovary_table_order_match <- ovary_table %>% filter(!(order %in% ovary_table_genera_match$order)) %>%
  filter(!(order %in% ovary_table_family_match$order)) %>%
  filter(order %in% tree_dataset$order)

# Build a new ovary table with these substitutions
# First for families: one row per family, swapping in a genus that is present
# in the tree (the pick is arbitrary among that family's shuffled rows).
new_ovary_table_family <- ovary_table_family_match %>%
  group_by(family) %>%
  slice(1L) %>%
  select(-genus) %>%
  left_join(tree_dataset %>%
              group_by(family) %>%
              slice(1L) %>%
              select(family,genus),by="family")
# Then for orders
new_ovary_table_order <- ovary_table_order_match %>%
  group_by(order) %>%
  slice(1L) %>%
  select(-genus) %>%
  left_join(tree_dataset %>%
              group_by(order) %>%
              slice(1L) %>%
              select(order,genus),by="order")
# Then combine
new_ovary_table <- bind_rows(ovary_table_genera_match,new_ovary_table_family,new_ovary_table_order)

# Format the data frame for the ancestral state reconstruction on the full tree
new_tree_dataset <- left_join(tree_dataset,new_ovary_table %>% # join ovary data
                                select(ovary,genus),by="genus") %>%
  group_by(genus) %>% #choose one observation per genus
  slice(1L) %>%
  ungroup() %>%
  mutate(species = genus, #format for corHMM
         discrete = as.numeric(as.factor(ovary))) %>% #has missing ovary type data
  select(species,discrete) %>%
  as.data.frame()

# Prune a tree to only tips in the dataset (still has NAs though, max observations are used here)
pp_pruned <- drop.tip(tree,setdiff(tree$tip.label,new_tree_dataset$species))
# Reconstruct ancestral ovary type (with missing data), Equal Rates model
# (rayDISC is from corHMM; rows with NA states are dropped via na.omit).
pp <- rayDISC(pp_pruned,na.omit(as.data.frame(new_tree_dataset[,c(1,2)])),model="ER",node.states="marginal")
save.image("reconstruct_ancestral_ovary_morphology_type.RData")
|
23a606a868e6374f2ceafda317c860b896ba63af | ce400939e11da3d17b10e800374c17bb947bae35 | /buildSvgrParseList/specialAttrs.R | a475f382f8433938d5f90fdcc23af6ed7633f819 | [] | no_license | mslegrand/ptRAceBldr | 255258dbede3e25cfbd595b7f50b9b11a9b68398 | 2c5d176e72efa158d78c22eefa0cadb926335681 | refs/heads/master | 2021-06-05T20:06:51.817158 | 2020-02-22T18:07:33 | 2020-02-22T18:07:33 | 103,204,581 | 0 | 0 | null | 2020-02-22T18:08:29 | 2017-09-12T01:02:16 | JavaScript | UTF-8 | R | false | false | 3,576 | r | specialAttrs.R | library(data.table)
library(XML)
if(!exists("requireTable")){ source("tableLoader.R") }
#source("specialTagHandlers.R")
# insertConditionalCode(ele.tag,attrsEle2Quote$filter, echoQuote, filterQuote),
# insertConditionalCode(ele.tag,attrsEle2Quote$fill, echoQuote, fillQuote),
# insertConditionalCode(ele.tag,attrsEle2Quote$clip.path, echoQuote, clipPathQuote),
# insertConditionalCode(ele.tag,attrsEle2Quote$mask, echoQuote, maskQuote),
# insertConditionalCode(ele.tag,attrsEle2Quote$marker, echoQuote, markerEndQuote),
# insertConditionalCode(ele.tag,attrsEle2Quote$marker, echoQuote, markerMidQuote),
# insertConditionalCode(ele.tag,attrsEle2Quote$marker, echoQuote, markerStartQuote),
# insertConditionalCode(ele.tag, c('text' , 'textPath' , 'tspan'), echoQuote, textQuote),
# insertConditionalCode(ele.tag, c("linearGradient", "radialGradient"), echoQuote, gradientColorQuote)
#
# translate=function(dx,dy=NULL){
#
# list(translate=c(dx,dy))
# },
# rotate=function(angle, x=NULL, y=NULL){
#
# list(rotate=c(angle,x,y))
# },
# rotatR=function(angle, x=NULL, y=NULL){
#
# tmp<-c(angle,x,y)
# tmp[1]<-as.numeric(tmp[1])*180/pi #convert from radians to degrees
# list(rotate=tmp)
# },
# scale=function(dx,dy=NULL){
#
# list(scale=c(dx,dy))
# },
#
# #
#
# gradXtra<-list(
# linearGradient=c("colors","offsets"),
# radialGradient=c("colors","offsets")
# )
# Load the attribute/element lookup tables used below (provided by
# tableLoader.R, sourced at the top of this file).
requireTable(AET.DT, COP1.DT, PA.DT)
supports.cxy<-function(ele.tag){
  # Return the tag name itself when the element supports the synthetic "cxy"
  # attribute -- i.e. when AET.DT lists all four box attributes
  # (x, y, width, height) for it -- and NULL otherwise.
  #
  # BUG FIX: the original used ifelse(test, ele.tag, NULL).  ifelse() cannot
  # recycle a NULL "no" branch into its answer vector, so the call errored
  # ("replacement has length zero") whenever the test was FALSE.  A scalar
  # condition belongs in plain if/else.
  n.box.attrs <- nrow(AET.DT[ element==ele.tag &
                                (attr=='x' | attr=='y' | attr=='width' | attr=='height') ,])
  if (n.box.attrs == 4) ele.tag else NULL
}
# The four "box" attributes; an element accepting all of them is treated as
# supporting the synthetic "cxy" convenience attribute.
xywh<-c('x','y','width','height')
# all elements
ele.tags<-unique(AET.DT$element)
#all attributes
#ele.tags.attributeName<-AET.DT[attr=="attributeName"]$element
#tmp<-lapply(ele.tags, supports.cxy)
# cxySupported = elements for which AET.DT lists all four box attributes
# (count matching rows per element; keep those with N == 4), plus the three
# text elements, which are treated as cxy-capable as well.
# NOTE(review): right-assignment (->) is unusual style; left unchanged here.
AET.DT[attr %in% xywh,]->tmp.DT
tmp.DT[,.N, by=element]->tmp2.DT
tmp2.DT[N==4,element]->cxySupported
cxySupported<-c(c('text' , 'textPath' , 'tspan'),cxySupported)
# Make sure every lookup table is loaded before expanding the per-element
# attribute lists.
requireTable(
  "AVEL.DT", "AVD.DT", "es.DT", "eaCS.DT", "PA.DT",
  "COP.DT", "COP1.DT", "AET.DT"
)
# PA.DT has items that need to be expanded, such as:
# NOTE(review): this bare list() is evaluated and its value discarded -- it
# appears to be purely illustrative (one property expanding to three marker
# attributes); confirm it can be turned into a comment.
list(
  "marker properties"=c( "marker-start", "marker-mid", "marker-end")
)
# ‘path’, ‘line’, ‘polyline’ ‘polygon’ all apply to marker properties
# want: for each element, all valid attributes
# given the results from validateAttribute.R
# add to element list, the right stuff
# start with ele.attr and add newAttr
# ele.attr[[ele]]<-c(ele.attr[[ele]], newAttr)
# Recompute cxySupported (this duplicates the identical block above).
xywh<-c('x','y','width','height')
AET.DT[attr %in% xywh,]->tmp.DT
tmp.DT[,.N, by=element]->tmp2.DT
tmp2.DT[N==4,element]->cxySupported
cxySupported<-c(c('text' , 'textPath' , 'tspan'),cxySupported)
# Append the synthetic "cxy" attribute to every cxy-capable element.
# NOTE(review): ele.attr is not defined in this file -- presumably created
# by validateAttribute.R; confirm it exists before this point.
eleList<- cxySupported
for(el in eleList){
  ele.attr[[el]]<-c(ele.attr[[el]], "cxy")
}
# The three marker attributes apply to the four shape elements.
eleList<-c( "path", "line", "polyline", "polygon")
for(el in eleList){
  ele.attr[[el]]<-c(ele.attr[[el]], "marker-start")
  ele.attr[[el]]<-c(ele.attr[[el]], "marker-mid")
  ele.attr[[el]]<-c(ele.attr[[el]], "marker-end")
}
# Gradient elements additionally accept colors/offsets convenience attributes.
eleList<-c( "linearGradient", "radialGradient")
for(el in eleList){
  ele.attr[[el]]<-c(ele.attr[[el]], "colors")
  ele.attr[[el]]<-c(ele.attr[[el]], "offsets")
}
getMissingAttrs<-function(el){
  # Synthetic attributes to attach to element `el` beyond what the attribute
  # tables list: "cxy" for box-capable elements (per the global cxySupported),
  # marker attributes for the shape elements, colors/offsets for gradients.
  # Returns NULL when nothing applies (c() on an empty accumulator).
  marker.elements   <- c("path", "line", "polyline", "polygon")
  gradient.elements <- c("linearGradient", "radialGradient")
  extra <- NULL
  if (el %in% cxySupported)      extra <- c(extra, "cxy")
  if (el %in% marker.elements)   extra <- c(extra, "marker-start", "marker-mid", "marker-end")
  if (el %in% gradient.elements) extra <- c(extra, "colors", "offsets")
  extra
}
0f53166b7d7179639a02582ca63b1210b5aebf91 | 539b3aff11d2fea161186a4726f26c0d2904c5e4 | /man/getWaterDataUrl.Rd | c4005a86d09d015c59d762c7e0d7350efd3f9e60 | [
"LicenseRef-scancode-warranty-disclaimer",
"CC0-1.0",
"LicenseRef-scancode-public-domain-disclaimer"
] | permissive | ee-usgs/repgen | c7ba7ffcff84173f6e82a88d4e5f96c1b021feb0 | 6cf32d38d4bcb217af7f136053f04db4c6ad5362 | refs/heads/master | 2020-12-25T22:08:24.342632 | 2016-12-20T14:22:17 | 2016-12-20T14:22:17 | 59,049,412 | 0 | 0 | null | 2016-05-17T18:43:11 | 2016-05-17T18:43:11 | null | UTF-8 | R | false | true | 429 | rd | getWaterDataUrl.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils-shared.R
\name{getWaterDataUrl}
\alias{getWaterDataUrl}
\title{Put the waterdata.usgs.gov url (if it exists) into the base of the report}
\usage{
getWaterDataUrl(data)
}
\arguments{
\item{data}{coming in to create a plot which may have waterdata info}
}
\description{
Put the waterdata.usgs.gov url (if it exists) into the base of the report
}
|
f6f563863c276386c570f60e8847fe557c8b2967 | f8f1a2ca238fc61352ca21978516f5c996a6dfc4 | /R/AllGenerics.R | 4a9c9195f6758b1d1468da85f37db4f38063ecbe | [] | no_license | cran/portfolio | d53520af3b425d850c539bd6563496fa4caa0100 | 3b2b0ed0616e06d45116c245a390bd95ae6f027a | refs/heads/master | 2021-07-14T03:52:58.298908 | 2021-07-10T15:30:05 | 2021-07-10T15:30:05 | 17,698,678 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 4,685 | r | AllGenerics.R | ################################################################################
##
## $Id: AllGenerics.R 387 2007-01-10 04:14:02Z enos $
##
## All generic functions for the portfolio class.
##
################################################################################
## Generic functions shared across the package.  Every setGeneric() call is
## guarded by isGeneric() so re-sourcing this file does not redefine -- and
## thereby detach methods from -- an already-existing generic.
## FIX: the original defined the "securityInfo" generic twice (once in the
## shared section and again at the bottom); the redundant second definition
## has been removed.  Behavior is unchanged: the duplicate was guarded by
## isGeneric() and therefore never executed.
if(!isGeneric("create"))
  setGeneric("create", function(object, ...) standardGeneric("create"))
if(!isGeneric("scaleWeights"))
  setGeneric("scaleWeights", function(object, ...) standardGeneric("scaleWeights"))
if(!isGeneric("balance"))
  setGeneric("balance", function(object, in.var, ...) standardGeneric("balance"))
if(!isGeneric("exposure"))
  setGeneric("exposure", function(object, exp.var, ...) standardGeneric("exposure"))
if(!isGeneric("performance"))
  setGeneric("performance", function(object, ...) standardGeneric("performance"))
if(!isGeneric("totalReturn"))
  setGeneric("totalReturn", function(object, ...) standardGeneric("totalReturn"))
if(!isGeneric("portfolioDiff"))
  setGeneric("portfolioDiff", function(object, x, ...) standardGeneric("portfolioDiff"))
if(!isGeneric("contribution"))
  setGeneric("contribution", function(object, contrib.var, ...) standardGeneric("contribution"))
if(!isGeneric("securityInfo"))
  setGeneric("securityInfo", function(object, id, ...) standardGeneric("securityInfo"))

## Class portfolio only.
if(!isGeneric("calcWeights"))
  setGeneric("calcWeights", function(object, ...) standardGeneric("calcWeights"))
if(!isGeneric("calcShares"))
  setGeneric("calcShares", function(object, ...) standardGeneric("calcShares"))
if(!isGeneric("mvLong"))
  setGeneric("mvLong", function(object, ...) standardGeneric("mvLong"))
if(!isGeneric("mvShort"))
  setGeneric("mvShort", function(object, ...) standardGeneric("mvShort"))
if(!isGeneric("sizeLong"))
  setGeneric("sizeLong", function(object, ...) standardGeneric("sizeLong"))
if(!isGeneric("sizeShort"))
  setGeneric("sizeShort", function(object, ...) standardGeneric("sizeShort"))
if(!isGeneric("updatePrices"))
  setGeneric("updatePrices", function(object, id, price, ...) standardGeneric("updatePrices"))
if(!isGeneric("matching"))
  setGeneric("matching", function(object, ...) standardGeneric("matching"))
if(!isGeneric("getYahooData"))
  setGeneric("getYahooData", function(object, symbol.var, ...) standardGeneric("getYahooData"))
if(!isGeneric("expandData"))
  setGeneric("expandData", function(object, ...) standardGeneric("expandData"))
if(!isGeneric("expose"))
  setGeneric("expose", function(object, trades, ...) standardGeneric("expose"))

## Class tradelist
## Main methods
if(!isGeneric("calcCandidates"))
  setGeneric("calcCandidates", function(object, orig, target, ...) standardGeneric("calcCandidates"))
if(!isGeneric("calcRanks"))
  setGeneric("calcRanks", function(object, ...) standardGeneric("calcRanks"))
if(!isGeneric("calcChunks"))
  setGeneric("calcChunks", function(object, ...) standardGeneric("calcChunks"))
if(!isGeneric("calcSwaps"))
  setGeneric("calcSwaps", function(object, ...) standardGeneric("calcSwaps"))
if(!isGeneric("calcSwapsActual"))
  setGeneric("calcSwapsActual", function(object, ...) standardGeneric("calcSwapsActual"))
if(!isGeneric("calcChunksActual"))
  setGeneric("calcChunksActual", function(object, ...) standardGeneric("calcChunksActual"))
if(!isGeneric("calcActual"))
  setGeneric("calcActual", function(object, ...) standardGeneric("calcActual"))
if(!isGeneric("calcFinal"))
  setGeneric("calcFinal", function(object, ...) standardGeneric("calcFinal"))

## Utility methods
if(!isGeneric("candidatesCols"))
  setGeneric("candidatesCols", function(object, ...) standardGeneric("candidatesCols"))
if(!isGeneric("ranksCols"))
  setGeneric("ranksCols", function(object, ...) standardGeneric("ranksCols"))
if(!isGeneric("actualCols"))
  setGeneric("actualCols", function(object, ...) standardGeneric("actualCols"))
if(!isGeneric("finalCols"))
  setGeneric("finalCols", function(object, ...) standardGeneric("finalCols"))
if(!isGeneric("chunksCols"))
  setGeneric("chunksCols", function(object, ...) standardGeneric("chunksCols"))
if(!isGeneric("restrictedCols"))
  setGeneric("restrictedCols", function(object, ...) standardGeneric("restrictedCols"))
if(!isGeneric("trimSide"))
  setGeneric("trimSide", function(object, side, value, ...) standardGeneric("trimSide"))
if(!isGeneric("dummyChunks"))
  setGeneric("dummyChunks", function(object, side, num, quality, ...) standardGeneric("dummyChunks"))
if(!isGeneric("mapMarket"))
  setGeneric("mapMarket", function(object, ...) standardGeneric("mapMarket"))
|
9dd7c4bf00d1236843fb7873cfeedc7b902ddfdf | 282921fb2ddc8d6c49555e055c6271620dd9ed5f | /Getmeword.R | 35a68442f8813f80a9db778f3b26ac8e7ea2c0fc | [] | no_license | ElinorThorne/QA_Work | c109db5dca2040fff3b280c64e2b374361591e5b | a2b23bc03f2a7323064ba804dd59f83a4dc05da0 | refs/heads/master | 2021-01-17T18:01:53.738695 | 2017-06-27T09:37:40 | 2017-06-27T09:37:40 | 95,534,443 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 834 | r | Getmeword.R | ones <- function(a){
return(switch(a,"one ","two ","three ","four ","five ","six ","seven ","eight ","nine "))
}
teens <- function(a){
  # Word for 11..19, selected by the ones digit: 1 -> "eleven ", ...,
  # 9 -> "nineteen ".  switch() truncates a non-integer index and yields
  # NULL out of range, exactly as the original one-liner did.
  word <- switch(a,
                 "eleven ", "twelve ", "thirteen ", "fourteen ", "fifteen ",
                 "sixteen ", "seventeen ", "eighteen ", "nineteen ")
  word
}
tens <- function(a){
  # Word for a multiple of ten, selected by the tens digit: 1 -> "ten ",
  # 2 -> "twenty ", ..., 9 -> "ninety ".  Out-of-range input yields NULL,
  # matching the original switch()-based one-liner.
  word <- switch(a,
                 "ten ", "twenty ", "thirty ", "forty ", "fifty ",
                 "sixty ", "seventy ", "eighty ", "ninety ")
  word
}
# ---------------------------------------------------------------------------
# Interactive driver: read a number 1-9999 and print it in English words,
# using ones()/teens()/tens() defined above.
# NOTE(review): when a digit position is 0, ones()/tens() return NULL
# (switch() index out of range), so inputs such as 10, 100 or 500 produce
# output with missing/odd words -- confirm whether zero digits should be
# skipped explicitly.
# ---------------------------------------------------------------------------
a <-as.numeric(readline("Insert number 1-9999: "))
word <- NULL
# Thousands digit; switch() inside ones() truncates a/1000 to an integer.
if(a>=1000){
  word <- paste(word, ones(a/1000), "thousand ")
  a = a%%1000;
}
# Hundreds digit.  NOTE(review): "hundred and " is emitted even when the
# remainder below the hundreds is zero (e.g. 500 -> "five hundred and ").
if(a>=100){
  word <- paste(word, ones(a/100), "hundred and ")
  a = a%%100;
}
# 11-19 are irregular ("eleven".."nineteen"); everything else decomposes
# into a tens word plus a ones word.
if(a>10 && a<20){
  word <- paste(word, teens(a%%10))
} else{
  x = trunc(a/10)
  word <- paste(word, tens(x))
  a = a%%10;
  word <- paste(word, ones(a))
}
print(word)
b21543a1e9c4a17fa73ecefcd3201e5c65617315 | 6d8d9ce4c52349db43cbf4d854cfa91e5397b0c7 | /fullmat.R | 9b5321321e793f5dd09f78369e2d0ed4b5b521ad | [] | no_license | GissellaPineda/Tareas-Curso-BioinfInvRepro | e41d3f80139487337dafd427a23fae07b88bc382 | 769228736cd735cae65a55ae17ce9daad543c990 | refs/heads/master | 2021-01-13T00:02:39.936355 | 2017-05-17T20:43:32 | 2017-05-17T20:43:32 | 81,381,981 | 0 | 0 | null | 2017-04-05T02:43:59 | 2017-02-08T22:19:57 | null | UTF-8 | R | false | false | 2,702 | r | fullmat.R |
##Cargar un df llamada fullmat
fullmat <- read.delim (file = "../meta/maizteocintle_SNP50K_meta_extended.txt")
fullmat
##Que tipo de objeto se creó
class(fullmat)
##Como ver las 6 primeras lineas del archivo
fullmat[1:6,]
##Cuantas muestras hay
nrow(fullmat)
##De cuantos estados se tienen muestras?
levels (fullmat$Estado)
##Cuantas muestras fueron colectadas antes de 1980
sum(fullmat$A.o._de_colecta<1980, na.rm = T)
##Cuantas muestras hay de cada raza?
levels(fullmat$Raza_Primaria)
sum(fullmat$Raza=="Ancho")
sum(fullmat$Raza=="Arrocillo")
sum(fullmat$Raza=="Blando de Sonora")
sum(fullmat$Raza=="C\xcc_nico")
sum(fullmat$Raza=="C\xcc_nico Norte̱o")
sum(fullmat$Raza=="Celaya")
sum(fullmat$Raza=="Chapalote")
sum(fullmat$Raza=="Complejo Serrano de Jalisco")
sum(fullmat$Raza=="Coscomatepec")
sum(fullmat$Raza=="Dulce")
sum(fullmat$Raza=="Dzit-Bacal")
sum(fullmat$Raza=="Elotes C\xcc_nicos")
sum(fullmat$Raza=="Gordo")
sum(fullmat$Raza=="mexicana")
sum(fullmat$Raza=="Nal-tel de Altura")
sum(fullmat$Raza=="Olotillo")
sum(fullmat$Raza=="Palomero de Chihuahua")
sum(fullmat$Raza=="parviglumis")
sum(fullmat$Raza=="Rat\xcc_n")
sum(fullmat$Raza=="Tablilla de Ocho")
sum(fullmat$Raza=="Tabloncillo Perla")
sum(fullmat$Raza=="Tepecintle")
sum(fullmat$Raza=="Tuxpe̱o Norte̱o")
sum(fullmat$Raza=="Zamorano Amarillo")
sum(fullmat$Raza=="Zapalote Grande")
sum(fullmat$Raza=="Apachito")
sum(fullmat$Raza=="Azul")
sum(fullmat$Raza=="Bofo")
sum(fullmat$Raza=="C\xcc_nico ")
sum(fullmat$Raza=="Cacahuacintle")
sum(fullmat$Raza=="Chalque̱o")
sum(fullmat$Raza=="Comiteco")
sum(fullmat$Raza=="Conejo")
sum(fullmat$Raza=="Cristalino de Chihuahua")
sum(fullmat$Raza=="Dulcillo del Noroeste")
sum(fullmat$Raza=="Elotero de Sinaloa")
sum(fullmat$Raza=="Elotes Occidentales")
sum(fullmat$Raza=="Jala")
sum(fullmat$Raza=="Mushito")
sum(fullmat$Raza=="Olot\xcc_n")
sum(fullmat$Raza=="Onave̱o")
sum(fullmat$Raza=="Palomero Toluque̱o")
sum(fullmat$Raza=="Pepitilla")
sum(fullmat$Raza=="Reventado")
sum(fullmat$Raza=="Tabloncillo")
sum(fullmat$Raza=="Tehua")
sum(fullmat$Raza=="Tuxpe̱o")
sum(fullmat$Raza=="Vande̱o")
sum(fullmat$Raza=="Zapalote Chico")
##En promedio ¿A que altitud fueron colectadas las muestras?
mean(fullmat$Altitud)
##A que altitud maxima y munima fueron colectadas
min(fullmat$Altitud)
max(fullmat$Altitud)
##Crea una df de datos solo con las muestras de la raza Olotillo
olotillo<-subset(fullmat, fullmat$Raza=="Olotillo")
##Crea una nueva df s´ólo con las muestras de las razas Reventador, Jala y Ancho
varias<-subset(fullmat, fullmat$Raza==c("Reventador", "Jala", "Ancho"))
##Escribe la matriz anterior a un archivo llamado "submat.csv en meta
write.csv(varias, "../meta/submat.csv")
|
e3d6678e036e23c7d580afe4059f7ba7b54a951d | 9bdc98a92ba2f789c06107b0d1718180237bb929 | /mardham2/inst/analysis/mardhamAnalysis.R | 0dc4fd7ad288070f71fdb5dde6ef4fac318fbb5a | [] | no_license | dth2/CampAdol | ab68a1bb723c0a81fe60b1f4f891a9ad2d5fcf9a | ba4718bb01371d315f0e88aa5d10dfb5758c136f | refs/heads/master | 2021-01-12T09:53:11.176662 | 2017-09-21T21:39:46 | 2017-09-21T21:39:46 | 76,286,640 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,566 | r | mardhamAnalysis.R |
if(F) {
##############################################################
mean(mardham.sim.01a$atts.list[[520]]$vl[mardham.sim.01a$atts.list[[520]]$stage%in%"AF"])
mean(mardham.sim.01a$atts.list[[520]]$vl[mardham.sim.01a$atts.list[[520]]$stage%in%"AR"])
mean(mardham.sim.01a$atts.list[[520]]$vl[mardham.sim.01a$atts.list[[520]]$stage%in%"C"])
mean(mardham.sim.01a$atts.list[[520]]$vl[mardham.sim.01a$atts.list[[520]]$stage%in%"D"])
###############################################################
times <- 1:length(mardham.sim.01b$disc.ai)
discai <- sapply(times, function(x) nrow(mardham.sim.01b$disc.ai[[x]]))
disc.uai <- sapply(times, function(x) sum(mardham.sim.01b$disc.ai[[x]]$uai==1))
mean.vl <- sapply(times, function(x) mean(mardham.sim.01b$atts.list[[x]]$vl, na.rm=T))
percent.aids <- sapply(times, function(x) mean(mardham.sim.01b$atts.list[[x]]$stage=="D", na.rm=T))
percent.chronic <- sapply(times, function(x) mean(mardham.sim.01b$atts.list[[x]]$stage=="C", na.rm=T))
percent.chronic <- sapply(times, function(x) mean(mardham.sim.01b$atts.list[[x]]$stage=="C", na.rm=T))
################################################################
window <- 200
plot(filter(
mardham.sim.01b$summ$i - mardham.sim.01b$summ$dg/5 - mardham.sim.01b$summ$da,
rep(1,window))/window)
lines(c(0,10000),c(0,0))
points(filter(diff(mardham.sim.01b$summ$p/mardham.sim.01b$summ$n)*10, rep(1,window)))
pdf("basics.pdf")
plot(mardham.sim.01b$summ$p, main='absolute prevalence')
plot(mardham.sim.01b$summ$p/mardham.sim.01b$summ$n, main='prevalence')
plot(mardham.sim.01b$summ$n, main='popsize')
window <- 200
plot(filter(
mardham.sim.01b$summ$i - mardham.sim.01b$summ$dg*.23 - mardham.sim.01b$summ$da,
rep(1,window))/window)
lines(c(0,10000),c(0,0))
dev.off()
#####
diff(mardham.sim.01c$summ$p) -
(mardham.sim.01c$summ$i-mardham.sim.01c$summ$da-mardham.sim.01c$summ$dgp)[-1]
window<-25
plot(filter(mardham.sim.01c$summ$p, rep(1,window))/window)
plot(filter(mardham.sim.01c$summ$p/mardham.sim.01c$summ$n, rep(1,window))/window)
plot(filter(mardham.sim.01c$summ$i, rep(1,window))/window)
plot(filter(mardham.sim.01c$summ$da, rep(1,window))/window)
plot(filter(mardham.sim.01c$summ$dgp, rep(1,window))/window)
timestep <- 1300
table(mardham.sim.01c$atts.list[[timestep]]$
tt.traj[mardham.sim.01c$atts.list[[timestep]]$stage=="D"])
hist(mardham.sim.01c$atts.list[[timestep]]$
inf.time[mardham.sim.01c$atts.list[[timestep]]$stage=="D"])
max(mardham.sim.01c$atts.list[[timestep]]$
inf.time[mardham.sim.01c$atts.list[[timestep]]$stage=="D"],na.rm=T)
min(mardham.sim.01c$atts.list[[timestep]]$
inf.time[mardham.sim.01c$atts.list[[timestep]]$stage=="D"],na.rm=T)
mean(mardham.sim.01c$atts.list[[timestep]]$
inf.time[mardham.sim.01c$atts.list[[timestep]]$stage=="D"],na.rm=T)
aaa <- which(mardham.sim.01c$atts.list[[timestep]]$stage=="D")
mardham.sim.01c$atts.list[[timestep]]$stage[aaa]
bbb <- data.frame(
a = mardham.sim.01c$atts.list[[timestep]]$inf.time[aaa],
b = mardham.sim.01c$atts.list[[timestep]]$tt.traj[aaa],
c = mardham.sim.01c$atts.list[[timestep]]$cum.time.off.tx[aaa],
d = mardham.sim.01c$atts.list[[timestep]]$cum.time.on.tx[aaa],
e = mardham.sim.01c$atts.list[[timestep]]$stage.time[aaa],
f = mardham.sim.01c$atts.list[[timestep]]$uid[aaa]
)
xxx <- 8
yyy <- mardham.get.id.from.uid(mardham.sim.01c, xxx)
zzz <- mardham.get.att.from.uid(mardham.sim.01c, 'stage.time', yyy, xxx)
plot(mardham.get.att.from.uid(mardham.sim.01c, 'cum.time.off.tx', yyy, xxx))
plot(mardham.get.att.from.uid(mardham.sim.01c, 'vl', yyy, xxx))
mean(mardham.sim.01c$atts.list[[1]]$vl, na.rm=T)
mean(mardham.sim.01c$atts.list[[1000]]$vl, na.rm=T)
##########################
plot(mardham.sim.01f$summ$p/mardham.sim.01f$summ$n, ylim=c(0,0.3))
points(mardham.sim.01c$summ$p/mardham.sim.01c$summ$n, col='red')
plot(mardham.sim.01f$summ$p.B/mardham.sim.01f$summ$n.B, ylim=c(0,0.5))
points(mardham.sim.01f$summ$p.W/mardham.sim.01f$summ$n.W, col='red')
plot(mardham.sim.01f$summ$n.B)
points(mardham.sim.01f$summ$n.W, col='red')
mardham.meanstats.01$meanstats.p[[1]]*2/network.size(mardham.basepop.01$nD.main)
mardham.meanstats.01$meanstats.i[[1]]*2/network.size(mardham.basepop.01$nD.main)
plot(mardham.sim.01$summ$md.MB, ylim=c(0,1))
points(mardham.sim.01$summ$md.MW, col='red')
lines(c(0,10000), rep(mardham.meanstats.01$meanstats.m[[1]]*2/
network.size(mardham.basepop.01$nD.main),2))
plot(mardham.sim.01$summ$md.PB, ylim=c(0,1))
points(mardham.sim.01$summ$md.PW, col='red')
lines(c(0,10000), rep(mardham.meanstats.01$meanstats.p[[1]]*2/
network.size(mardham.basepop.01$nD.main),2))
plot(mardham.sim.01$summ$md.IB, ylim=c(0,1))
points(mardham.sim.01$summ$md.IW, col='red')
lines(c(0,10000), rep(sum(mardham.meanstats.01$meanstats.i[1:12])/
network.size(mardham.basepop.01$nD.main),2))
######################
discai <- sapply(1:length(mardham.sim.01$disc.ai), function(x) nrow(mardham.sim.01$disc.ai[[x]]))
discai.a <- sapply(1:length(mardham.sim.01a$disc.ai), function(x) nrow(mardham.sim.01a$disc.ai[[x]]))
plot(discai)
points(discai.a,col='red')
disc.uai <- sapply(1:length(mardham.sim.01$disc.ai), function(x) sum(mardham.sim.01$disc.ai[[x]]$uai==1))
disc.uai.a <- sapply(1:length(mardham.sim.01a$disc.ai), function(x) sum(mardham.sim.01a$disc.ai[[x]]$uai==1))
plot(disc.uai)
points(disc.uai.a,col='red')
mean(mardham.sim.01a$atts.list[[1]]$vl, na.rm=T)
mean(mardham.sim.01a$atts.list[[520]]$vl, na.rm=T)
mean(mardham.sim.01a$atts.list[[1]]$circ, na.rm=T)
mean(mardham.sim.01a$atts.list[[520]]$circ, na.rm=T)
table(mardham.sim.01a$atts.list[[1]]$stage)
table(mardham.sim.01a$atts.list[[520]]$stage)
mean(mardham.sim.01a$atts.list[[1]]$vl[mardham.sim.01a$atts.list[[1]]$stage%in%"AF"])
mean(mardham.sim.01a$atts.list[[1]]$vl[mardham.sim.01a$atts.list[[1]]$stage%in%"AR"])
mean(mardham.sim.01a$atts.list[[1]]$vl[mardham.sim.01a$atts.list[[1]]$stage%in%"C"])
mean(mardham.sim.01a$atts.list[[1]]$vl[mardham.sim.01a$atts.list[[1]]$stage%in%"D"])
mean(mardham.sim.01a$atts.list[[520]]$vl[mardham.sim.01a$atts.list[[520]]$stage%in%"AF"])
mean(mardham.sim.01a$atts.list[[520]]$vl[mardham.sim.01a$atts.list[[520]]$stage%in%"AR"])
mean(mardham.sim.01a$atts.list[[520]]$vl[mardham.sim.01a$atts.list[[520]]$stage%in%"C"])
mean(mardham.sim.01a$atts.list[[520]]$vl[mardham.sim.01a$atts.list[[520]]$stage%in%"D"])
mean(mardham.sim.01a$atts.list)
plot(filter(mardham.sim.01$summ$i, rep(1,25))/25)
plot(filter(mardham.sim.01$summ$dg, rep(1,25))/25)
plot(filter(mardham.sim.01$summ$da, rep(1,25))/25)
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
###############################################################
plot(mardham.sim.01b$summ$p/mardham.sim.01b$summ$n,ylim=c(0,0.3))
plot(mardham.sim.01b$summ$n)
plot(mardham.sim.01b$summ$md.MB, ylim=c(0,1))
points(mardham.sim.01b$summ$md.MW, col='red')
lines(c(0,10000), rep(mardham.meanstats.01$meanstats.m[[1]]*2/
network.size(mardham.basepop.01$nD.main),2))
plot(mardham.sim.01b$summ$md.PB, ylim=c(0,1))
points(mardham.sim.01b$summ$md.PW, col='red')
lines(c(0,10000), rep(mardham.meanstats.01$meanstats.p[[1]]*2/
network.size(mardham.basepop.01$nD.main),2))
plot(mardham.sim.01b$summ$md.IB, ylim=c(0,1))
points(mardham.sim.01b$summ$md.IW, col='red')
lines(c(0,10000), rep(sum(mardham.meanstats.01$meanstats.i[1:12])/
network.size(mardham.basepop.01$nD.main),2))
######################
times <- 1:length(mardham.sim.01b$disc.ai)
discai <- sapply(times, function(x) nrow(mardham.sim.01b$disc.ai[[x]]))
plot(discai)
disc.uai <- sapply(times,
function(x) sum(mardham.sim.01b$disc.ai[[x]]$uai==1))
plot(disc.uai)
mean.vl <- sapply(times,
function(x) mean(mardham.sim.01b$atts.list[[x]]$vl, na.rm=T))
plot(mean.vl)
percent.aids <- sapply(times,
function(x) mean(mardham.sim.01b$atts.list[[x]]$stage=="D", na.rm=T))
plot(percent.aids)
percent.chronic <- sapply(times,
function(x) mean(mardham.sim.01b$atts.list[[x]]$stage=="C", na.rm=T))
plot(percent.chronic)
percent.chronic <- sapply(times,
function(x) mean(mardham.sim.01b$atts.list[[x]]$stage=="C", na.rm=T))
plot(percent.chronic)
plot(filter(mardham.sim.01b$summ$i, rep(1,25))/25)
plot(filter(mardham.sim.01b$summ$dg/5, rep(1,25))/25)
plot(filter(mardham.sim.01b$summ$da, rep(1,25))/25)
window <- 200
plot(filter(
mardham.sim.01b$summ$i - mardham.sim.01b$summ$dg/5 - mardham.sim.01b$summ$da,
rep(1,window))/window)
lines(c(0,10000),c(0,0))
points(filter(diff(mardham.sim.01b$summ$p/mardham.sim.01b$summ$n)*10, rep(1,window)))
pdf("basics.pdf")
plot(mardham.sim.01b$summ$p, main='absolute prevalence')
plot(mardham.sim.01b$summ$p/mardham.sim.01b$summ$n, main='prevalence')
plot(mardham.sim.01b$summ$n, main='popsize')
window <- 200
plot(filter(
mardham.sim.01b$summ$i - mardham.sim.01b$summ$dg*.23 - mardham.sim.01b$summ$da,
rep(1,window))/window)
lines(c(0,10000),c(0,0))
dev.off()
#####
diff(mardham.sim.01c$summ$p) -
(mardham.sim.01c$summ$i-mardham.sim.01c$summ$da-mardham.sim.01c$summ$dgp)[-1]
window<-25
plot(filter(mardham.sim.01c$summ$p, rep(1,window))/window)
plot(filter(mardham.sim.01c$summ$p/mardham.sim.01c$summ$n, rep(1,window))/window)
plot(filter(mardham.sim.01c$summ$i, rep(1,window))/window)
plot(filter(mardham.sim.01c$summ$da, rep(1,window))/window)
plot(filter(mardham.sim.01c$summ$dgp, rep(1,window))/window)
timestep <- 1300
table(mardham.sim.01c$atts.list[[timestep]]$
tt.traj[mardham.sim.01c$atts.list[[timestep]]$stage=="D"])
hist(mardham.sim.01c$atts.list[[timestep]]$
inf.time[mardham.sim.01c$atts.list[[timestep]]$stage=="D"])
max(mardham.sim.01c$atts.list[[timestep]]$
inf.time[mardham.sim.01c$atts.list[[timestep]]$stage=="D"],na.rm=T)
min(mardham.sim.01c$atts.list[[timestep]]$
inf.time[mardham.sim.01c$atts.list[[timestep]]$stage=="D"],na.rm=T)
mean(mardham.sim.01c$atts.list[[timestep]]$
inf.time[mardham.sim.01c$atts.list[[timestep]]$stage=="D"],na.rm=T)
aaa <- which(mardham.sim.01c$atts.list[[timestep]]$stage=="D")
mardham.sim.01c$atts.list[[timestep]]$stage[aaa]
bbb <- data.frame(
a = mardham.sim.01c$atts.list[[timestep]]$inf.time[aaa],
b = mardham.sim.01c$atts.list[[timestep]]$tt.traj[aaa],
c = mardham.sim.01c$atts.list[[timestep]]$cum.time.off.tx[aaa],
d = mardham.sim.01c$atts.list[[timestep]]$cum.time.on.tx[aaa],
e = mardham.sim.01c$atts.list[[timestep]]$stage.time[aaa],
f = mardham.sim.01c$atts.list[[timestep]]$uid[aaa]
)
xxx <- 8
yyy <- mardham.get.id.from.uid(mardham.sim.01c, xxx)
zzz <- mardham.get.att.from.uid(mardham.sim.01c, 'stage.time', yyy, xxx)
plot(mardham.get.att.from.uid(mardham.sim.01c, 'cum.time.off.tx', yyy, xxx))
plot(mardham.get.att.from.uid(mardham.sim.01c, 'vl', yyy, xxx))
mean(mardham.sim.01c$atts.list[[1]]$vl, na.rm=T)
mean(mardham.sim.01c$atts.list[[1000]]$vl, na.rm=T)
##########################
plot(mardham.sim.01$summ$p/mardham.sim.01$summ$n, ylim=c(0,0.3))
points(mardham.sim.01$summ$p.B/mardham.sim.01$summ$n.B, col ='red')
points(mardham.sim.01$summ$p.W/mardham.sim.01$summ$n.W, col ='blue')
aaa <- xtabs(~floor(mardham.sim.01$atts.curr$age)+
mardham.sim.01$atts.curr$inf.status)
plot(aaa[,2]/rowSums(aaa))
bbb <- xtabs(~floor(mardham.basepop.01$atts.curr$age)+
mardham.basepop.01$atts.curr$inf.status)
points(bbb[,2]/rowSums(bbb), col='red')
#######################
aaa <- xtabs(~mardham.sim.01$atts.curr$diag.status+
mardham.sim.01$atts.curr$tx.status+
mardham.sim.01$atts.curr$race)
bbb <- xtabs(~mardham.sim.01$atts.curr$tx.status+
(mardham.sim.01$atts.curr$vl==1.5)+
mardham.sim.01$atts.curr$race)
aaa[2,2,]/colSums(aaa[2,,])
bbb[2,2,]/colSums(bbb[2,,])
#######################
ccc <- xtabs(~mardham.basepop.01$atts.curr$diag.status+
mardham.basepop.01$atts.curr$tx.status+
mardham.basepop.01$atts.curr$race)
ddd <- xtabs(~mardham.basepop.01$atts.curr$tx.status+
(mardham.basepop.01$atts.curr$vl==1.5)+
mardham.basepop.01$atts.curr$race)
ccc[2,2,]/colSums(ccc[2,,])
ddd[2,2,]/colSums(ddd[2,,])
###############
uids <- 1:3000
#stage.time <- matrix(NA, length(uids), 520)
#cum.time.on.tx <- matrix(NA, length(uids), 520)
#cum.time.off.tx <- matrix(NA, length(uids), 520)
tx.status <- matrix(NA, length(uids), 520)
stage <- matrix(NA, length(uids), 520)
for (uid in uids) {
id <- mardham.get.id.from.uid(mardham.sim.01.test.tx, uid)
if (sum(!is.na(id))>0) {
# stage.time[uid,] <- mardham.get.att.from.uid(mardham.sim.01.test.tx, 'stage.time', id, uid)
# cum.time.on.tx[uid,] <- mardham.get.att.from.uid(mardham.sim.01.test.tx, 'cum.time.on.tx', id, uid)
# cum.time.off.tx[uid,] <- mardham.get.att.from.uid(mardham.sim.01.test.tx, 'cum.time.off.tx', id, uid)
tx.status[uid,] <- mardham.get.att.from.uid(mardham.sim.01.test.tx, 'tx.status', id, uid)
stage[uid,] <- mardham.get.att.from.uid(mardham.sim.01.test.tx, 'stage', id, uid)
}
cat(uid,'\n')
}
chronic <- matrix(stage%in%"C", nrow=length(uids))
chronic <- chronic[, -ncol(chronic)]
offoff <- sapply(1:519, function(x) (tx.status[,x]%in%0) & (tx.status[,x+1]%in%0))
offon <- sapply(1:519, function(x) (tx.status[,x]%in%0) & (tx.status[,x+1]%in%1))
onoff <- sapply(1:519, function(x) (tx.status[,x]%in%1) & (tx.status[,x+1]%in%0))
onon <- sapply(1:519, function(x) (tx.status[,x]%in%1) & (tx.status[,x+1]%in%1))
chronic.sum <- colSums(chronic)
offoff.sum <- colSums(chronic & offoff)
offon.sum <- colSums(chronic & offon)
onoff.sum <- colSums(chronic & onoff)
onon.sum <- colSums(chronic & onon)
offoff.prop <- offoff.sum/chronic.sum
offon.prop <- offon.sum/chronic.sum
onoff.prop <- onoff.sum/chronic.sum
onon.prop <- onon.sum/chronic.sum
mean(offon.prop/ (offoff.prop + offon.prop))
mean(onoff.prop/ (onoff.prop + onon.prop))
######
prop.on.tx <- sapply(1:520, function(x)
sum(mardham.sim.01.test.tx$atts.list[[x]]$tx.status, na.rm=T) /
sum(mardham.sim.01.test.tx$atts.list[[x]]$diag.status, na.rm=T) )
full.prop.on.tx <- sapply(1:520, function(x)
sum(mardham.sim.01.test.tx$atts.list[[x]]$tx.status==1 &
mardham.sim.01.test.tx$atts.list[[x]]$tt.traj=="YF", na.rm=T) /
sum(mardham.sim.01.test.tx$atts.list[[x]]$diag.status==1 &
mardham.sim.01.test.tx$atts.list[[x]]$tt.traj=="YF", na.rm=T) )
part.prop.on.tx <- sapply(1:520, function(x)
sum(mardham.sim.01.test.tx$atts.list[[x]]$tx.status==1 &
mardham.sim.01.test.tx$atts.list[[x]]$tt.traj=="YP", na.rm=T) /
sum(mardham.sim.01.test.tx$atts.list[[x]]$diag.status==1 &
mardham.sim.01.test.tx$atts.list[[x]]$tt.traj=="YP", na.rm=T) )
} |
57774fc3958da7f0b25ef14fa5c492e999793c45 | 967a0a9fc0868defc4c5eee1a1d805d9c006f90d | /plot4Rcode.R | f9ccb3cd356f10d10c36fd9b57d57f712eff8034 | [] | no_license | ormalik/EDAproject1 | 0c918fc7a919e888e12ad6df2b3379d985c1ac5e | 2bd4c0394bd1848fb33602165e123da4f17710e5 | refs/heads/master | 2021-01-15T17:45:52.645011 | 2014-11-09T16:26:23 | 2014-11-09T16:26:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,013 | r | plot4Rcode.R | ## plot4.png code
# Four-panel figure for plot4.png: global active power, voltage, energy
# sub-metering, and global reactive power, each against time.
# `totime` and `power_data` are assumed to be defined by the calling script.
par(mfrow = c(2, 2), mar = c(14, 6, 2, 2), cex=.5)

# Panel 1: global active power.
plot(totime, power_data$Global_active_power, xaxt=NULL, xlab = "", ylab = "Global Active Power", type="n")
lines(totime, power_data$Global_active_power, type="S")

# Panel 2: voltage.
plot(totime, power_data$Voltage, xaxt=NULL, xlab = "datetime", ylab = "Voltage", type="n")
lines(totime, power_data$Voltage, type="S")

# Panel 3: the three sub-metering series.
# BUG FIX: in the original, this plot() call and the first lines() call were
# fused onto a single line with no separator, which is a parse error; they
# are now separate statements.
plot(totime, power_data$Sub_metering_1, xaxt=NULL, xlab = "", ylab = "Energy sub metering", type="n")
lines(totime, power_data$Sub_metering_1, col = "black", type = "S")
lines(totime, power_data$Sub_metering_2, col = "red", type = "S")
lines(totime, power_data$Sub_metering_3, col = "blue", type = "S")
legend("topright", bty = "n", lty = c(1, 1), lwd = c(1, 1, 1), col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))

# Panel 4: global reactive power.
# NOTE(review): xaxt=NULL leaves the default axis in place; xaxt="n" is what
# suppresses it -- confirm which was intended (applies to all four panels).
plot(totime, power_data$Global_reactive_power, xaxt=NULL, xlab = "datetime", ylab = "Global_reactive_power", type="n")
lines(totime, power_data$Global_reactive_power, type="S")
b766aa89dcc3cf5f7b9a2e7fa025317bb4cbb42b | effdc0070dd900b61ffa37776e3c58c8d9aa090d | /Gehart2019.R | b77332769671f1901b52a1def199f5d6bb1e1c7a | [] | no_license | agranado/signaling-motifs | 734f7e7af28fe07361687a63e3fd7e950e0a7763 | b3da9e1779ffb7d748ff4a184525849993e3b955 | refs/heads/master | 2021-06-19T14:58:20.982956 | 2021-02-23T02:35:22 | 2021-02-23T02:35:22 | 164,530,664 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,026 | r | Gehart2019.R |
#library(Matrix)
library(Seurat)
library(data.table)
library(stringr)
# -------------------------------------------------------------------------
# Gehart et al. 2019 (GSE113561) intestine/organoid scRNA-seq analysis.
# This section loads the per-file raw count CSVs, merges them into one
# sparse genes x cells matrix, attaches the authors' cell metadata, and
# applies basic QC filters (cell counts, ERCC spike-ins, mito genes).
# -------------------------------------------------------------------------
rm(list=ls())
#data from intestine single cell rna seq
data.path = "/home/agranado/MEGA/Caltech/rnaseq/datasets/Gehart2019/GSE113561_RAW/rawcounts/"
files = paste(data.path,list.files(data.path),sep="")
# Read the first CSV separately to capture the gene names (first column),
# assumed identical and identically ordered in every file.
f1<-fread(files[1])
gene.names.raw = f1[,1] #names in first colum #same order for all csv, so we can take it as then assign it to the big matrix
f1<-f1[,-1]
raw.data.list = list(Matrix(as.matrix(f1),sparse=T))
for (i in 2:length(files)){
f2<-fread(files[i])
f2<-f2[,-1]
raw.data<-Matrix(as.matrix(f2),sparse=T)
# each file's sparse matrix becomes one element of raw.data.list
raw.data.list<-append(raw.data.list,raw.data)
 # f1<-cbind(f1,f2)
}
# column-bind all files into a single sparse count matrix
raw.data <- do.call(cbind, raw.data.list)
row.names(raw.data)<-gene.names.raw$V1
#before doing anything, we need to include the meta data annotated by the authors:
meta.data<-read.csv("/home/agranado/MEGA/Caltech/rnaseq/datasets/Gehart2019/GSE113561_RAW/GSE113561_SCIntOrg.CELLIDMetatable.csv")
#there is a duplicate entry here, lets remove it :
meta.data=meta.data[!duplicated(meta.data$CELLID),]
#for some reason there are more cells in the raw counts that in the meta.data, so let's remove them:
common.cells = intersect(colnames(raw.data),meta.data$CELLID)
#remove them from the count matrix
raw.data= raw.data[,which(colnames(raw.data) %in% common.cells)]
#there is one duplicated cell in the meta.data file :
meta.data=meta.data[which(meta.data$CELLID %in% common.cells), ]
#now we can match the entries by name:
row.names(meta.data)<-meta.data$CELLID
meta.data = meta.data[colnames(raw.data),]
#they have their own "exclude column"
# NOTE(review): assumes meta.data$exclude is logical; confirm it is not read
# in as a character column by read.csv.
raw.data = raw.data[,!meta.data$exclude]
#7507 cells after filtering
#plot the histogram of reads across cells:
hist(log10(Matrix::colSums(raw.data)))
#First filter: at least 2000 UNIQUE transcripts
high.count.cells = Matrix::colSums(raw.data>0)>2000 #this threshold they use in the paper
raw.data = raw.data[,high.count.cells]
#The data consists of intestine and organoid cells so lets create two objects:
organoid<-grep(pattern="SCOrg.*", colnames(raw.data),value=F)
raw.organoid<-raw.data[,organoid]
raw.data<-raw.data[,-organoid]
#look for more about ERCC here http://tools.thermofisher.com/content/sfs/manuals/cms_086340.pdf
erccs <- grep(pattern = "^ERCC-", x = rownames(x = raw.data), value = TRUE)
# there are genes (rows) that come from the ERCC control, so we can find them and calibrate the quantifitation
# there should be 92 ERCC transcripts
#percent is the ratio, for each cell, between the sum of ERCC detection divided by the total count
percent.ercc <- Matrix::colSums(raw.data[erccs, ])/Matrix::colSums(raw.data)
sum.ercc<-Matrix::colSums(raw.data[erccs, ])
ercc.index <- grep(pattern = "^ERCC-", x = rownames(x = raw.data), value = FALSE)
raw.data <- raw.data[-ercc.index,] #remove the ERCC sequences
#remove mitocondrial genes
#in this dataset, cells with high mitochondrial content (>50%) have been already removed
# this matrix is basically already being filtered and QC'd
mt.index <- grep(pattern = "^mt-", x = rownames(x = raw.data), value = FALSE)
percent.met <- Matrix::colSums(raw.data[mt.index, ])/Matrix::colSums(raw.data)
raw.data<-raw.data[-mt.index,]
#remove specific genes (listed by the authors)
#These genes are known to cause problem in clustering (artifacts)
remove.genes<-c("Rn45s", "Malat1", "Kcnq1ot1", "A630089N07Rik","Gm17821")
remove.pattern<-paste(remove.genes,collapse="|")
raw.data<-raw.data[-which(row.names(raw.data) %like% remove.pattern),]
#####
#this datset has the chromose number for each each, let's remove that:
#gene.names<-str_match(row.names(raw.data),"(.*)__chr.*")[,2]
#row.names(raw.data)<-gene.names
# remove exclude now? >meta.data[colnames(raw.data),]$exclude
#TIME data only exist for 1735 cells, so that's why their "exclude" filter might be important
#extract time data for each cell
# NOTE(review): `time.data` is not defined anywhere in this script -- the
# surrounding comments suggest the per-cell times come from the authors'
# metadata (meta.data); confirm before running this line.
cell.times<-time.data[colnames(raw.data),]$Time
#after removing the chromosome we have duplicated genes, let's remove them:
#raw.data = raw.data[-which(duplicated(row.names(raw.data))),]
# START Seurat:
# # # # ## SEURAT OBJECT
# Build the Seurat (v2 API) object from the filtered counts and attach the
# QC metadata computed above.
tiss <- CreateSeuratObject(raw.data = raw.data)
tiss <- AddMetaData(object = tiss, meta.data)
tiss <- AddMetaData(object = tiss, percent.ercc, col.name = "percent.ercc")
tiss <- AddMetaData(object = tiss, percent.met, col.name = "percent.mt")
tiss <- AddMetaData(object = tiss, cell.times, col.name = "cell.time" )
# Change default name for sums of counts from nUMI to nReads
colnames(tiss@meta.data)[colnames(tiss@meta.data) == 'nUMI'] <- 'nReads' # this is not UMI data so Seurat calculates only the number of reads
x11()
# Standard pipeline: normalize, scale, find variable genes, then PCA.
tiss <- NormalizeData(object = tiss, scale.factor = 1e4) #default normalization by Seurat
tiss <- ScaleData(object = tiss)
tiss <- FindVariableGenes(object = tiss, do.plot = TRUE, x.high.cutoff = 4, y.cutoff = 0.5, x.low.cutoff = 0.0125)
tiss <- RunPCA(object = tiss, do.print = FALSE, pcs.compute = 100,genes.print = 5)
tiss <- ProjectPCA(object = tiss, do.print = FALSE)
x11()
#```{r, echo=FALSE, fig.height=4, fig.width=8}
PCHeatmap(object = tiss, pc.use = 1:3, cells.use = 500, do.balanced = TRUE, label.columns = FALSE, num.genes = 8)
#```
x11()
PCElbowPlot(object = tiss, num.pc = 100)
n.pcs = 30 #based on PCElbowPlot
res.used <- 0.8 # mid range value for this parameter is related to the number of clusters goes from 0.6 - 1.2
x11();VizPCA(object = tiss, pcs.use = 1:4,font.size=1)
x11();PCAPlot(object = tiss, dim.1 = 1, dim.2 = 2)
#resolution: Value of the resolution parameter, use a value above (below) 1.0 if you want to obtain a larger (smaller) number of communities.
tiss <- FindClusters(object = tiss, reduction.type = "pca", dims.use = 1:n.pcs,
                     resolution = res.used, print.output = 0, save.SNN = TRUE,force.recalc=T,plot.SNN=T) #DONE
tiss <- RunTSNE(object = tiss, dims.use = 1:n.pcs, perplexity=40,
                check_duplicates = F)
x11()
TSNEPlot(tiss)
#
# Store cluster assignments and cell IDs in the metadata table.
tiss@meta.data['cluster'] <- tiss@ident
tiss@meta.data['cell'] <- rownames(tiss@meta.data)
#look for markers that authors found in their analysis:
features.plot = c("Sct", "Agr2", "Spink4", "Tff3",
                  "Muc2", "Lyz1", "Defa17", "Dll1", "Neurog3","Neurod1","Reg4","Chga","Tac1","Tph1")
#another set of markers they mention in the paper
# NOTE(review): this immediately overwrites the marker vector defined above.
features.plot <- c("Cck","Gcg","Nts","Sst","Ghrl","Tac1","Tph1","Gip","Neurog3","Neurod1","Isl1","Reg4","Chga")
features.plot =paste(features.plot,"_",sep="") #to avoid similar named genes
cellMarkers<- paste(features.plot,collapse="|")
plot.markers = row.names(raw.data)[which(row.names(raw.data) %like% cellMarkers)]
x11()
FeaturePlot(object = tiss, features.plot = plot.markers, cols.use = c("grey", "blue"),
            reduction.use = "tsne")
# find markers for all clusters:
tiss.markers <- FindAllMarkers(object = tiss, only.pos = TRUE, min.pct = 0.25,
                               thresh.use = 0.25)
tiss.markers %>% group_by(cluster) %>% top_n(2, avg_logFC)
##
top10 <- tiss.markers %>% group_by(cluster) %>% top_n(20, avg_logFC)
top10$gene<-str_match(top10$gene,"(.*)__chr.*")[,2] #remove the chr from the gene name
# setting slim.col.label to TRUE will print just the cluster IDS instead of
# every cell name
x11();DoHeatmap(object = tiss, genes.use = top10$gene, slim.col.label = TRUE, remove.key = TRUE)
### CALL cell types in clusters based on known markers provided by the authors :
# For every cluster, count how many of its top marker genes overlap each
# published cell-type marker list (supplementary table mmc3 of the paper).
cell.markers<-read.csv("/home/agranado/MEGA/Caltech/rnaseq/datasets/Gehart2019/GSE113561_RAW/1-s2.0-S009286741831643X-mmc3.csv")
all.clusters = unique(top10$cluster)
cell.types = colnames(cell.markers)
# rows = clusters (0-based cluster id + 1), columns = candidate cell types
cell.type.call = matrix(0,length(all.clusters),length(cell.types))
for(i in all.clusters){
  i = as.numeric(i)
  for(this.type in 1:length(cell.types)){
    # overlap between cluster i's top markers and this cell type's marker list
    cell.type.call[i+1,this.type] = length(which(top10$gene[top10$cluster==i] %in% cell.markers[,cell.types[this.type]]))
  }
}
colnames(cell.type.call)<-cell.types
row.names(cell.type.call)<- as.character(all.clusters)
###
#BMP profiles for clusters:
bmp.receptors<-c("Bmpr1a","Bmpr1b","Acvr1","Acvrl1","Acvr1b","Tgfbr1","Acvr1c","Acvr2a","Acvr2b","Bmpr2","Tgfbr2")
bmp.ligands<-c("Bmp2","Bmp3","Bmp4","Bmp5","Bmp6","Bmp7",
               "Bmp8a","Gdf3","Gdf9","Gdf10","Gdf11","Gdf15")
bmp.smads<-c("Smad1" ,"Smad2" ,"Smad3", "Smad4", "Smad5", "Smad6", "Smad7", "Smad9")
features.plot = c(bmp.receptors,bmp.ligands,bmp.smads)
features.plot =paste(features.plot,"_",sep="") #to avoid similar named genes
cellMarkers<- paste(features.plot,collapse="|")
plot.markers = row.names(raw.data)[which(row.names(raw.data) %like% cellMarkers)]
## NOTCH
notch.all<-c(
"Dll1","Dll3","Dll4","Dtx1","Jag1","Jag2","Adam10","Psen1","Psen2","Psenen","Notch1","Notch2","Notch3","Notch4","Mfng","Lfng","Rfng")
# NOTE(review): the BMP plot.markers built above is overwritten here by the
# Notch list before it is ever plotted.
features.plot = notch.all
features.plot =paste(features.plot,"_",sep="") #to avoid similar named genes
cellMarkers<- paste(features.plot,collapse="|")
plot.markers = row.names(raw.data)[which(row.names(raw.data) %like% cellMarkers)]
x11();FeaturePlot(tiss,plot.markers,cols.use = c("lightgrey","blue"))
###### MONOCLE
# Convert the Seurat object into a monocle CellDataSet and relabel the
# numeric Seurat clusters with biological cell-type names for plotting.
cds<-importCDS(tiss,import_all=T)
pData(cds)$cell_type2 <- plyr::revalue(as.character(pData(cds)$cluster),
                                       c("0" = 'Late_progenitors',
                                         "1" = 'Early_progenitors',
                                         "2" = 'EC_early',
                                         "3" = 'L_I_N_cells',
                                         "4" = 'EC_late',
                                         "5" = 'K_cells',
                                         "6" = 'EC_late',
                                         "7" = 'Goblet_cells',
                                         "8" = 'NA',
                                         "9" = 'X_cells',
                                         "10" = 'Delta_cells'))
# fixed color per cell type, used by scale_color_manual() below
cell_type_color <- c("Late_progenitors" = "#E088B8",
                     "Early_progenitors" = "#46C7EF",
                     "EC_early" = "#EFAD1E",
                     "L_I_N_cells" = "#8CB3DF",
                     "EC_late" = "#53C0AD",
                     "K_cells" = "#4EB859",
                     "Goblet_cells" = "#D097C4",
                     "X_cells" = "#ACC436",
                     "Delta_cells" = "#F5918A",
                     'NA' = '#000080')
DelayedArray:::set_verbose_block_processing(TRUE)
# Passing a higher value will make some computations faster but use more memory. Adjust with caution!
options(DelayedArray.block.size=1000e6)
# Monocle trajectory workflow: size factors, preprocess, UMAP embedding,
# partitioning, then principal-graph learning.
cds <- estimateSizeFactors(cds)
cds <- preprocessCDS(cds, num_dim = 50)
cds <- reduceDimension(cds, reduction_method = 'UMAP')
cds <- partitionCells(cds)
cds <- learnGraph(cds, RGE_method = 'SimplePPT')
x11()
plot_cell_trajectory(cds,
                     color_by = "cell_type2") +
  scale_color_manual(values = cell_type_color)
#set the root
# a helper function to identify the root principal points:
# Given a monocle CellDataSet after learnGraph(), return the name of the
# principal-graph vertex most often mapped to by cells of a chosen phenotype,
# to be used as the trajectory root in orderCells().
#   cds            - monocle CellDataSet with a learned principal graph
#   cell_phenotype - name of a pData() column holding cell-type labels
#   root_type      - label within that column marking the root population
get_correct_root_state <- function(cds, cell_phenotype, root_type){
  # cells carrying the requested label
  cell_ids <- which(pData(cds)[, cell_phenotype] == root_type)
  # per-cell index of the closest vertex in the learned principal graph
  closest_vertex <-
    cds@auxOrderingData[[cds@rge_method]]$pr_graph_cell_proj_closest_vertex
  closest_vertex <- as.matrix(closest_vertex[colnames(cds), ])
  # pick the most common closest vertex among the root-type cells;
  # V() is igraph's vertex accessor on the minimum spanning tree
  root_pr_nodes <-
    V(cds@minSpanningTree)$name[as.numeric(names
                                           (which.max(table(closest_vertex[cell_ids,]))))]
  root_pr_nodes
}
# Order cells in pseudotime starting from the early-progenitor root node.
MPP_node_ids = get_correct_root_state(cds,
                                      cell_phenotype =
                                        'cell_type2', "Early_progenitors")
cds <- orderCells(cds, root_pr_nodes = MPP_node_ids)
x11()
plot_cell_trajectory(cds)
## takes a bit
pr_graph_test <- principalGraphTest(cds, k=3, cores=8)
###
#plot and svae all trajectories:
# NOTE(review): `plots.path` is not defined in this script; set it before
# running this loop or the pdf() call will fail.
for (i in 1:length(plot.markers)){
  pdf(paste(plots.path,"notch/","trajectory_",plot.markers[i],".pdf",sep=""))
  plot_cell_trajectory(cds, markers = c(plot.markers[i]), use_color_gradient = TRUE)
  dev.off()
}
# NOTE(review): pathway.genes() is not defined here -- presumably a helper
# from elsewhere in this repository; confirm it is sourced first.
bmp.all = pathway.genes(pathway = "bmp")
bmp.indexes=match(bmp.all,rownames(raw.data))
|
ad1d667ecb1a46b3c92bb330108da91f9208e9af | d76e17939d1a18e6a1a272b8ea55cbe813ee1f58 | /UserMacros/TSHEQ_2ndEd_Programs_etc/Source_Programs/R/sgnrk.R | 65ce574f6b5ba3055af84d9897865870eab0a3db | [] | no_license | github-student-taker/sas_pgm | 60f6ad8476692001f73ee633a50e7db634bc3cca | 6a5662594a99d174ab610dc970b6f94bca5c9cf2 | refs/heads/added | 2021-01-21T14:01:06.502900 | 2016-01-25T08:11:46 | 2016-01-25T08:11:46 | 50,336,028 | 1 | 1 | null | 2016-05-31T00:51:15 | 2016-01-25T08:12:27 | SAS | UTF-8 | R | false | false | 1,034 | r | sgnrk.R | alpha <- 0.05
n <- 20
qpl1 <- 0.2398
qpl2 <- 0.7602
qplct <- (qpl1+qpl2)/2
eps <- (qpl2-qpl1)/2
d <- scan("<mydirectory>/Examples/ex5_4_sgnrk.raw",what=numeric(n),nlines=20,multi.line=TRUE,skip=0)
u <- 0
for (i in 1:(n-1))
for (j in (i+1):n)
u <- u + trunc(0.5*(sign(d[i]+d[j])+1))
zeta <- 0
for (i in 1:(n-2))
for (j in (i+1):(n-1))
for (k in (j+1):n)
zeta <- zeta + trunc(0.5*(sign(min(d[i]+d[j],d[i]+d[k])) + 1)) +
trunc(0.5*(sign(min(d[j]+d[i],d[j]+d[k])) + 1)) +
trunc(0.5*(sign(min(d[k]+d[i],d[k]+d[j])) + 1))
u <- u*2/n/(n-1)
zeta <- zeta*2/n/(n-1)/(n-2) - u**2
sigmah <- sqrt( (4*(n-2)*zeta + 2*u*(1-u) ) /n/(n-1) )
crit <- sqrt(qchisq(0.05,1,(eps/sigmah)**2))
if (abs((u-qplct)/sigmah) >= crit) rej <- 0 else
rej <- 1
if (is.na(sigmah) || is.na(crit)) rej <- 0
cat(" ALPHA =",alpha," N =",n," QPL1_ =",qpl1," QPL2_ =",qpl2,
" U =",u," SIGMAH =",sigmah," CRIT =",crit," REJ =",rej)
|
35b0060fdc017a9c527253b4cd41d35316e5818b | 88225e53030787da384dab55e9ddf05ca97561ee | /man/get_surveys.Rd | 02edf423fee09746408e7ae1f668c69de67bffa6 | [
"MIT"
] | permissive | romainfrancois/coalitions | bb3faa2fa7ec4fbc3339fe232517533cd5743e7d | 7ba8f93ed4d72412ddb37d30764c551612d26840 | refs/heads/master | 2021-07-09T22:59:44.105391 | 2019-03-08T15:04:21 | 2019-03-08T15:04:21 | 178,819,390 | 0 | 0 | null | 2019-04-01T08:30:34 | 2019-04-01T08:30:33 | null | UTF-8 | R | false | true | 1,392 | rd | get_surveys.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/scrapers.R, R/survey-helpers.R
\name{get_surveys}
\alias{get_surveys}
\alias{get_surveys_by}
\alias{get_surveys_nds}
\alias{get_latest}
\title{Scrape surveys from all pollsters}
\usage{
get_surveys(country = c("DE", "AT"))
get_surveys_by()
get_surveys_nds()
get_latest(surveys = NULL, max_date = Sys.Date())
}
\arguments{
\item{country}{Choose country from which surveys should be scraped.
Currently \code{"DE"} (Germany) and \code{"AT"} (Austria) are supported.}
\item{surveys}{If provided, latest survey will be obtained from this object,
otherwise calls \code{\link{get_surveys}}.}
\item{max_date}{Specifies the date, relative to which latest survey will
be searched for. Defaults to \code{Sys.Date}.}
}
\description{
Scrapes data from \url{wahlrecht.de} and performs some sanitizing.
Scrapes data from \url{wahlrecht.de} and performs some sanitizing.
Given a specific date, extract the survey from this date or the last one
before this date.
}
\examples{
library(coalitions)
# scrape data for the German federal election
# get_surveys()
library(coalitions)
### Scrape the newest poll for the German federal election
# Possibility 1: Calling get_latest without arguments scrapes surveys from the web
# Possibility 2: Use get_latest() on an already scraped dataset
surveys <- get_latest(surveys_sample)
}
|
13f893bf8993e9bba299ce82e8279bc20bc6f7f4 | 7088e3fae8303e45c1e18a2dcf41392cc249a05c | /man/addin.Rd | e6af9e35140f842a067d20257a035932ac428604 | [] | no_license | cran/autota | 1ae6b962eef6022ab4b48c2de8245a4705d5a4e6 | 214f69dbc4e22cd2c1e02562eb4c09942f93d746 | refs/heads/master | 2021-04-08T08:35:52.063450 | 2020-03-22T06:10:09 | 2020-03-22T06:10:09 | 248,758,775 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 410 | rd | addin.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/autota.R
\name{addin}
\alias{addin}
\title{Run the AutoTA RStudio addin.
You can either run this directly, or run it through the RStudio Addins menu.}
\usage{
addin()
}
\description{
Run the AutoTA RStudio addin.
You can either run this directly, or run it through the RStudio Addins menu.
}
\examples{
\donttest{autota::addin()}
}
|
799dd89886318e9e560253a85878c3f1d3b583ef | 5f75893cbd0755a2230d17dda2f0c3c9a9557740 | /draw_annotation_blocks.R | 07fb4c0f35b1c2f43d5c60a2314d386446b242ba | [] | no_license | wrf/genomeGTFtools | df94e3fe9ab9415c276b52d1baed9b665f33cae0 | 951069a9728541c44dd68542dcb785954deaa7a5 | refs/heads/master | 2023-08-16T09:03:04.962548 | 2023-08-15T14:23:29 | 2023-08-15T14:23:29 | 33,300,278 | 51 | 25 | null | 2023-01-07T16:52:55 | 2015-04-02T09:19:23 | Python | UTF-8 | R | false | false | 4,637 | r | draw_annotation_blocks.R | #!/usr/bin/env Rscript
# convert tabular annotation format to rectangle blocks
# or polygon blocks for single CDS genes, like bacteria
# last modified 2022-12-21
args = commandArgs(trailingOnly=TRUE)
inputfile = args[1]
#inputfile = "~/genomes/mnemiopsis_leidyi/ml0011_63k-88k_annot_short.tab"
#inputfile = "~/git/genomeGTFtools/test_data/lux_locus_short_annot.tab"
outputfile = gsub("([\\w/]+)\\....$","\\1.pdf",inputfile,perl=TRUE)
annottab = read.table(inputfile,header=FALSE,sep="\t",stringsAsFactors=FALSE)
num_tx = 30
seqcount = args[2]
if (!is.na(seqcount)) {
num_tx = as.integer(seqcount)
}
# mode to draw style of bacterial genomes, meaning polygons in a row
# meaning like below, all on the same line
# |||> ||> ||> |||||> <||
bmode = FALSE
# default is to draw rectangles as exons with an arrow
# for bacteria, genes are usually entire CDS and are better rendered with polygons
draw_polygons = FALSE
# separate features by category
axistype = annottab[which(annottab[,2]=="axis"),]
# for some reason this has to be forced to numeric sometimes
window_start = as.numeric(axistype[1,3])
window_end = as.numeric(axistype[1,4])
if (axistype[1,5]=="True") {
bmode = TRUE
}
axis_width = window_end - window_start
offset_width = axis_width * 0.05
print(paste("# plotting axis of distance",axis_width,"for up to",num_tx,"genes"))
# mRNA or transcripts
mrnatypes = annottab[which(annottab[,2]=="mRNA"),]
# for cases where no mRNA is given, assume meant to be entire genes as blocks
if (dim(mrnatypes)[1]==0) {
mrnatypes = annottab[which(annottab[,2]=="gene"),]
draw_polygons = TRUE
offset_width = axis_width * 0.01
}
is_forward = which(mrnatypes[,5]=="+")
forward_tx = mrnatypes[is_forward,]
is_reverse = which(mrnatypes[,5]=="-")
reverse_tx = mrnatypes[is_reverse,]
is_strandless = which(mrnatypes[,5]==".")
nostrand_tx = mrnatypes[is_strandless,]
exontypes = annottab[which(annottab[,2]=="exon"),]
# for cases where no mRNA is given, assume meant to be entire genes as blocks
# so rects are drawn for genes, not exons
if (dim(exontypes)[1]==0) {
exontypes = annottab[which(annottab[,2]=="gene"),]
}
tx_names = unique(mrnatypes[,1])
print(paste("# counted", length(tx_names) ,"transcripts"))
tx_index = match(mrnatypes[,1],tx_names)
exon_index = match(exontypes[,1],tx_names)
# draw the PDF
pdf(file=outputfile, width=8, height=10)
plot(0,0, type='n', axes=FALSE, frame.plot=FALSE, ylim=c(0,num_tx), xlim=c(window_start,window_end), xlab="", ylab="")
par(mar=c(4.5,2,1,1))
axis(1, cex.axis=1.4)
# if drawing genes alone, no exons, then draw them as polygons
if (draw_polygons==TRUE) {
# draw forward polygons
for (yval in tx_index[is_forward]) {
genelen = mrnatypes[yval,4]-mrnatypes[yval,3]
used_offset = ifelse(genelen < offset_width, genelen, offset_width)
forward_x = c( mrnatypes[yval,4], mrnatypes[yval,4]-used_offset, mrnatypes[yval,3], mrnatypes[yval,3], mrnatypes[yval,4]-used_offset, mrnatypes[yval,4])
if (bmode){yval = num_tx/2}
forward_y = c( yval, yval-0.3, yval-0.3, yval+0.3, yval+0.3, yval)
polygon( forward_x, forward_y, col="#25893a")
}
# draw reverse polygons
for (yval in tx_index[is_reverse]) {
genelen = mrnatypes[yval,4]-mrnatypes[yval,3]
used_offset = ifelse(genelen < offset_width, genelen, offset_width)
reverse_x = c( mrnatypes[yval,3], mrnatypes[yval,3]+used_offset, mrnatypes[yval,4], mrnatypes[yval,4], mrnatypes[yval,3]+used_offset, mrnatypes[yval,3])
if (bmode){yval = num_tx/2}
reverse_y = c( yval, yval-0.3, yval-0.3, yval+0.3, yval+0.3, yval)
polygon( reverse_x, reverse_y, col="#25893a")
}
# draw rectangles for strandless features, usually this is an error
for (yval in tx_index[is_strandless]) {
rect(mrnatypes[yval,3], yval-0.3, mrnatypes[yval,4], yval+0.3, col="#25893a")
}
# otherwise draw arrows marking direction, and each exon becomes a box
# strandless exons should be drawn but have no arrow
} else {
# draw as arrows and boxes
arrows(forward_tx[,3], tx_index[is_forward], forward_tx[,4]+offset_width, tx_index[is_forward], lwd=3, angle=15, length=0.1, col="#bbbbbb")
arrows(reverse_tx[,4], tx_index[is_reverse], reverse_tx[,3]-offset_width, tx_index[is_reverse], lwd=3, angle=15, length=0.1, col="#bbbbbb")
rect(exontypes[,3], exon_index-0.3, exontypes[,4], exon_index+0.3, col="#25893a")
}
# write the names of each transcript
if (bmode) { text(mrnatypes[,3], rep(num_tx/2, length(tx_index)), tx_names, adj=c(1,1), srt=45)
} else {text(mrnatypes[,3], tx_index, tx_names, pos=2)}
# write the scaffold name on the margin
mtext(axistype[1,1], side=1, at=window_start-(1.5*offset_width), cex=1.8, line=-0.4)
#
dev.off()
#
|
dc3bb073c80da06c20542f99760014d2921f1cf7 | 2e3ec14a9cc530d88ca945855086fcd33a268136 | /man/getDNAClass.Rd | c855c1f9327f08b3d13a09440da1f8f33e240a84 | [
"WTFPL"
] | permissive | ritianjiang/RTFE | 569965601ed19e6f301384a26b85c5b7e0e82933 | 2e9de0849bb0989f1bef973b3ceb8a38f8c3c473 | refs/heads/master | 2021-07-05T21:36:21.365653 | 2020-08-04T10:32:33 | 2020-08-04T10:32:33 | 147,652,107 | 1 | 1 | WTFPL | 2020-01-24T01:00:05 | 2018-09-06T09:42:43 | R | UTF-8 | R | false | true | 407 | rd | getDNAClass.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PseKNC.R
\name{getDNAClass}
\alias{getDNAClass}
\title{getDNAClass}
\usage{
getDNAClass(nuc)
}
\arguments{
\item{nuc}{A character belongs to A/T/C/G}
}
\value{
A vector contains the 3 numeric values, only 0 or 1
}
\description{
The function return 3 numeric values, denoted theRing struc, Functional group and
Hydrogen bonding
}
|
76d7d811c94bf579911bd03cbb02ef83d7194936 | 0732340eadb6fbf63c4ca67d48e8ade7cbe8a464 | /functions/dates_and_times.R | 114b5077fdd31a00be8c210deff64e7501d038a5 | [] | no_license | gstewart12/delmarva-baywatch | 244410deec3c0c5149a806a1884aaa682633fd45 | eac5638514b974aab0e6f390bf1d5cb0cbafa886 | refs/heads/master | 2023-07-09T08:41:07.509024 | 2021-08-16T20:21:22 | 2021-08-16T20:21:22 | 254,463,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,746 | r | dates_and_times.R |
# Convert a date-time to its hour-of-day as a decimal number
# (e.g. 13:30 -> 13.5); seconds are ignored.
decimal_hour <- function(x) {
  whole_hours <- lubridate::hour(x)
  minute_fraction <- lubridate::minute(x) / 60
  whole_hours + minute_fraction
}
# Append calendar components (year, month, week, date, running day index,
# decimal hour) derived from a timestamp column.
#
# data:      data frame with a POSIXct timestamp column
# timestamp: unquoted column holding the timestamps (defaults to `timestamp`)
# offset:    seconds subtracted before extracting date components, so a
#            period-ending stamp is attributed to the period it closes
#            (900 s = 15 min; presumably half of a half-hourly step --
#            TODO confirm against the averaging convention used upstream)
#
# `day` is a running index that increments whenever `date` changes; `hour`
# is taken from the *unshifted* timestamp.
add_time_comps <- function(data, timestamp = timestamp, offset = 900) {
  timestamp <- rlang::enquo(timestamp)
  # run-length-based id: consecutive identical values share one integer id
  id_along <- function(x) {
    rle <- rle(as.vector(x))
    rep(seq_along(rle$lengths), times = rle$lengths)
  }
  data %>%
    dplyr::mutate(
      year = lubridate::year(!!timestamp - offset),
      month = lubridate::month(!!timestamp - offset),
      week = lubridate::week(!!timestamp - offset),
      date = lubridate::date(!!timestamp - offset),
      day = id_along(date),
      hour = decimal_hour(!!timestamp)
    )
}
# Drop the time-component columns produced by add_time_comps() from `data`.
#
# data: data frame, typically the output of add_time_comps()
# ...:  optional tidyselect expressions restricting *which* of the time
#       columns to drop (default: all of them)
#
# Returns `data` without the selected time-component columns.
remove_time_comps <- function(data, ...) {
  dots <- rlang::exprs(...)
  # Candidate time-component columns. any_of() tolerates data frames that
  # lack some of these columns, whereas selecting them by bare name (as
  # before) errors out when e.g. `week` was never added.
  time_comps <- dplyr::select(
    data,
    dplyr::any_of(c("timestamp", "year", "month", "week", "date", "day", "hour"))
  )
  # narrow to the caller-requested subset, if any
  if (!rlang::is_empty(dots)) time_comps <- dplyr::select(time_comps, !!!dots)
  time_names <- names(time_comps)
  dplyr::select(data, -dplyr::any_of(time_names))
}
# Generate Year-long Half-hourly Time Step Vector
#
# year:     calendar year (numeric, or a string coercible to numeric)
# dts:      daily time steps; only 24 (hourly) or 48 (half-hourly) supported
# tz:       time zone of the generated timestamps
# shift_by: minutes by which the first stamp is offset past midnight
#           (defaults to half a step, i.e. period-centered stamps)
#
# Returns a POSIXct vector running from Jan 1 of `year` (offset by
# `shift_by` minutes) to Jan 1 of the following year, stepping every
# 24/dts hours.
create_timesteps <- function(year, dts = 48, tz = "UTC", shift_by = 720 / dts) {
  year <- as.numeric(year)
  if (!dts %in% c(24, 48)) {
    stop("Only implemented for 24 or 48 daily time steps.", call. = FALSE)
  }
  step_secs <- 86400 / dts
  first_stamp <- ISOdatetime(year, 1, 1, 0, shift_by, 0, tz = tz)
  last_stamp <- ISOdatetime(year + 1, 1, 1, 0, 30 - shift_by, 0, tz = tz)
  seq(first_stamp, last_stamp, by = step_secs)
}
# Fill gaps in a time series by left-joining the data onto a complete,
# regularly spaced timestamp sequence.
#
# data:      data frame containing a POSIXct time column
# time:      unquoted time column; if missing, the first POSIXct column found
# time_diff: step between stamps; if missing, inferred as the most common
#            interval present in the data
# .first/.last: sequence endpoints; default to the data's first/last stamp
#
# Rows absent from `data` appear in the result with NA in all other columns.
complete_time_seq <- function(data, time, time_diff, .first, .last) {
  # Not using yet--risky if timestamp doesn't start/end on start/end of year
  # Determine time var if not given
  if (missing(time)) {
    # NOTE(review): is.POSIXct here is unqualified (lubridate's) -- this
    # relies on lubridate being attached, not merely installed.
    time_name <- data %>%
      purrr::map(is.POSIXct) %>%
      purrr::keep(~ .) %>%
      names() %>%
      vctrs::vec_slice(1)
    time <- rlang::sym(time_name)
  } else {
    time_name <- rlang::ensym(time) %>% rlang::as_string()
    time <- rlang::enquo(time)
  }
  time_seq <- dplyr::pull(data, !!time)
  if (missing(.first)) .first <- dplyr::first(time_seq)
  if (missing(.last)) .last <- dplyr::last(time_seq)
  # Determine interval if not given
  # TODO this should be its own function 'get_time_diff'
  if (missing(time_diff)) {
    # modal interval: vec_count() sorts by frequency, so slice 1 is the
    # most common step between consecutive stamps
    time_diff <- time_seq %>%
      lubridate::int_diff() %>%
      lubridate::as.duration() %>%
      vctrs::vec_count() %>%
      vctrs::vec_slice(1) %>%
      purrr::pluck("key", 1)
  } else {
    time_diff <- lubridate::duration(time_diff)
  }
  # Create sequence vector
  full_seq <- tibble::tibble(
    !!time_name := seq(.first, .last, by = time_diff)
  )
  dplyr::left_join(full_seq, data, by = time_name)
}
1fd96f96db2d388e9b749927c0f9a5ec2089fd2b | f71d0b16f3e1ed1137d2caa2460818a870d3b6e7 | /ContinuationReport/ConcordanceRate/PI-HAT_HETEROZYGOSITY.R | 7303c812ecf4dca47998db505e74dbc618838bee | [] | no_license | MTutino/PhD | 1bd049ad70f78bafee8f212ac5a8cbcdb4ef6205 | 5bf26e1479220fd1a7bdc307e4c550b73aa8524e | refs/heads/master | 2020-03-29T06:03:34.630341 | 2018-11-06T12:45:09 | 2018-11-06T12:45:09 | 149,607,024 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,995 | r | PI-HAT_HETEROZYGOSITY.R | library(tidyverse)
#Set working directory
setwd("B:/MauroTutino/ConcordanceRate/")
#Load IBD data
ibd<- read.table("ibd.genome", header=T)
#Make the plot
ggheatmap <- ggplot(ibd, aes(x=ibd$FID1, y=ibd$FID2, fill=ibd$PI_HAT)) +
geom_tile(color="white") +
scale_fill_gradient2(low = "blue", high = "red", mid = "white",
midpoint = 0.5, limit = c(0,1), space = "Lab",
name="LD Correlation\nR2") +
scale_x_discrete(position="top") +
theme_bw()+
theme(axis.text.x = element_text(angle = 90, face="bold"),
axis.text.y = element_text(face="bold"),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank())+
coord_fixed()
print(ggheatmap)
#Modify the plot before printing
p<-ggheatmap +
theme(
axis.title.x = element_blank(),
axis.title.y = element_blank(),
panel.grid.major = element_blank(),
panel.border = element_blank(),
panel.background = element_blank(),
axis.ticks = element_blank(),
legend.justification = c(1, 0),
legend.position = c(0.9, 0.2),
legend.direction = "horizontal")+
guides(fill = guide_colorbar(barwidth = 14, barheight = 3,
title.position = "top", title.hjust = 0.5))
# Plot the distribution of specific pi_hat
ibd[ibd$FID1 == "14350" | ibd$FID2 == 14350,] %>%
ggplot(aes(x=PI_HAT)) + geom_histogram(binwidth = .005)
ibd[ibd$FID1 == "11265" | ibd$FID2 == 11265,] %>%
ggplot(aes(x=PI_HAT)) + geom_histogram(binwidth = .005)
nrow(ibd[ibd$FID1 == 11265 | ibd$FID2 == 11265 & ibd$PI_HAT > 0,])/nrow(ibd[ibd$FID1 == 11265 | ibd$FID2 == 11265,])
nrow(ibd[ibd$FID1 == 14350 | ibd$FID2 == 14350 & ibd$PI_HAT > 0,])/nrow(ibd[ibd$FID1 == 14350 | ibd$FID2 == 14350,])
# Plot the distribution of all pi_hat > 0.5
ggplot(ibd[ibd$PI_HAT > 0,],aes(x=PI_HAT)) +
geom_histogram(binwidth = .01) +
labs(title="PI-HAT count",
x="PI-HAT",
y="Count") +
theme(plot.title = element_text(size = 28, face = "bold"),
axis.text = element_text(size=15, face = "bold"),
axis.title = element_text(size = 25, face="bold"),
legend.text = element_text(size=15, face = "bold"),
legend.title = element_text(size=15, face = "bold"))
# Create a data frame containing only the maximum pi_hats per sample
mylist<-list()
i=1
for (smpl in unique(ibd$FID1)){
mylist[[i]]<-ibd[ibd$FID1 == smpl & ibd$PI_HAT == max(ibd$PI_HAT[ibd$FID1 == smpl]),][1,1:ncol(ibd)]
i=i+1
}
df <- do.call("rbind",mylist) #combine all vectors into a matrix
df<-as.data.frame(df)
colnames(df)<- colnames(ibd)
# Get the pi-hat of only the samples that have a match in genotype
df_05<-df[df$FID1[df$PI_HAT > 0] %in% Conc_rate$sample,]
df_05<-df_05[df_05$FID1 %in% Conc_rate$sample,]
# Plot the distribution of the maximum pi_hat
ggplot(df_05,aes(x=df_05$PI_HAT)) + geom_histogram(binwidth = .01)
# Plot the distribution of the maximum pi_hat, excluding those with no match
ggplot(df_05[df_05$PI_HAT > 0,],aes(x=PI_HAT)) +
geom_histogram(binwidth = .01) +
labs(title="Max PI-HAT count",
x="Max PI-HAT/Sample",
y="Count") +
theme(plot.title = element_text(size = 28, face = "bold"),
axis.text = element_text(size=15, face = "bold"),
axis.title = element_text(size = 25, face="bold"),
legend.text = element_text(size=15, face = "bold"),
legend.title = element_text(size=15, face = "bold"))
df[df$FID1 == 35798, ]
df[df$FID1 == 11265, ]
df[df$FID1 == "11265G", ]
df[df$FID1 == 14350, ]
df[df$FID1 == "14350G", ]
setwd("B:/MauroTutino/ConcordanceRate")
Conc_rate<-read.table("Conc_rate.txt", header = T)
write.table(df_05, "Sequencing_pi_hat.txt", sep = "\t", col.names = T, row.names = F)
##### PI-HAT vs Mapped Readsn ######################
# Load mapped reads counts
setwd("B:/MauroTutino/QC_and_preprocessing")
mapped_reads_no_chr6<-read.table("Total_mapped_reads_no_chr6.txt", header = F)
colnames(mapped_reads_no_chr6)<-c("Sample_ID","M_reads")
setwd("B:/MauroTutino/ConcordanceRate")
Conc_rate<-read.table("Conc_rate.txt", header = T)
setwd("B:/MauroTutino/eQTL/")
IDs_link<-read.table("Sequecing_Cytokine_names.txt", header = F)
colnames(IDs_link)<-c("SeqID","Sample_ID")
mapped_reads_IDs<-merge(mapped_reads_no_chr6, IDs_link, by.x="Sample_ID", by.y="SeqID")
plot_concRate_mappedReads <- merge(x=mapped_reads_IDs, y=Conc_rate, by.x="Sample_ID.y", by.y = "sample")
# Load maximum PI-HATs
setwd("B:/MauroTutino/ConcordanceRate")
PI_HAT<-read.table("Sequencing_pi_hat.txt", header = T)
plot_Pi_Hat_mappedReads <-merge(mapped_reads_IDs, PI_HAT, by.x="Sample_ID.y", by.y="FID1")
# PLot PI-HAT vs Mapped reads
plot_Pi_Hat_mappedReads %>%
ggplot(aes(x=log10(M_reads), y=PI_HAT)) +
geom_point()
################ Heterozygosity #######################
#Set working directory
setwd("B:/MauroTutino/ConcordanceRate/")
#Load the heterozygosity
Het<- read.table("Sequencing_heterozygosity.txt", header=T)
#Subset to only the sequencing samples
Het<-Het[!grepl("G", Het$FID),]
# Calculate mean heterozygosity (N-O)/N :
#(Total number of non missing genotypes-Observed homozygous calls)/Total number of non missing genotypes
Het$meanHet = (Het$N.NM. - Het$O.HOM.)/Het$N.NM.
#Plot Heterozygosity
ggplot(Het,aes(x=F, y=..count..)) + #names of levels are arbitrary, putting in alphabetical order
geom_histogram(binwidth = .01)
#Load missing data info
setwd("B:/MauroTutino/VariantCalling/")
MissInd<-read_tsv("out.imiss",col_names = T)
MissInd<- MissInd %>% arrange(F_MISS)
MissInd$INDV<-factor(MissInd$INDV,levels = MissInd$INDV[order(-MissInd$F_MISS)])
# Plot Mean heterozygosity vs Missing genotypes
colors <- densCols(MissInd$F_MISS,Het$meanHet)
plot(log10(MissInd$F_MISS+0.000001),Het$meanHet, col=colors, xlim=c(-3,0),ylim=c(0,0.5),pch=20, xlab="Proportion of missing genotypes", ylab="Heterozygosity rate",axes=F)
axis(2,at=c(0,0.05,0.10,0.15,0.2,0.25,0.3,0.35,0.4,0.45,0.5),tick=T)
axis(1,at=c(-3,-2,-1,0),labels=c(0.001,0.01,0.1,1))
abline(h=mean(Het$meanHet)-(3*sd(Het$meanHet)),col="RED",lty=2)
abline(h=mean(Het$meanHet)+(3*sd(Het$meanHet)),col="RED",lty=2)
abline(v=-1.522879, col="RED", lty=2)
# Plot distribution of the Mean heterozygosity +/- 3 SD
ggplot(Het,aes(x=Het$meanHet, y=..count..)) + #names of levels are arbitrary, putting in alphabetical order
geom_histogram(binwidth = .001) +
geom_vline(xintercept =mean(Het$meanHet)-(3*sd(Het$meanHet)), col="RED", lty=2) +
geom_vline(xintercept=mean(Het$meanHet)+(3*sd(Het$meanHet)), col="RED", lty=2) +
labs(title="Mean heterozygosity Distribution",
x="Mean Heterozygosity",
y="Count") +
theme(plot.title = element_text(size = 28, face = "bold"),
axis.text = element_text(size=15, face = "bold"),
axis.title = element_text(size = 25, face="bold"),
legend.text = element_text(size=15, face = "bold"),
legend.title = element_text(size=15, face = "bold"))
|
b26930c85feeee1851bb9dc80901f3873d6b0d43 | 8edf0521ebc0ca53ec618d6d220c47c851caaa71 | /man/SS_profile.Rd | 82d969c1d2bca15daf8d4bdb2e56f67da821b5f4 | [
"LicenseRef-scancode-warranty-disclaimer"
] | no_license | amart/r4ss | 9b730038ee4c4b6d38aaabe81b6ad9fddf0eb4f3 | fbccbace9a70e846401d32577aeab9f25cb31ba5 | refs/heads/master | 2021-01-17T06:03:03.172272 | 2020-10-04T01:38:14 | 2020-10-04T01:38:14 | 24,735,775 | 1 | 1 | null | null | null | null | UTF-8 | R | false | true | 6,028 | rd | SS_profile.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/SS_profile.R
\name{SS_profile}
\alias{SS_profile}
\title{Run a likelihood profile in Stock Synthesis.}
\usage{
SS_profile(
dir = "C:/myfiles/mymodels/myrun/",
masterctlfile = "control.ss_new",
newctlfile = "control_modified.ss",
linenum = NULL,
string = NULL,
profilevec = NULL,
usepar = FALSE,
globalpar = FALSE,
parfile = "ss.par",
parlinenum = NULL,
parstring = NULL,
dircopy = TRUE,
exe.delete = FALSE,
model = "ss",
extras = "-nox",
systemcmd = FALSE,
saveoutput = TRUE,
overwrite = TRUE,
whichruns = NULL,
SSversion = "3.30",
prior_check = TRUE,
read_like = TRUE,
verbose = TRUE
)
}
\arguments{
\item{dir}{Directory where input files and executable are located.}
\item{masterctlfile}{Source control file. Default = "control.ss_new"}
\item{newctlfile}{Destination for new control files (must match entry in
starter file). Default = "control_modified.ss".}
\item{linenum}{Line number of parameter to be changed. Can be used instead
of \code{string} or left as NULL.}
\item{string}{String partially matching name of parameter to be changed. Can
be used instead of \code{linenum} or left as NULL.}
\item{profilevec}{Vector of values to profile over. Default = NULL.}
\item{usepar}{Use PAR file from previous profile step for starting values?}
\item{globalpar}{Use global par file ("parfile_original_backup.sso", which is
automatically copied from original \code{parfile}) for all runs instead
of the par file from each successive run}
\item{parfile}{Name of par file to use (for 3.30 models, this needs to
remain 'ss.par'). When \code{globalpar=TRUE}, the backup copy of this
is used for all runs.}
\item{parlinenum}{Line number in par file to change.}
\item{parstring}{String in par file preceding line number to change.}
\item{dircopy}{Copy directories for each run? NOT IMPLEMENTED YET.}
\item{exe.delete}{Delete exe files in each directory? NOT IMPLEMENTED YET.}
\item{model}{Name of executable. Default = "ss".}
\item{extras}{Additional commands to use when running SS. Default = "-nox"
will reduce the amount of command-line output.}
\item{systemcmd}{Should R call SS using "system" function instead of "shell".
This may be required when running R in Emacs. Default = FALSE.}
\item{saveoutput}{Copy output .SSO files to unique names. Default = TRUE.}
\item{overwrite}{Overwrite any existing .SSO files. Default = TRUE. If FALSE,
then some runs may be skipped.}
\item{whichruns}{Optional vector of run indices to do. This can be used to
re-run a subset of the cases in situations where the function was
interrupted or some runs fail to converge. Must be a subset of 1:n, where n
is the length of profilevec.}
\item{SSversion}{SS version number. Currently only "3.24" or "3.30" are
supported, either as character or numeric values
(noting that numeric 3.30 = 3.3).}
\item{prior_check}{Check to make sure the starter file is set to include
the prior likelihood contribution in the total likelihood. Default = TRUE.}
\item{read_like}{Read the table of likelihoods from each model as it finishes.
Default = TRUE. Changing to FALSE should allow the function to play through
even if something is wrong with reading the table.}
\item{verbose}{Controls amount of info output to command line. Default =
TRUE.}
}
\description{
Iteratively changes the control file using SS_changepars.
}
\note{
The starting values used in this profile are not ideal and some models
may not converge. Care should be taken in using an automated tool like this,
and some models are likely to require rerunning with alternate starting
values.
Also, someday this function will be improved to work directly with the
plotting function \code{\link{SSplotProfile}}, but they don't yet work well
together. Thus, even if \code{\link{SS_profile}} is used, the output should
be read using \code{\link{SSgetoutput}} or by multiple calls to
\code{\link{SS_output}} before sending to \code{\link{SSplotProfile}}.
}
\examples{
\dontrun{
# note: don't run this in your main directory
# make a copy in case something goes wrong
mydir <- "C:/ss/Simple - Copy"
# the following commands related to starter.ss could be done by hand
# read starter file
starter <- SS_readstarter(file.path(mydir, 'starter.ss'))
# change control file name in the starter file
starter$ctlfile <- "control_modified.ss"
# make sure the prior likelihood is calculated
# for non-estimated quantities
starter$prior_like <- 1
# write modified starter file
SS_writestarter(starter, dir=mydir, overwrite=TRUE)
# vector of values to profile over
h.vec <- seq(0.3,0.9,.1)
Nprofile <- length(h.vec)
# run SS_profile command
profile <- SS_profile(dir=mydir, # directory
# "NatM" is a subset of one of the
# parameter labels in control.ss_new
model="ss",
masterctlfile="control.ss_new",
newctlfile="control_modified.ss",
string="steep",
profilevec=h.vec)
# read the output files (with names like Report1.sso, Report2.sso, etc.)
profilemodels <- SSgetoutput(dirvec=mydir, keyvec=1:Nprofile)
# summarize output
profilesummary <- SSsummarize(profilemodels)
# OPTIONAL COMMANDS TO ADD MODEL WITH PROFILE PARAMETER ESTIMATED
MLEmodel <- SS_output("C:/ss/SSv3.24l_Dec5/Simple")
profilemodels$MLE <- MLEmodel
profilesummary <- SSsummarize(profilemodels)
# END OPTIONAL COMMANDS
# plot profile using summary created above
SSplotProfile(profilesummary, # summary object
profile.string = "steep", # substring of profile parameter
profile.label="Stock-recruit steepness (h)") # axis label
# make timeseries plots comparing models in profile
SSplotComparisons(profilesummary,legendlabels=paste("h =",h.vec))
}
}
\seealso{
\code{\link{SSplotProfile}}, \code{\link{SSgetoutput}},
\code{\link{SS_changepars}}, \code{\link{SS_parlines}}
}
\author{
Ian Taylor
}
|
e2839b6cefa37a92145aa58e091b7050eff38918 | 08c4885facb8a0a40a995eaf989c6252fb8350b0 | /man/create_transmod_params.Rd | dc49c726e1e56e8ded9ad14bc4286c5827108064 | [] | no_license | InnovationValueInitiative/IVI-NSCLC | a1aeefb23113fe88b578e1023d21fbf0a6488254 | bafd1faa8b1887b91cd42af00b43cd5b41ee53cf | refs/heads/master | 2020-03-24T08:23:52.095148 | 2019-07-23T04:21:11 | 2019-07-23T04:21:11 | 142,594,493 | 14 | 14 | null | 2019-07-23T04:21:12 | 2018-07-27T15:29:57 | R | UTF-8 | R | false | true | 1,770 | rd | create_transmod_params.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transmod.R
\name{create_transmod_params}
\alias{create_transmod_params}
\title{Parameters for transition model}
\usage{
create_transmod_params(n = 100, data,
params_mstate_nma = iviNSCLC::params_mstate_nma, check_covs = FALSE,
covs = NULL)
}
\arguments{
\item{n}{The number of random observations of the parameters to draw.}
\item{data}{A data table of class "expanded_hesim_data" returned from
\code{\link{create_transmod_data}}.}
\item{params_mstate_nma}{A list of \code{\link{params_surv}} objects,
where each element in the list denotes a survival distribution. Should have
the same variable names as \code{\link{params_mstate_nma}}.}
\item{check_covs}{Logical indicating whether to check that all covariates in
\code{data} are contained in \code{params}.}
\item{covs}{If \code{check_covs} is \code{TRUE}, then \code{data_covs}
cannot be \code{NULL} and must specify all of the covariates in \code{data}
that should be contained in \code{params}.}
}
\value{
A \code{\link[hesim]{params_surv}} objects from the
\href{https://hesim-dev.github.io/hesim/}{hesim} package.
}
\description{
Extract parameters from a multi-state NMA for use with the data table returned by
\code{\link{create_transmod_data}}, which are used to simulate health state
transitions with a continuous time state transition model (CTSTM).
}
\details{
The "dist" attribute from \code{data} is used to select a survival
distribution from the \code{mstate_nma} element contained in \code{params}. The
covariates for the selected survival distribution in \code{mstate_nma}
that are also contained in \code{data} are extracted.
}
\seealso{
\code{\link{create_transmod}}, \code{\link{create_transmod_data}}
}
|
7eceb0e38dc683f8173387b0ac982d929640c484 | 37b8b20c14dec7fa435f7b05c18185e57d0dd38a | /example_bayesm.R | 84468eefa5f5fd979914fe44dd629cdf1313359c | [
"MIT"
] | permissive | 01shruti/R_Progams | b3737a086c6a72425794166eec1b003079fcd390 | df6cfe2acb61e5815e01157fa87a3d598339b533 | refs/heads/master | 2021-03-19T16:21:03.432524 | 2018-02-26T14:34:20 | 2018-02-26T14:34:20 | 81,755,903 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,668 | r | example_bayesm.R | install.packages("mlogit")
library(mlogit)

# Fit a mixed logit (random-parameters) model to the Electricity data:
# all six attributes receive independent normal random coefficients.
data("Electricity", package = "mlogit")
Electr <- mlogit.data(Electricity, id = "id", choice = "choice",
                      varying = 3:26, shape = "wide", sep = "")
Elec.mxl <- mlogit(choice ~ pf + cl + loc + wk + tod + seas | 0, Electr,
                   rpar = c(pf = 'n', cl = 'n', loc = 'n', wk = 'n', tod = 'n', seas = 'n'),
                   R = 100, halton = NA, print.level = 0, panel = TRUE)
summary(Elec.mxl)
########################################################################################
library(bayesm)

# Rebuild the same data in bayesm's hierarchical-MNL format: one list entry
# per respondent holding y = chosen alternative per task and X = stacked
# design matrix (4 alternatives x 6 attributes per choice task).
id <- levels(as.factor(Electricity$id))
nresp <- length(unique(id))
lgtdata <- vector("list", nresp)  # preallocate instead of growing a NULL list
for (i in seq_len(nresp)) {
  respdata <- Electricity[Electricity$id == id[i], ]
  ty <- respdata$choice
  nobs <- length(ty)
  # Wide columns 3:26 hold the six attributes of the four alternatives
  # interleaved: cols c(3,7,11,15,19,23) are alternative 1, c(4,8,...) alt 2,
  # and so on.  Build all rows at once rather than rbind-ing inside the loop
  # (the original pattern copies the matrix on every iteration, O(n^2)).
  tdesign <- do.call(rbind, lapply(seq_len(nobs), function(j) {
    rbind(as.matrix(respdata[j, c(3, 7, 11, 15, 19, 23)]),
          as.matrix(respdata[j, c(4, 8, 12, 16, 20, 24)]),
          as.matrix(respdata[j, c(5, 9, 13, 17, 21, 25)]),
          as.matrix(respdata[j, c(6, 10, 14, 18, 22, 26)]))
  }))
  lgtdata[[i]] <- list(y = ty, X = tdesign)
}
lgtdata  # prints the whole list; useful only for eyeballing a small dataset

# Hierarchical MNL with a one-component normal mixture prior;
# 2000 MCMC draws, keeping every 10th.
mcmc <- list(R = 2000, keep = 10)
out <- rhierMnlRwMixture(Data = list(p = 4, lgtdata = lgtdata),
                         Prior = list(ncomp = 1), Mcmc = mcmc)
plot(out$loglike, type = "l")
# Posterior mean of each respondent's coefficients, traced over kept draws
trace <- t(apply(out$betadraw, c(2, 3), mean))
matplot(trace, type = "l")
# Compare posterior means under different burn-in choices (draws are thinned,
# so kept-draw indices 51:200 correspond to iterations 510:2000, etc.)
beta.51_200 <- apply(out$betadraw[, , 51:200], 2, mean)
beta.101_200 <- apply(out$betadraw[, , 101:200], 2, mean)
beta.151_200 <- apply(out$betadraw[, , 151:200], 2, mean)
cbind(beta.51_200, beta.101_200, beta.151_200)
# Per-respondent posterior means (burn-in = first 100 kept draws), written out
estimate <- apply(out$betadraw[, , 101:200], c(1, 2), mean)
estimate2 <- cbind(matrix(id), estimate)
write.csv(estimate2, file = "estimate.csv")
|
c4f72aadb550d3c1306c56aeae8bee90b1206569 | a66e9e10a8d2d8919615009054cbe40c560930cc | /man/rel_states.Rd | 2ba783b850aaeb2fb1915344c1cee9ef8ba5b951 | [] | no_license | skranz/RelationalContracts | 91035b5ae6ce0d5804688da1b98655c84fdb15a2 | e29d080e8bfff5d9033f9b0f8b86ff5e002c6369 | refs/heads/master | 2021-06-14T17:48:23.383867 | 2021-03-05T13:27:25 | 2021-03-05T13:27:25 | 161,347,415 | 4 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,149 | rd | rel_states.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/relhold.R
\name{rel_states}
\alias{rel_states}
\alias{rel_state}
\title{Add one or multiple states. Allows to specify action spaces, payoffs and state transitions via functions}
\usage{
rel_states(
g,
x,
A1 = NULL,
A2 = NULL,
pi1,
pi2,
A.fun = NULL,
pi.fun = NULL,
trans.fun = NULL,
static.A1 = NULL,
static.A2 = NULL,
static.A.fun = NULL,
static.pi1,
static.pi2,
static.pi.fun = NULL,
x.T = NULL,
pi1.form,
pi2.form,
...
)
rel_state(
g,
x,
A1 = NULL,
A2 = NULL,
pi1,
pi2,
A.fun = NULL,
pi.fun = NULL,
trans.fun = NULL,
static.A1 = NULL,
static.A2 = NULL,
static.A.fun = NULL,
static.pi1,
static.pi2,
static.pi.fun = NULL,
x.T = NULL,
pi1.form,
pi2.form,
...
)
}
\arguments{
\item{g}{a relational contracting game created with rel_game}
\item{x}{The names of the states}
\item{A1}{The action set of player 1. A named list, like \code{A1=list(e1=1:10)}, where each element is a numeric or character vector.}
\item{A2}{The action set of player 2. See A1.}
\item{pi1}{Player 1's payoff. (Non standard evaluation)}
\item{pi2}{Player 2's payoff. (Non standard evaluation)}
\item{A.fun}{Alternative to specify A1 and A2, a function that returns action sets.}
\item{pi.fun}{Alternative to specify pi1 and pi2 as formula. A vectorized function that returns payoffs directly for all combinations of states and action profiles.}
\item{trans.fun}{A function that specifies state transitions}
\item{x.T}{Relevant when solving a capped game. Which terminal state shall be set in period T onwards. By default, we stay in state x.}
\item{pi1.form}{Player 1's payoff as formula with standard evaluation}
\item{pi2.form}{Player 2's payoff as formula with standard evaluation}
}
\value{
Returns the updated game
}
\description{
Add one or multiple states. Allows to specify action spaces, payoffs and state transitions via functions
}
\section{Functions}{
\itemize{
\item \code{rel_state}: rel_state is just a synonym for the rel_states. You may want to use it if you specify just a single state.
}}
|
5f193733ddc58355d6bf2deb203782360f16a579 | 7f04f1db9801f0c5dbe26c854412b4537ea26961 | /R/barplot_fixed.r | 3733affabc972d3cc114d2f571e3edf35c73c87e | [] | no_license | guilhermesena1/abismal_benchmark | 5037d6501600cf01811da65e3ad9a00c7c8e8fa7 | e1f3735c9fb4e49da51d4a99219847f862b41a8d | refs/heads/master | 2023-04-14T14:28:11.511423 | 2021-09-01T01:42:17 | 2021-09-01T01:42:17 | 311,887,623 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,210 | r | barplot_fixed.r | barplot.default <- function (height, width = 1, space = NULL, names.arg = NULL,
legend.text = NULL, beside = FALSE, horiz = FALSE, density = NULL,
angle = 45, col = NULL, border = par("fg"), main = NULL,
sub = NULL, xlab = NULL, ylab = NULL, xlim = NULL, ylim = NULL,
xpd = TRUE, log = "", axes = TRUE, axisnames = TRUE, cex.axis = par("cex.axis"),
cex.names = par("cex.axis"), inside = TRUE, plot = TRUE,
axis.lty = 0, offset = 0, add = FALSE, ann = !add && par("ann"),
args.legend = NULL, ...)
{
if (!missing(inside))
.NotYetUsed("inside", error = FALSE)
if (is.null(space))
space <- if (is.matrix(height) && beside)
c(0, 1)
else 0.2
space <- space * mean(width)
if (plot && axisnames && is.null(names.arg))
names.arg <- if (is.matrix(height))
colnames(height)
else names(height)
if (is.vector(height) || (is.array(height) && (length(dim(height)) ==
1))) {
height <- cbind(height)
beside <- TRUE
if (is.null(col))
col <- "grey"
}
else if (is.matrix(height)) {
if (is.null(col))
col <- gray.colors(nrow(height))
}
else stop("'height' must be a vector or a matrix")
if (is.logical(legend.text))
legend.text <- if (legend.text && is.matrix(height))
rownames(height)
stopifnot(is.character(log))
logx <- logy <- FALSE
if (log != "") {
logx <- length(grep("x", log)) > 0L
logy <- length(grep("y", log)) > 0L
}
if ((logx || logy) && !is.null(density))
stop("Cannot use shading lines in bars when log scale is used")
NR <- nrow(height)
NC <- ncol(height)
if (beside) {
if (length(space) == 2)
space <- rep.int(c(space[2L], rep.int(space[1L],
NR - 1)), NC)
width <- rep_len(width, NR)
}
else {
width <- rep_len(width, NC)
}
offset <- rep_len(as.vector(offset), length(width))
delta <- width/2
w.r <- cumsum(space + width)
w.m <- w.r - delta
w.l <- w.m - delta
log.dat <- (logx && horiz) || (logy && !horiz)
if (log.dat) {
if (min(height + offset, na.rm = TRUE) <= 0)
stop("log scale error: at least one 'height + offset' value <= 0")
if (logx && !is.null(xlim) && min(xlim) <= 0)
stop("log scale error: 'xlim' <= 0")
if (logy && !is.null(ylim) && min(ylim) <= 0)
stop("log scale error: 'ylim' <= 0")
rectbase <- if (logy && !horiz && !is.null(ylim))
ylim[1L]
else if (logx && horiz && !is.null(xlim))
xlim[1L]
else 0.9 * min(height, na.rm = TRUE)
}
else rectbase <- 0
if (!beside)
height <- rbind(rectbase, apply(height, 2L, cumsum))
rAdj <- offset + (if (log.dat)
0.9 * height
else -0.01 * height)
delta <- width/2
w.r <- cumsum(space + width)
w.m <- w.r - delta
w.l <- w.m - delta
if (horiz) {
if (is.null(xlim))
xlim <- range(rAdj, height + offset, na.rm = TRUE)
if (is.null(ylim))
ylim <- c(min(w.l), max(w.r))
}
else {
if (is.null(xlim))
xlim <- c(min(w.l), max(w.r))
if (is.null(ylim))
ylim <- range(rAdj, height + offset, na.rm = TRUE)
}
if (beside)
w.m <- matrix(w.m, ncol = NC)
if (plot) {
dev.hold()
opar <- if (horiz)
par(xaxs = "i", xpd = xpd)
else par(yaxs = "i", xpd = xpd)
on.exit({
dev.flush()
par(opar)
})
if (!add) {
plot.new()
plot.window(xlim, ylim, log = log, ...)
}
xyrect <- function(x1, y1, x2, y2, horizontal = TRUE,
...) {
if (horizontal)
rect(x1, y1, x2, y2, ...)
else rect(y1, x1, y2, x2, ...)
}
if (beside)
xyrect(rectbase + offset, w.l, c(height) + offset,
w.r, horizontal = horiz, angle = angle, density = density,
col = col, border = border)
else {
for (i in 1L:NC) {
xyrect(height[1L:NR, i] + offset[i], w.l[i],
height[-1, i] + offset[i], w.r[i], horizontal = horiz,
angle = angle, density = density, col = col,
border = border)
}
}
if (axisnames && !is.null(names.arg)) {
at.l <- if (length(names.arg) != length(w.m)) {
if (length(names.arg) == NC)
colMeans(w.m)
else stop("incorrect number of names")
}
else w.m
axis(if (horiz)
2
else 1, at = at.l, labels = names.arg, lty = axis.lty,
cex.axis = cex.names, ...)
}
if (!is.null(legend.text)) {
legend.col <- rep_len(col, length(legend.text))
if ((horiz & beside) || (!horiz & !beside)) {
legend.text <- rev(legend.text)
legend.col <- rev(legend.col)
density <- rev(density)
angle <- rev(angle)
}
xy <- par("usr")
if (is.null(args.legend)) {
legend(xy[2L] - xinch(0.1), xy[4L] - yinch(0.1),
legend = legend.text, angle = angle, density = density,
fill = legend.col, xjust = 1, yjust = 1)
}
else {
args.legend1 <- list(x = xy[2L] - xinch(0.1),
y = xy[4L] - yinch(0.1), legend = legend.text,
angle = angle, density = density, fill = legend.col,
xjust = 1, yjust = 1)
args.legend1[names(args.legend)] <- args.legend
do.call("legend", args.legend1)
}
}
if (ann)
title(main = main, sub = sub, xlab = xlab, ylab = ylab,
...)
if (axes)
axis(if (horiz)
1
else 2, cex.axis = cex.axis, ...)
invisible(w.m)
}
else w.m
}
|
59a73162c1ee4fdb489958ead4733b51b31cf7c5 | 746610d7fe75e163cf9344622cbec7ebdad29355 | /Plot4.R | dbe1607046e879498f0101217106573e3de03ea6 | [] | no_license | LeonardoSanna/Exploratory-Data-Analysis-Final-Project | af53dfd21ffadd18b35b7507a4cd10e47ba7981e | ce4bd86e32475fcdaf90811bd38ceb7d8a12e553 | refs/heads/master | 2022-06-18T11:26:29.659865 | 2020-05-08T22:53:24 | 2020-05-08T22:53:24 | 262,367,860 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 876 | r | Plot4.R | library(dplyr)
library(ggplot2)
#download file and unzip file
data_source <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
file <- "exploratory-final.zip"
download.file(data_source, destfile = file)
dataset <- unzip(file)
source <- readRDS("Source_Classification_Code.rds")
summary <- readRDS("summarySCC_PM25.rds")
#question 4
source_coal <- source %>%
filter(grepl("coal", EI.Sector, ignore.case=TRUE)) %>%
select(SCC)
coal_by_year <- summary %>%
filter(SCC %in% source_coal$SCC) %>%
group_by(year) %>%
summarize(tot_em = sum(Emissions))
ggplot(coal_by_year, aes(x=year, y=tot_em/1000)) +
geom_bar(stat="identity", fill = "#660000") +
labs(x="Year", y="PM2.5 Emissions (Tons*1000)") +
ggtitle("USA PM2.5 Emissions from Coal Sources")
dev.copy(png, "plot4.png")
dev.off()
|
04d799f3908342bbd6a797735819a02f20d02335 | ff1ef9ad06dc81de72ac27c95d4142fb921eeb96 | /man/glob_obs_vec.Rd | 0b2b85a11e00bdb7d2c76b5c54c05cbad0a9e4b5 | [
"MIT"
] | permissive | be-green/fqr | 03b83f077644c2d1ea00209eea7eca68dacb9068 | 5ecbb91610be80d3bcc1bb8264c62b9985e4032a | refs/heads/main | 2023-08-02T03:06:32.988501 | 2021-10-02T15:29:08 | 2021-10-02T15:29:08 | 367,083,805 | 2 | 1 | null | null | null | null | UTF-8 | R | false | true | 431 | rd | glob_obs_vec.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{glob_obs_vec}
\alias{glob_obs_vec}
\title{Glob observations w/ residuals above a certain magnitude}
\usage{
glob_obs_vec(y, r, thresh)
}
\arguments{
\item{y}{design matrix}
\item{r}{vector of residuals}
\item{thresh}{threshold to use when computing globs}
}
\description{
Glob observations w/ residuals above a certain magnitude
}
|
67aa4ee8d155f0a73b6d41751673ed5048a60c2d | 5b035c7fad4414d89d8a57d3840c4ee1cbb2d837 | /src/field_data_plots.R | 86994bd25d9136a1deb6cf5ef8b520b11423fba6 | [] | no_license | jbukoski/thailand_mangroves | 00a98330a454e65c40ad969d63de9d05543bc2d8 | 88ba36be229fab128f4c05c8479abe79cd720c38 | refs/heads/master | 2021-04-06T08:51:20.386494 | 2018-12-22T09:48:25 | 2018-12-22T09:48:25 | 124,941,309 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,794 | r | field_data_plots.R | # R file to generate plots corresponding to forest structure and C stocks
library(tidyverse)
library(ggthemes)
library(gridExtra)
# Specify data directory
# NOTE(review): absolute local path; parameterise if this script moves machines.
data_dir <- "/home/jbukoski/research/data/thailand_stocks/output/"
#----------------------------------------------------
# Read in data summaries to plot
# c_summary holds one row per site with per-pool means and standard errors
# (columns *_avg and *_se for agc/bgc/cwd/soc -- inferred from usage below)
c_summary <- read_csv(paste0(data_dir, "c_summary.csv"))
#----------------------------------------------------
# Plot C summary values
# Reshape the wide per-site summary (one column per pool mean / SE) into a
# long table with one row per site x pool, carrying each pool's matching SE.
c_2_plot <- c_summary %>%
  rowwise() %>%
  # Site totals: CWD is included only for the two intact mangrove sites
  # (presumably not measured in the aquaculture ponds -- confirm).
  # NOTE(review): the inner sum() is redundant -- the `+` chain already sums;
  # harmless, but worth tidying.
  mutate(ttl = ifelse(site == "Krabi" | site == "Nakorn",
                      agc_avg + bgc_avg + cwd_avg + soc_avg,
                      agc_avg + bgc_avg + soc_avg),
         ttl_se = ifelse(site == "Krabi" | site == "Nakorn",
                         sqrt(sum(agc_se^2 + bgc_se^2 + cwd_se^2 + soc_se^2)),
                         sqrt(sum(agc_se^2 + bgc_se^2 + soc_se^2)))) %>%
  # First gather: pool means -> (pool, mean); SE columns stay wide
  gather(-site, -agc_se, -bgc_se, -cwd_se, -soc_se, -ttl_se,
         key = "pool", value = "mean") %>%
  # Second gather crosses every mean with every SE; the
  # filter(pool == pool_se) below keeps only the matched pairs
  gather(-site, -pool, -mean, key = "se_pool", value = "se") %>%
  arrange(site, pool) %>%
  separate(pool, into = c("pool", "trash"), sep = "_") %>%
  select(-trash) %>%
  separate(se_pool, into = c("pool_se", "trash"), sep = "_") %>%
  select(-trash) %>%
  filter(pool == pool_se) %>%
  select(-pool_se) %>%
  rowwise %>%
  # Human-readable site labels (KRE / PPM expansions assumed -- confirm)
  mutate(site = ifelse(site == "krabi_aqua", "KRE Aquaculture",
                       ifelse(site == "Krabi", "KRE Mangrove",
                              ifelse(site == "Nakorn", "PPM Mangrove", "PPM Aquaculture")))) %>%
  # Placeholder row so PPM Aquaculture keeps a slot on the x-axis even
  # though it has no aboveground biomass data
  add_row(site = "PPM Aquaculture", pool = "agc")

# Error-bar data: the site-total rows only
c_2_plot_err <- c_2_plot %>%
  filter(pool == "ttl")

# Stacked bar chart of the pools per site with a total +/- SE error bar.
# NOTE(review): ymin = mean - 10 is a fixed 10-unit offset rather than
# mean - se -- looks like a plotting workaround; confirm intent.
c_2_plot %>%
  filter(pool != "ttl") %>%
  ggplot(aes(x = site, y = mean)) +
  geom_errorbar(data = c_2_plot_err, aes(ymin = mean - 10, ymax = mean + se), width = 0.05) +
  geom_bar(aes(fill = pool), stat = "identity", width = 0.5, color = "black") +
  scale_fill_colorblind(labels = c("Aboveground biomass", "Belowground biomass",
                                   "Coarse woody debris", "Soil organic carbon")) +
  theme_bw() +
  theme(legend.position = "bottom",
        axis.title.x = element_blank()) +
  ylab("Organic C (Mg/ha)") +
  guides(fill = guide_legend(title = NULL)) +
  scale_y_continuous(breaks = seq(0, 1400, by = 200))
#-------------------------------------------------------------
# Plot soil parameters by depth interval
# depth_int is the lower bound of each sampling interval in cm, negated so
# that depth plots downward on the y-axis.  The *1000 rescaling of c_dens
# appears to convert g C cm^-3 to mg C cm^-3 -- confirm against the axis label.
soil <- read_csv(paste0(data_dir, "soil_intervals.csv")) %>%
  mutate(depth_int = ifelse(interval == 1, -15,
                            ifelse(interval == 2, -30,
                                   ifelse(interval == 3, -50,
                                          ifelse(interval == 4, -100, -200))))) %>%
  rename(c_dens = int_c_dens,
         c_dens_se = int_c_dens_se) %>%
  mutate(c_dens = c_dens * 1000,
         c_dens_se = c_dens_se * 1000)

# Depth-profile panels.  In each panel: solid blue = Krabi mangrove,
# dashed blue = Krabi aquaculture, black X = Nakorn.  Every layer passes its
# own filtered copy of `soil` via data =, so the piped/filtered data before
# ggplot() only seeds the base plot.
# NOTE(review): the mutate(interval = interval * -1) steps are dead -- the
# plots map depth_int, not interval.
p_bd <- soil %>%
  select(site, interval, bd, bd_se, depth_int) %>%
  mutate(interval = interval * -1) %>%
  filter(site == "Krabi") %>%
  ggplot(aes(y = depth_int, x = bd)) +
  geom_errorbarh(data = filter(soil, site == "Krabi"),
                 aes(xmax = bd + bd_se, xmin = bd - bd_se), height = 3, color = "blue") +
  geom_point(data = filter(soil, site == "Krabi"), color = "blue", shape = 19) +
  geom_path(data = filter(soil, site == "Krabi"), color = "blue") +
  geom_errorbarh(data = filter(soil, site == "krabi_aqua"),
                 aes(xmax = bd + bd_se, xmin = bd - bd_se), height = 3, color = "blue") +
  geom_point(data = filter(soil, site == "krabi_aqua"), color = "blue", shape = 19) +
  geom_path(data = filter(soil, site == "krabi_aqua"), linetype = 2, color = "blue" ) +
  geom_errorbarh(data = filter(soil, site == "Nakorn"),
                 aes(xmax = bd + bd_se, xmin = bd - bd_se), height = 3) +
  geom_point(data = filter(soil, site == "Nakorn"), shape = 4, fill = "black") +
  geom_path(data = filter(soil, site == "Nakorn")) +
  theme_tufte() +
  scale_y_continuous(breaks = seq(0, -200, by = -25),
                     labels = c("0", "", "50", "", "100", "", "150", "", "200")) +
  scale_x_continuous(breaks = seq(0.25, 1.5, by = 0.25), position = "top") +
  ylab("Depth (cm)") +
  xlab(expression("Bulk Density (g cm"^-3*")")) +
  theme(axis.line.x = element_line(color="black", size = 0.2),
        axis.line.y = element_line(color="black", size = 0.2))

# Percent organic carbon panel (middle); y-axis text suppressed because it
# shares the depth axis with p_bd in the grid.arrange below.
# NOTE(review): margin(t = 2000) looks like a typo (2000 points of top
# margin); probably meant a small value such as 20 -- confirm visually.
p_poc <- soil %>%
  select(site, interval, poc, poc_se, depth_int) %>%
  mutate(interval = interval * -1) %>%
  filter(site == "Krabi") %>%
  ggplot(aes(y = depth_int, x = poc)) +
  geom_errorbarh(data = filter(soil, site == "Krabi"),
                 aes(xmax = poc + poc_se, xmin = poc - poc_se), height = 3, color = "blue") +
  geom_point(data = filter(soil, site == "Krabi"), color = "blue", shape = 19) +
  geom_path(data = filter(soil, site == "Krabi"), color = "blue") +
  geom_errorbarh(data = filter(soil, site == "krabi_aqua"),
                 aes(xmax = poc + poc_se, xmin = poc - poc_se), height = 3, color = "blue") +
  geom_point(data = filter(soil, site == "krabi_aqua"), color = "blue", shape = 19) +
  geom_path(data = filter(soil, site == "krabi_aqua"), linetype = 2, color = "blue" ) +
  geom_errorbarh(data = filter(soil, site == "Nakorn"),
                 aes(xmax = poc + poc_se, xmin = poc - poc_se), height = 3) +
  geom_point(data = filter(soil, site == "Nakorn"), shape = 4, fill = "black") +
  geom_path(data = filter(soil, site == "Nakorn")) +
  theme_tufte() +
  ylab("Depth (cm)") +
  xlab(expression("Percent Organic Carbon (%)"^{ }*"")) +
  scale_y_continuous(breaks = seq(0, -200, by = -25)) +
  scale_x_continuous(breaks = seq(1, 6, by = 1), position = "top") +
  theme(axis.title.x = element_text(margin = margin(t = 2000)),
        axis.line.x = element_line(color="black", size = 0.2),
        axis.line.y = element_line(color="black", size = 0.2),
        axis.title.y = element_blank(),
        axis.text.y = element_blank())

# Soil carbon density panel (right); note this one is NOT pre-filtered to
# Krabi before ggplot(), unlike p_bd / p_poc (layers still supply their own
# data, so the rendered output matches)
p_c_dens <- soil %>%
  select(site, interval, c_dens, c_dens_se, depth_int) %>%
  mutate(interval = interval * -1) %>%
  ggplot(aes(y = depth_int, x = c_dens)) +
  geom_errorbarh(data = filter(soil, site == "Krabi"),
                 aes(xmax = c_dens + c_dens_se, xmin = c_dens - c_dens_se), height = 3, color = "blue") +
  geom_point(data = filter(soil, site == "Krabi"), color = "blue", shape = 19) +
  geom_path(data = filter(soil, site == "Krabi"), color = "blue") +
  geom_errorbarh(data = filter(soil, site == "krabi_aqua"),
                 aes(xmax = c_dens + c_dens_se, xmin = c_dens - c_dens_se), height = 3, color = "blue") +
  geom_point(data = filter(soil, site == "krabi_aqua"), color = "blue", shape = 19) +
  geom_path(data = filter(soil, site == "krabi_aqua"), linetype = 2, color = "blue" ) +
  geom_errorbarh(data = filter(soil, site == "Nakorn"),
                 aes(xmax = c_dens + c_dens_se, xmin = c_dens - c_dens_se), height = 3) +
  geom_point(data = filter(soil, site == "Nakorn"), shape = 4, fill = "black") +
  geom_path(data = filter(soil, site == "Nakorn")) +
  theme_tufte() +
  ylab("Depth (cm)") +
  xlab(expression("Soil Carbon Density (mg C cm"^-3*")")) +
  scale_x_continuous(position = "top", breaks = seq(0, 40, by = 5)) +
  scale_y_continuous(breaks = seq(0, -200, by = -25)) +
  theme(axis.line.x = element_line(color="black", size = 0.2),
        axis.line.y = element_line(color="black", size = 0.2),
        axis.title.y = element_blank(),
        axis.text.y = element_blank())

# Assemble the three depth profiles side by side
grid.arrange(p_bd, p_poc, p_c_dens, nrow = 1)
#-----------------------------------------------------------------------------
# Generate plots
# NOTE(review): this section appears to be stale code from an earlier data
# layout.  The c_2_plot built above has no `plot`, `plot_soc`, `min_soc` or
# `max_soc` columns, and `soil_site` is not defined anywhere in this file;
# both chunks will error if run as-is.
c_2_plot %>%
  filter(plot <= 7) %>%
  ggplot(aes(x = plot, y = plot_soc, col = site)) +
  geom_point(size = 3) +
  geom_errorbar(aes(ymin = min_soc,
                    ymax = max_soc), width = 0.15,
                size = 1.1) +
  labs(y = "Soil Organic Carbon (Mg C/ha)",
       x = "Plot") +
  theme_bw() +
  theme(text = element_text(size = 24)) +
  ylim(0, 1500) +
  scale_colour_discrete(name="Site",
                        labels = c("Krabi", "Nakorn"))

# Site-level SOC means +/- 1 SE (depends on the undefined `soil_site`)
soil_site %>%
  ggplot(aes(x = site, y = soc)) +
  geom_errorbar(aes(ymin = soc - soc_se,
                    ymax = soc + soc_se), width = 0.05) +
  geom_point() +
  ggtitle("Soil Carbon Plot Means +/- 1 St. Error") +
  ylab("Soil Organic Carbon (Mg/ha)") +
  xlab("Site") +
  theme_bw() +
  ylim(0, 1000)
#------------------------------------------------------------------------------
# Add other relevant parameters to characterize the plot (i.e. mean DBH and Tons ag.biomass)
# NOTE(review): `trees_ci`, `soil_site` and `site_cwd` are not defined in
# this file -- they must come from a companion script; this section cannot
# run standalone.
error_summary <- trees_ci %>%
  left_join(soil_site, by = c("site" = "site")) %>%
  left_join(site_cwd, by = c("site" = "site")) %>%
  dplyr::select(site, ag_se, bg_se, soc_se, cwd_se) %>%
  # NOTE(review): this mutate is a no-op (each column reassigned to itself);
  # possibly a leftover from a unit conversion that was removed
  mutate(ag_se = ag_se,
         bg_se = bg_se,
         soc_se = soc_se,
         cwd_se = cwd_se) %>%
  gather(pool, error, -site) %>%
  arrange(site) %>%
  # NOTE(review): relabels rows positionally, relying on the exact row order
  # produced by gather + arrange(site); fragile if columns are ever reordered
  mutate(pool = rep(c("agc", "bgc", "soc", "cwd"), 2))

# Per-site pool values for the stacked chart.  Belowground pools (bgc, soc)
# are negated so their bars hang below the zero line; cwd * 0.5 converts
# CWD biomass to carbon (0.5 C fraction assumed -- confirm).
summary <- trees_ci %>%
  dplyr::select(site, mean_biomass, ag_mean, bg_mean) %>%
  left_join(soil_site, by = c("site" = "site")) %>%
  left_join(site_cwd, by = c("site" = "site")) %>%
  mutate(bgc = bg_mean *.46 * -1,
         agc = ag_mean *.46,
         soc = soc * -1,
         cwd = cwd * 0.5) %>%
  dplyr::select(site, agc, bgc, soc, cwd) %>%
  gather(pool, value, -site) %>%
  left_join(error_summary, by = c("site", "pool")) %>%
  mutate(error2 = error)  # NOTE(review): error2 duplicates error; unused below?
##---------------------------------------------------------------------------------
## Visualizing the data
# Ordering of stacked bars is determined by levels of the factor
# NOTE(review): the error-bar endpoints below are hard-coded numbers that
# must mirror the site totals (e.g. -888.9 = Krabi belowground total);
# re-derive from the data if the inputs ever change.
test_dat <- tibble(
  site = c(rep("Krabi", 4), rep("Nakorn", 4)),
  pool = rep(c(rep("Above", 2), rep("Below", 2)), 2),
  min_err = c(72.3, NA, -888.9 - 53.41, NA, 108 + 8.10, NA, -39.9 - 251 - 18 - 8.17, NA),
  max_err = c(72.3 + 18.88, NA, -888.9, NA, 108 + 8.10 + 23.2 + 2.85, NA, -39.9 - 251, NA)
)
# Stacked bars above/below zero; the second geom_bar only re-draws the black
# outlines without adding a legend entry
p1 <- summary %>%
  mutate(pool = factor(pool, levels = c("agc", "cwd", "soc", "bgc"))) %>%
  ggplot(aes(x = site, y = value, fill = pool)) +
  geom_bar(stat = "identity", width = 0.3) +
  geom_bar(stat = "identity", width = 0.3, color="black", show.legend=FALSE)+
  theme_bw() +
  xlab("Site") +
  ylab("Carbon storage (Mg C/ha)") +
  ylim(-1000, 200) +
  scale_fill_discrete(name="Ecosystem C Pools",
                      breaks = c("agc", "cwd", "bgc", "soc"),
                      labels = c("Aboveground Biomass",
                                 "Coarse Woody Debris",
                                 "Belowground Biomass",
                                 "Soil Organic Carbon")) +
  theme(text = element_text(size = 22))
# Overlay the hand-computed error bars and their caps.
# NOTE(review): aes(x = test_dat$site, ...) references an external data frame
# by $ inside aes() -- an anti-pattern; pass data = test_dat instead.
p1 +
  geom_linerange(aes(x = test_dat$site,
                     ymin = test_dat$min_err,
                     ymax = test_dat$max_err)) +
  geom_segment(aes(x = 0.95, xend = 1.05, y = -888.9 - 53.41, yend = -888.9 - 53.41)) +
  geom_segment(aes(x = 0.95, xend = 1.05, y = 72.3 + 18.88, yend = 72.3 + 18.88)) +
  geom_segment(aes(x = 1.95, xend = 2.05,
                   y = -39.9 - 251 - 18 - 8.17,
                   yend = -39.9 - 251 - 18 - 8.17)) +
  geom_segment(aes(x = 1.95, xend = 2.05,
                   y = 108 + 8.10 + 23.2 + 2.85,
                   yend = 108 + 8.10 + 23.2 + 2.85))
#-----------------------------------------------------------------------------------
# Build a summary table for visualizations of the data at the subplot level
# Biomass per hectare is scaled up from a circular subplot of radius 7 m
# (area pi * 7^2). distinct() collapses the per-tree rows produced by
# mutate() back to one row per site/plot/subplot.
sp_summary <- trees %>%
group_by(site, plot, subplot) %>%
left_join(site_areas, by = "site") %>%
mutate(agb_tot = area*(sum(agb)/(pi*(7^2))),
agb_ha = (10*agb_tot)/area,
bgb_tot = area*(sum(bgb)/(pi*(7^2))),
bgb_ha = (10*bgb_tot)/area,
n_sps = nlevels(factor(sps_code))) %>%
left_join(meta, by = c("site" = "Site", "plot" = "Plot", "subplot" = "Subplot")) %>%
left_join(soil_summary, by = c("site", "plot", "subplot")) %>%
mutate(tot_c = soc + agb_ha + bgb_ha,
distance = `Distance from shoreline`) %>%
dplyr::select(site, plot, subplot, agb_tot, agb_ha, bgb_tot, bgb_ha, soc, tot_c, distance, n_sps) %>%
distinct()
# Per-plot biomass carbon with standard errors.
# NOTE(review): biomass is converted to carbon with 0.43 here but 0.46 in
# the pool summary above -- confirm which fraction is intended.
# `plot_size` is taken from the calling environment.
trees %>%
filter(plot <= 7) %>%
group_by(site, plot) %>%
summarize(n = n(),
agb_se = sd(agb)/sqrt(n) * 0.43,
agb = 10000*sum(agb)/plot_size/1000 * 0.43,
bgb_se = sd(bgb)/sqrt(n) * 0.43,
bgb = 10000*sum(bgb)/plot_size/1000 * 0.43) %>%
ggplot(aes(x = plot, y = agb + bgb, col = site)) +
geom_point(size = 3) +
geom_errorbar(aes(ymin = agb + bgb - agb_se - bgb_se,
ymax = agb + bgb + agb_se + bgb_se), width = 0.15,
size = 1.1) +
labs(y = "Biomass Carbon (Mg C/ha)",
x = "Plot") +
theme_bw() +
theme(text = element_text(size = 24)) +
scale_colour_discrete(name="Site",
labels = c("Krabi", "Nakorn"))
|
821929e639cc022b2a09b04dbf064b31db98c7fe | 5a5da8fd1b51f17c5a20c6002b7993dbe61a55b3 | /man/tnf_train.Rd | 07e938f6e9af0f05d2c5de054186b0894fc0397f | [] | no_license | news-r/decipher | 5c03ef30a8d58a5bdd7388a9b05e02cb10b0b3d8 | eb002682d761368301f52e19f8cb87cc864c1468 | refs/heads/master | 2020-04-05T00:45:18.260373 | 2019-07-16T20:33:37 | 2019-07-16T20:33:37 | 156,409,227 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,936 | rd | tnf_train.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/TokenNameFinder.R
\name{tnf_train_}
\alias{tnf_train_}
\alias{tnf_train}
\title{Train name finder model}
\usage{
tnf_train_(model, lang, data, feature.gen = NULL, name.types = NULL,
sequence.codec = NULL, factory = NULL, resources = NULL,
params = NULL, encoding = NULL, type = NULL)
tnf_train(model, lang, data, feature.gen = NULL, name.types = NULL,
sequence.codec = NULL, factory = NULL, resources = NULL,
params = NULL, encoding = NULL, type = NULL)
}
\arguments{
\item{model}{Full path to output model file.}
\item{lang}{Language which is being processed.}
\item{data}{Data to be used, full path to file, usually \code{.txt}.}
\item{feature.gen}{Path to the feature generator descriptor file.}
\item{name.types}{Name types to use for training.}
\item{sequence.codec}{sequence codec used to code name spans.}
\item{factory}{A sub-class of \code{TokenNameFinderFactory}.}
\item{resources}{The resources directory.}
\item{params}{Training parameters file.}
\item{encoding}{Encoding for reading and writing text, if absent the system default is used.}
\item{type}{The type of the token name finder model.}
}
\value{
Full path to the \code{model} for convenience.
}
\description{
Train a name finder model.
}
\examples{
\dontrun{
# get working directory
# need to pass full path
wd <- getwd()
# Training to find "WEF"
data <- paste("This organisation is called the <START:wef> World Economic Forum <END>",
"It is often referred to as <START:wef> Davos <END> or the <START:wef> WEF <END> .")
# train model
tnf_train(model = paste0(wd, "/model.bin"), lang = "en", data = data)
# Same with .txt files.
# Save the above as file
write(data, file = "input.txt")
# Trains the model and returns the full path to the model
model <- tnf_train_(model = paste0(wd, "/wef.bin"), lang = "en",
data = paste0(wd, "/input.txt"), type = "wef")
}
}
|
90e3be8ee7f6a8fc0b49e6e91ecb27db594387c3 | 56b8468245923e5d648e5e254266d8945fe1dbfa | /simulation/SimpleTest3 Time complexity of Optimal Partitioning and PELT.R | 442fbe1c69e44c7d6ec02fe43d8d4e64ee766d90 | [] | no_license | lpishchagina/OptPartitioning2D | 4c2e71d66001690b6ca3f0016c16a7c3e2d78d27 | 25f15670b6bff2a2f2f104d2b8989c66fcdecf03 | refs/heads/master | 2023-02-28T03:10:39.450800 | 2021-02-07T12:33:04 | 2021-02-07T12:33:04 | 332,521,131 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,653 | r | SimpleTest3 Time complexity of Optimal Partitioning and PELT.R | #Time complexity. Optimal Partitioning and PELT algorithms
devtools::install_github("lpishchagina/OptPartitioning2D")
library(OptPartitioning2D)
#Function OneStep returns the execution time of a given algorithm
#' Time one run of a 2-D changepoint algorithm.
#'
#' @param data1,data2 Numeric vectors holding the two coordinates of the series.
#' @param penalty Penalty value forwarded to OptPart2D().
#' @param type Either "null" (Optimal Partitioning) or "pruning" (PELT).
#'   An invalid value now raises a clear error instead of "object 't' not
#'   found" as in the original implementation.
#' @param func Kept for backward compatibility with existing call sites;
#'   only "OptPart2D" is supported and the argument is currently ignored.
#' @return User CPU time in seconds (first element of system.time(),
#'   i.e. user.self -- unchanged from the original measurements).
OneStep <- function(data1, data2, penalty, type, func = "OptPart2D")
{
  type <- match.arg(type, choices = c("null", "pruning"))
  system.time(OptPart2D(data1, data2, penalty, type = type))[[1]]
}
#Test5: One time complexity test
# Single timing comparison: one run of Optimal Partitioning ("null") versus
# one run of PELT ("pruning") on a simulated 2-D series of length 5000 with
# a changepoint every 100 observations.
library(microbenchmark)
library("ggplot2")
T5_n <- 5000
# Changepoint locations: every 100 observations up to T5_n
T5_chp <- seq(from = 100, to = T5_n, by = 100)
T5_chp
# Segment means for the two coordinates, drawn from Poisson(10)/Poisson(5)
T5_mu1 <- rpois(T5_n/100, 10)
T5_mu1
T5_mu2 <- rpois(T5_n/100, 5)
T5_mu2
T5_sigma <- 1
# Standard 2 * sigma * log(n) penalty
T5_penalty <- 2 * T5_sigma * log(T5_n)
T5_data <- GenData2D(T5_n, T5_chp, T5_mu1, T5_mu2, T5_sigma, T5_sigma)
T5_timeOptPart <- OneStep(T5_data[1,], T5_data[2,], T5_penalty, type = "null",func = "OptPart2D")
T5_timePELT <- OneStep(T5_data[1,], T5_data[2,], T5_penalty, type = "pruning",func = "OptPart2D")
# Results pasted from one interactive run (timings are machine-dependent):
T5_timeOptPart
#[1]0.16
T5_timePELT
#[1] 0.03
# Speedup factor of PELT over Optimal Partitioning in that run
T5_timeOptPart/T5_timePELT
#[1] 5.333333
##Test6: Iterations ( T5_data )
# Accumulate T6_nStep repeated timings on the same data set (from Test5)
# to smooth out run-to-run variation, then compare total times.
T6_nStep <- 10
T6_timeOptPart <- 0
T6_timePELT <- 0
for(i in 1:T6_nStep){T6_timeOptPart <- T6_timeOptPart + OneStep(T5_data[1,], T5_data[2,], T5_penalty, type = "null",func = "OptPart2D")}
for(i in 1:T6_nStep){T6_timePELT <- T6_timePELT + OneStep(T5_data[1,], T5_data[2,], T5_penalty, type = "pruning",func = "OptPart2D")}
# Ratio of accumulated times (pasted from one interactive run):
T6_timeOptPart/T6_timePELT
#[1] 9.25
##Test7: microbenchmark ( T5_data, T7_data )
# Benchmark both algorithms 50 times each with microbenchmark, on the
# length-5000 series from Test5 and on a fresh length-10000 series.
library(microbenchmark)
library("ggplot2")
T7_n <- 10000
T7_chp <- seq(from = 100, to = T7_n, by = 100)
T7_chp
T7_mu1 <- rpois(T7_n/100, 10)
T7_mu1
T7_mu2 <- rpois(T7_n/100, 5)
T7_mu2
T7_sigma <- 1
T7_penalty <- 2 * T7_sigma * log(T7_n)
T7_data <- GenData2D(T7_n, T7_chp, T7_mu1, T7_mu2, T7_sigma, T7_sigma)
# T5_n = 5000
T7_resT5_n <- microbenchmark( OneStep(T5_data[1,], T5_data[2,], T5_penalty, type = "null",func = "OptPart2D"), OneStep(T5_data[1,], T5_data[2,], T5_penalty, type = "pruning",func = "OptPart2D"), times = 50)
T7_resT5_n
#Unit: milliseconds
# expr
# OneStep(T5_data[1, ], T5_data[2, ], T5_penalty, type = "null", func = "OptPart2D")
# OneStep(T5_data[1, ], T5_data[2, ], T5_penalty, type = "pruning", func = "OptPart2D")
# min lq mean median uq max neval
# 258.2255 273.5639 351.7033 298.9834 373.9682 623.9380 50
# 112.8713 128.7546 158.5457 141.4346 203.9433 224.7527 50
autoplot(T7_resT5_n)
# T7_n = 10000
T7_resT7_n <- microbenchmark( OneStep(T7_data[1,], T7_data[2,], T7_penalty, type = "null",func = "OptPart2D"), OneStep(T7_data[1,], T7_data[2,], T7_penalty, type = "pruning",func = "OptPart2D"), times = 50)
T7_resT7_n
# Pasted console output; the "Unit:" line below was previously left
# uncommented, which made sourcing this script fail with
# "object 'Unit' not found" at this point.
#Unit: milliseconds
# expr min
# OneStep(T7_data[1, ], T7_data[2, ], T7_penalty, type = "null", func = "OptPart2D") 1378.7331
# OneStep(T7_data[1, ], T7_data[2, ], T7_penalty, type = "pruning", func = "OptPart2D") 179.5884
# lq mean median uq max neval
# 1518.303 2099.470 1672.9987 2099.0085 6110.8055 50
# 202.246 234.018 221.5641 260.0976 337.5985 50
autoplot(T7_resT7_n)
#estimate the difference in running time
##Test8: Time complexity (the plot of the mean running time with respect to data length).
#Run T8_nRep times Optimal Partitioning and PELT algorithms of each value of the vector_n vector of length T8_nStep. We show the plot of the mean running time with respect to data length.
#
#
T8_nStep <- 10
# Data lengths to benchmark: 1000, 2000, ..., 10000
T8_vect_n <- seq(from = 1000, to = 10000, length.out = T8_nStep)
T8_vect_n
#[1] 1000 2000 3000 4000 5000 6000 7000 8000 9000 10000
T8_nRep <- 10
# One row per data length; column 1 holds n, columns 2..11 the repetitions
T8_resOptPart <- data.frame(matrix(0, T8_nStep, T8_nRep + 1))
colnames(T8_resOptPart) <- c("n", paste0("Rep",1:T8_nRep))
T8_resPELT <- data.frame(matrix(0, T8_nStep, T8_nRep + 1))
colnames(T8_resPELT) <- c("n", paste0("Rep",1:T8_nRep))
T8_sigma <- 1
# For each length: simulate a series with a changepoint every 100 points,
# then time T8_nRep runs of each algorithm on the SAME data set.
for(i in 1:length(T8_vect_n))
{
T8_chp <- seq(from = 100, to = T8_vect_n[i], by = 100)
T8_mu1 <- rpois(T8_vect_n[i]/100, 10)
T8_mu2 <- rpois(T8_vect_n[i]/100, 5)
T8_penalty <- 2 * T8_sigma * log(T8_vect_n[i])
T8_data <- GenData2D(T8_vect_n[i], T8_chp, T8_mu1, T8_mu2, T8_sigma, T8_sigma)
T8_resOptPart[i,] <- c(T8_vect_n[i], replicate(T8_nRep, OneStep(T8_data[1,], T8_data[2,], T8_penalty, type = "null",func = "OptPart2D")))
T8_resPELT[i,] <- c(T8_vect_n[i], replicate(T8_nRep, OneStep(T8_data[1,], T8_data[2,], T8_penalty, type = "pruning",func = "OptPart2D")))
}
# Results pasted from one interactive run (seconds, machine-dependent):
T8_resOptPart
#       n Rep1 Rep2 Rep3 Rep4 Rep5 Rep6 Rep7 Rep8 Rep9 Rep10
#1   1000 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00  0.00
#2   2000 0.07 0.03 0.04 0.03 0.01 0.03 0.03 0.03 0.04  0.04
#3   3000 0.05 0.03 0.03 0.04 0.06 0.03 0.03 0.05 0.06  0.05
#4   4000 0.11 0.09 0.10 0.07 0.09 0.11 0.11 0.11 0.11  0.11
#5   5000 0.16 0.19 0.18 0.16 0.20 0.14 0.19 0.14 0.16  0.16
#6   6000 0.23 0.22 0.20 0.25 0.18 0.25 0.31 0.30 0.25  0.28
#7   7000 0.37 0.45 0.47 0.44 0.49 0.42 0.45 0.45 0.53  0.49
#8   8000 0.78 0.79 0.79 0.73 0.78 0.86 0.73 0.83 0.82  0.77
#9   9000 0.99 0.97 1.02 1.00 0.98 1.04 1.09 1.47 1.18  1.09
#10 10000 1.55 1.65 1.51 1.75 1.44 1.46 1.45 1.50 1.39  1.49
T8_resPELT
#       n Rep1 Rep2 Rep3 Rep4 Rep5 Rep6 Rep7 Rep8 Rep9 Rep10
#1   1000 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00  0.00
#2   2000 0.00 0.01 0.02 0.00 0.00 0.01 0.00 0.00 0.01  0.00
#3   3000 0.02 0.00 0.00 0.02 0.00 0.00 0.00 0.02 0.01  0.01
#4   4000 0.01 0.01 0.00 0.00 0.01 0.00 0.01 0.04 0.00  0.02
#5   5000 0.02 0.02 0.02 0.03 0.00 0.03 0.03 0.02 0.03  0.01
#6   6000 0.03 0.01 0.05 0.00 0.03 0.01 0.05 0.03 0.05  0.03
#7   7000 0.00 0.01 0.05 0.04 0.03 0.04 0.03 0.06 0.01  0.03
#8   8000 0.06 0.06 0.03 0.03 0.01 0.04 0.06 0.06 0.05  0.01
#9   9000 0.05 0.08 0.06 0.04 0.06 0.03 0.03 0.06 0.08  0.09
#10 10000 0.06 0.08 0.08 0.04 0.03 0.09 0.07 0.06 0.05  0.07
# Mean running time over the 10 repetitions, then plot vs data length
T8_mean_OptPart <- rowMeans(T8_resOptPart[,-1])
#[1] 0.000 0.035 0.043 0.101 0.168 0.247 0.456 0.788 1.083 1.519
plot(T8_vect_n, T8_mean_OptPart, xlab = "data length", ylab = "mean time in second", main = "time complexity of Optimal Partitioning", col="red3")
lines(T8_vect_n, T8_mean_OptPart, col="red3")
T8_mean_PELT <- rowMeans(T8_resPELT[,-1])
#[1] 0.000 0.005 0.008 0.010 0.021 0.029 0.030 0.041 0.058 0.063
plot(T8_vect_n, T8_mean_PELT, xlab = "data length", ylab = "mean time in second", main = "time complexity of PELT", col="steelblue")
lines(T8_vect_n, T8_mean_PELT, col="steelblue")
# Combined plot with both curves and a legend
plot(T8_vect_n, T8_mean_OptPart, xlab = "data length", ylab = "mean time in second", main = "time complexity of Optimal Partitioning and PELT", col="red3")
lines(T8_vect_n, T8_mean_OptPart, col="red3")
points(T8_vect_n, T8_mean_PELT, col="steelblue")
lines(T8_vect_n, T8_mean_PELT, col="steelblue")
location = "topleft"
labels = c("Optimal Partitioning", "PELT")
colors = c("red3", "steelblue")
legend(location, labels, fill=colors)
#Test9
# Same experiment as Test8, but with a random number of changepoints:
# roughly sqrt(n)/4 segments with uniformly drawn changepoint locations,
# instead of one changepoint every 100 observations.
T9_nStep <- 10
T9_vect_n <- seq(from = 1000, to = 10000, length.out = T9_nStep)
T9_vect_n
#[1] 1000 2000 3000 4000 5000 6000 7000 8000 9000 10000
# Number of segments per data length
T9_vect_m <- floor(sqrt(T9_vect_n)/4)
# [1]  7 11 13 15 17 19 20 22 23 25
T9_nRep <- 10
T9_resOptPart <- data.frame(matrix(0, T9_nStep, T9_nRep + 1))
colnames(T9_resOptPart) <- c("n", paste0("Rep",1:T9_nRep))
T9_resPELT <- data.frame(matrix(0, T9_nStep, T9_nRep + 1))
colnames(T9_resPELT) <- c("n", paste0("Rep",1:T9_nRep))
T9_sigma <- 1
for(i in 1:length(T9_vect_n))
{
# Random, sorted changepoint locations; the last one is fixed at n
T9_chp <- c(sort(runif(T9_vect_m[i] - 1,0,T9_vect_n[i]-1)), T9_vect_n[i])
T9_mu1 <- rpois(T9_vect_m[i], 10)
T9_mu2 <- rpois(T9_vect_m[i], 5)
T9_penalty <- 2 * T9_sigma * log(T9_vect_n[i])
T9_data <- GenData2D(T9_vect_n[i], T9_chp, T9_mu1, T9_mu2, T9_sigma, T9_sigma)
T9_resOptPart[i,] <- c(T9_vect_n[i], replicate(T9_nRep, OneStep(T9_data[1,], T9_data[2,], T9_penalty, type = "null",func = "OptPart2D")))
T9_resPELT[i,] <- c(T9_vect_n[i], replicate(T9_nRep, OneStep(T9_data[1,], T9_data[2,], T9_penalty, type = "pruning",func = "OptPart2D")))
}
# Results pasted from one interactive run (seconds, machine-dependent):
T9_resOptPart
#       n Rep1 Rep2 Rep3 Rep4 Rep5 Rep6 Rep7 Rep8 Rep9 Rep10
#1   1000 0.02 0.00 0.00 0.00 0.00 0.01 0.00 0.02 0.00  0.00
#2   2000 0.05 0.05 0.04 0.05 0.05 0.03 0.03 0.03 0.03  0.01
#3   3000 0.05 0.06 0.05 0.03 0.06 0.04 0.05 0.05 0.06  0.08
#4   4000 0.13 0.13 0.12 0.10 0.11 0.08 0.10 0.09 0.10  0.09
#5   5000 0.20 0.20 0.22 0.24 0.18 0.21 0.18 0.18 0.16  0.16
#6   6000 0.29 0.29 0.30 0.32 0.31 0.28 0.26 0.25 0.25  0.28
#7   7000 0.49 0.39 0.36 0.32 0.36 0.39 0.47 0.43 0.52  0.46
#8   8000 0.98 0.84 0.81 0.82 0.94 0.97 0.90 0.86 0.98  0.88
#9   9000 1.12 1.22 1.19 1.56 1.20 1.17 1.14 1.21 1.26  1.19
#10 10000 1.65 1.57 1.54 1.78 1.67 1.59 1.61 1.56 1.58  1.72
T9_resPELT
#       n Rep1 Rep2 Rep3 Rep4 Rep5 Rep6 Rep7 Rep8 Rep9 Rep10
#1   1000 0.00 0.01 0.00 0.00 0.00 0.00 0.00 0.00 0.00  0.00
#2   2000 0.02 0.01 0.00 0.00 0.00 0.01 0.00 0.01 0.00  0.01
#3   3000 0.00 0.04 0.02 0.02 0.03 0.03 0.01 0.02 0.00  0.01
#4   4000 0.02 0.02 0.02 0.01 0.03 0.01 0.01 0.03 0.03  0.02
#5   5000 0.04 0.05 0.05 0.03 0.04 0.03 0.03 0.03 0.04  0.01
#6   6000 0.06 0.07 0.07 0.04 0.06 0.07 0.05 0.06 0.10  0.07
#7   7000 0.05 0.05 0.05 0.07 0.08 0.06 0.09 0.08 0.11  0.04
#8   8000 0.17 0.20 0.16 0.10 0.16 0.15 0.12 0.15 0.14  0.14
#9   9000 0.09 0.11 0.11 0.13 0.08 0.06 0.11 0.12 0.12  0.14
#10 10000 0.14 0.13 0.16 0.09 0.11 0.17 0.09 0.15 0.16  0.13
# Mean running times over the repetitions, then individual/combined plots
T9_mean_OptPart <- rowMeans(T9_resOptPart[,-1])
T9_mean_OptPart
# [1] 0.005 0.037 0.053 0.105 0.193 0.283 0.419 0.898 1.226 1.627
plot(T9_vect_n, T9_mean_OptPart, xlab = "data length", ylab = "mean time in second", main = "Time complexity of Optimal Partitioning", col="red3")
lines(T9_vect_n, T9_mean_OptPart, col="red3")
T9_mean_PELT <- rowMeans(T9_resPELT[,-1])
T9_mean_PELT
# [1] 0.001 0.006 0.018 0.020 0.035 0.065 0.068 0.149 0.107 0.133
plot(T9_vect_n, T9_mean_PELT, xlab = "data length", ylab = "mean time in second", main = "Time complexity of PELT", col="steelblue")
lines(T9_vect_n, T9_mean_PELT, col="steelblue")
plot(T9_vect_n, T9_mean_OptPart, xlab = "data length", ylab = "mean time in second", main = "Time complexity of Optimal Partitioning and PELT", col="red3")
lines(T9_vect_n, T9_mean_OptPart, col="red3")
points(T9_vect_n, T9_mean_PELT, col="steelblue")
lines(T9_vect_n, T9_mean_PELT, col="steelblue")
location = "topleft"
labels = c("Optimal Partitioning", "PELT")
colors = c("red3", "steelblue")
legend(location, labels, fill=colors)
|
3a9a315ab5601bab36fe5f4292c2100e3f3f71f9 | b696aa649115006e6ddc561c29c15d1f6c6e52a2 | /R/survparamsim-methods.R | 2f361a90ad773069e801dc095a6f5d9606babe14 | [] | no_license | yoshidk6/survParamSim | b120278626829e62a8045d0fda21027ee1998a2b | 5a5adc9fd14369a4417bf463504ccce4dc20c7c7 | refs/heads/master | 2023-05-26T23:29:37.392001 | 2023-05-24T01:49:10 | 2023-05-24T01:49:10 | 200,903,829 | 3 | 0 | null | 2023-03-01T20:02:28 | 2019-08-06T18:26:57 | R | UTF-8 | R | false | false | 857 | r | survparamsim-methods.R | #' Methods for S3 objects in the package
#'
#' @name survparamsim-methods
#'
NULL
#' @rdname survparamsim-methods
#' @export
#' @param x An object of the corresponding class
#' @param object An object of the corresponding class
#' @param ... Additional arguments passed to methods.
print.survparamsim <- function(x, ...) {
cat("---- Simulated survival data with the following model ----\n")
dput(x$survreg$call)
cat("\n")
cat("* Use `extract_sim()` function to extract individual simulated survivals\n")
cat("* Use `calc_km_pi()` function to get survival curves and median survival time\n")
cat("* Use `calc_hr_pi()` function to get hazard ratio\n\n")
cat("* Settings:\n")
cat(" #simulations:", max(x$sim$rep), "\n", sep=" ")
cat(" #subjects:", x$newdata.nona.obs %>% nrow(),
"(without NA in model variables)\n", sep=" ")
}
|
24aeb50d53ce8fb059a0fd909ac84f12f238ee4a | 253898851acec35165e39d80808e4ea948d65d47 | /man/clean_tweets.Rd | 6ec84de04e094107697095113e8e6caaad686b89 | [] | no_license | rturn/parseTweetFiles | 486b4ecff7299b54d58d417a683a20861418c35e | c0922ccf2a8efc6370dfacfa1fbc2ad56e2331e1 | refs/heads/master | 2023-07-21T12:46:48.596085 | 2023-07-14T18:46:46 | 2023-07-14T18:46:46 | 36,214,329 | 7 | 2 | null | 2015-10-14T00:49:00 | 2015-05-25T06:33:46 | R | UTF-8 | R | false | false | 1,091 | rd | clean_tweets.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/parseTweetFiles.R
\name{clean_tweets}
\alias{clean_tweets}
\title{Cleans tweet data frames}
\usage{
clean_tweets(tweets.df, tz = NULL, stoplist = NULL)
}
\arguments{
\item{tweets.df}{An array of tweets with desired variables attached. (Use dplyr to filter variables)}
\item{tz}{A list of time zones to filter by, currently case sensitive}
\item{stoplist}{The stoplist used to filter words}
}
\value{
The tweet data frame with all editing / filtering done. Empty dataset
}
\description{
Performs all necessary cleaning on a data frame of tweets. This includes removing all symbols from tweets, converting
them to lower case, removing all stop words, and converting timestamps to an R usable format.
Can also filter by time zone if desired (default does not filter)
}
\examples{
\dontrun{df = select(rawdata, text, time_zone)}
\dontrun{tweets = clean_tweets(dataframe)}
\dontrun{tweets = clean_tweets(dataframe, tz = c("Pacific Time (US & Canada)", "Eastern Time (US & Canada)),
stoplist = stoplist))}
}
|
d801ba4cf1aceb09242713f468adb47e5605efa3 | 96766930ae6925c441e86939cee456f56a856ef2 | /01_weather_stations/17_reformat_pnud.R | 97ecf14de729f8b52b7058c829308cc7ee903008 | [] | no_license | CIAT-DAPA/usaid_hnd | 1dfb92736dbbfe0a5bf1723b7094d4fe3a3e9e65 | 6c35057a79695863f8c13effdc2ff33660ca6436 | refs/heads/master | 2021-07-17T20:32:41.923436 | 2021-03-01T14:45:46 | 2021-03-01T14:45:46 | 85,354,515 | 2 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 1,570 | r | 17_reformat_pnud.R | library(dplyr)
inDir = "C:/Users/lllanos/Desktop/all plot/monthly"
files_all = list.files(inDir, full.names = T, pattern = ".csv")
data_station= read.csv(files_all[1],header = T)
variable = substring(basename(files_all),1,4)
entidad = strsplit(basename(files_all),"_") %>% lapply(., `[[`, 3) %>% unlist %>% strsplit(.,"[.]")%>% lapply(., `[[`, 1) %>% unlist
Datos <- lapply(files_all,function(x){read.csv(x,header=T)})
object = Datos[[1]]
nomb_prec = substring(names(object[,-1:-3]),2,nchar(names(object[,-1:-3])))
nomb_s_prec = do.call("rbind",strsplit(nomb_prec,"_"))
name_st = paste0(nomb_s_prec[,2]," (",nomb_s_prec[,1],")")
pdf(paste0(inDir,"/",variable[1],"_",entidad[1],"_line_graph_all.pdf"))
n_plot = seq(4,(ncol(object)-3),3)
for(i in n_plot){
par(mfrow = c(4,1), # 2x2 layout
oma = c(2, 2, 0, 0), # two rows of text at the outer left and bottom margin
mar = c(4, 4, 3, 3), # space for one row of text at ticks and to separate plots
xpd = NA)
seg = i:(i+3)
if(i == (ncol(object)-3)){ seg = i:ncol(object)}
for(j in seg){
plot(object[,j],type="l",col="grey",xaxt="n",yaxt="n",xlab="",ylab="Precipitación (mm/día)",main = paste0("Estación ",name_st[j-3]),cex.lab=0.8)
axis(1,at=seq(1,nrow(object),366),labels=seq(min(object$year),max(object$year),1),las=1,col="black",las =2,cex.axis = 0.8)
axis(2,at=seq(min(object[,j],na.rm=T),max(object[,j],na.rm=T)+0.5,30),labels=seq(min(object[,j],na.rm=T),max(object[,j],na.rm=T)+0.5,30),las=1,col="black",las =1,cex.axis = 0.7)
}
}
dev.off()
|
c168955d6c51ba19a4aba01666ff1092e492ad02 | 232f6430d39404d86262c238a2bff55b797ebf59 | /AbaloneAgeclassification/newvar8class.R | c1a44e6783aade5ef53ac0151a828573b504aebc | [] | no_license | rahuvela/Projects | cbbdbe9c7c18d003d850a2d4d3dbf9c35cc3704e | a501f88b9e672d205ebbad7a9c4bc64556b6e309 | refs/heads/master | 2021-01-19T21:06:17.847603 | 2018-06-27T13:53:37 | 2018-06-27T13:53:37 | 101,244,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,562 | r | newvar8class.R | library(party)
library(randomForest)
library(Boruta)
library(caret)
library(rpart)
library(e1071)
abalone.cols = c("sex", "length", "diameter", "height", "whole weight", "shucked weight", "viscera weight", "shell weight", "rings");
url <- 'http://archive.ics.uci.edu/ml/machine-learning-databases/abalone/abalone.data';
abalone <- read.table(url, sep=",", row.names = NULL, col.names = abalone.cols, nrows=4177, stringsAsFactors=FALSE);
abalone$sex[abalone$sex == 'F'] <- 1
abalone$sex[abalone$sex == 'M'] <- 0
abalone$sex[abalone$sex == 'I'] <- 2
n = nrow(abalone)
trainp = (as.integer(n*0.8))
mydata = abalone
#create new variable
datanewvar = mydata
for(i in 1:n){
datanewvar[i,2] = datanewvar[i,2] + datanewvar[i,3] + datanewvar[i,4]
datanewvar[i,5] = datanewvar[i,6] + datanewvar[i,5]
}
#drops <- c("diamete","height","shuckedweight","visceraweight","shellweight")
#datanewvar[ , !(names(datanewvar) %in% drops)]
datanewvar <- datanewvar[,-3]
datanewvar <- datanewvar[,-3]
datanewvar <- datanewvar[,-4]
datanewvar <- datanewvar[,-4]
datanewvar <- datanewvar[,-4]
for(i in 2:3){
colmean = mean(abalone[,i])
sd = sd(abalone[,i])
for(j in 1:n){
abalone[j,i]=(abalone[j,i]-colmean)/sd
}
}
newvar8data = datanewvar
for(i in 1:n){
if(mydata[i,9] <= 4){
newvar8data[i,4] = 1
}else if(mydata[i,9] <= 8 && mydata[i,9] > 4){
newvar8data[i,4] = 2
}else if(mydata[i,9] <= 12 && mydata[i,9] > 8){
newvar8data[i,4] = 3
}else if(mydata[i,9] <= 16 && mydata[i,9] > 12){
newvar8data[i,4] = 4
}else if(mydata[i,9] <= 20 && mydata[i,9] > 16){
newvar8data[i,4] = 5
}else if(mydata[i,9] <= 24 && mydata[i,9] > 20){
newvar8data[i,4] = 6
}else if(mydata[i,9] <= 28 && mydata[i,9] > 24){
newvar8data[i,4] = 7
}else if(mydata[i,9] <= 32 && mydata[i,9] > 28){
newvar8data[i,4] = 8
}
}
datanewvarTraining <- newvar8data[1:trainp,]
datanewvarTesting <- newvar8data[trainp:n,]
datanewvarTreeTraining <- rpart(rings ~., method="class", data=datanewvarTraining)
plot(datanewvarTreeTraining, uniform=TRUE, main="Classification Tree for data new var Training");
text(datanewvarTreeTraining, use.n = TRUE, all=TRUE, cex=.8);
newrawPredict <- predict(datanewvarTreeTraining, datanewvarTesting, type="class");
cm <- table(newrawPredict, datanewvarTesting$rings);
print(datanewvarTreeTraining)
print("\n xerror")
printcp(datanewvarTreeTraining)
u = union(newrawPredict, datanewvarTesting$rings);
t = table(factor(newrawPredict, u), factor(datanewvarTesting$rings, u));
print(confusionMatrix(t))
|
39f6556dbcbec549764ac920e819bfc7be505716 | f67a72538b78724ec8906e1f2df643a97f85c4db | /run_analysis.R | f4678ee64fdd850c5909eb4d4fb8f792e03e9431 | [] | no_license | mohsenarabgol/GettingCleaningData | ddc6a91b95c641914c229df7a0f14b8d4f3d8e32 | 4fd227c935cb35e4ee316ac95c8b5a10c6a9c3cb | refs/heads/master | 2021-01-17T06:43:12.811108 | 2016-05-17T17:23:56 | 2016-05-17T17:23:56 | 58,880,062 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,963 | r | run_analysis.R | ##Reading the features and activity labels
features<-readLines("features.txt")
activity<-readLines("activity_labels.txt")
for (i in 1:length(features)){
features[i]<-unlist(strsplit(features[i], " "))[2]
}
##Reading and organizing the train data
X_train<-readLines("train/X_train.txt")
y_train<-readLines("train/y_train.txt")
subject_train<-readLines("train/subject_train.txt")
for (i in 1:length(X_train)){
X_train[[i]]<-gsub(" "," ",X_train[[i]])
X_train[[i]]<-substring(X_train[[i]], 2, nchar(X_train[[i]]))
X_train[i]<-strsplit(X_train[[i]], " ")
}
train_data<-do.call(rbind.data.frame, X_train)
colnames(train_data)<-features
train_data<-cbind(subject = subject_train, train_data)
train_data<-cbind(activity = y_train, train_data)
##Reading and organizing the test data
X_test<-readLines("test/X_test.txt")
y_test<-readLines("test/y_test.txt")
subject_test<-readLines("test/subject_test.txt")
for (i in 1:length(X_test)){
X_test[[i]]<-gsub(" "," ",X_test[[i]])
X_test[[i]]<-substring(X_test[[i]], 2, nchar(X_test[[i]]))
X_test[i]<-strsplit(X_test[[i]], " ")
}
test_data<-do.call(rbind.data.frame, X_test)
colnames(test_data)<-features
test_data<-cbind(subject = subject_test, test_data)
test_data<-cbind(activity = y_test, test_data)
##Merging the train and test data and selecting relevant columns
data<-rbind(train_data, test_data)
colselect<-features[grepl("mean\\(\\)|std\\(\\)", features)]
data<-data[,c("subject", "activity", colselect)]
##Cleaning the activity names
data$activity<-as.character(data$activity)
for (i in 1:dim(data)[1]){
data[i,2]<-activity[as.numeric(data[i,2])]
data[i,2]<-substring(data[i,2], 3, nchar(data[i,2]))
}
##Aggregating the data based on subject and activity, sorting and writing the output file
final_data<-aggregate(.~subject+activity,data=data, FUN=mean)
final_data<-final_data[order(as.numeric(as.character(final_data$subject))),]
write.table(final_data, "output.txt", row.name=FALSE)
|
49077493e434d372d3158205f6a14d8aeb01f89a | 3e1ceb11aee60a97fe51086ec887407a2159ee90 | /Lecture3/Lecture3.r | 3d8ca7db8adb55a577916253927efc0674cc5fce | [] | no_license | lvwarren/R | f1e5e8e3303de11d945c12828aad320d827e1f8c | 8c0c327a9be942d229f237f3a6f48ec3c1ed73f6 | refs/heads/master | 2020-12-28T14:50:00.681346 | 2020-02-05T06:01:50 | 2020-02-05T06:01:50 | 238,376,916 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,157 | r | Lecture3.r | # name value pairs like Python dictionary
# List allwos for different datatypes, character, numeric and logical
# Can have list of lists for database.
# vector would coerce evertying into same datum type
j = list(name="joe", salary=55000, union=T)
j
# component names are called tags
j$sal # can abbreviate to whatever extent is possible without causing ambiguity
j$name; j$union
j$s
jalt = list("joe", 55000, T)
jalt
jalt[1]; jalt[2]; jalt[3];
z = vector(mode="list")
z
z[["abc"]] = 3; # notice double bracket operator
z
j$salary
j[["salary"]]
j[[2]]
#named access, double-bracket access with name, double-bracket access with index
j[1:2]
j[1:3]
j2 = j[2]
j2
class(j2)
str(j2) # tells you what kind of 'structure'
# note the difference bewtween single and double bracket indexiting
z = list(a="abc", b=12)
z
z$c = "sailing"
z[[4]] = 28 # make a note of this and send to Dr. Yang topic, naming after the fact.
z[5:7] = c(F,T,T)
z
z$d = 28
z
# delete by setting to NULL
z$b = NULL
z
c(list("Joe", 55000, T),list(5)) # c works with lists as well as vectors
j
length(j) ; # gives number of name value pairs in the list
names(j)
ulj = unlist(j)
ulj
class(ulj)
ulj[1]
ulj[2]
z = list(a=5,b=12,c=13)
y = unlist(z)
class (z); class(y)
y
z
w = list(a=5,b="xyz")
wu = unlist(w)
class(wu)
w
wu
wu[1]; wu[2]
# vectos coerced to highest type in NULL < raw < logical < integer < real < complex < character < list
# watch types when applying the unlist operator
names(wu)
names(wu)
names(wu) = c("d", "e")
wu
names(wu[1])= ("f")
wu
wun = unname(wu)
wun
class(wun)
is.vector(wun)
w = list(a=5, b="xyz")
wu = unlist(w)
wn = unname(w)
wu
is.list(wu)
is.vector(wu)
is.list(wn)
is.vector(wn)
# list apply, applies a function to a list
foo = lapply(list(1:3, 25:29), median)
is.list(foo); is.vector(foo)
L = list(1:3,25:29)
L
S = sapply(list(1:3,25:29), median)
S
#use unlist on lapply to get sapply result
g = c("M", "F", "F", "I", "M", "M", "F")
g
lapply(c("M","F","I"), function(gender) which(g==gender))
#
# read data from file
# store in R object
# do your processing/ analysis
|
7f9ea0e396108bf48bdfdcf8dde16995ab923866 | bf6e6d9a51776287e03c531d122e83287617cdfb | /labsimplex.Rcheck/00_pkg_src/labsimplex/R/adjustVertex.R | 7f4c316874923b9b056ae0094f8ce74647ab3d85 | [] | no_license | Crparedes/labsimplex | 2ec465947373e7fffabeaf469354e61eaef1e07c | 8228dd1ac6adfe9a4da42c59bb3c2b9ba1c63046 | refs/heads/master | 2021-09-27T03:48:09.634111 | 2021-09-22T14:22:18 | 2021-09-22T14:22:18 | 179,175,356 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,339 | r | adjustVertex.R | #' Modify given coordinates of given vertices of a simplex
#'
#' Changes the coordinates of generated vertices when slightly differences
#' were impossible to avoid at the moment of setting the experiment
#' (e.g. small differences in mass components when preparing a mixture).
#'
#'
#' @param newcoords List with elements named like the vertices to be modified.
#' Each element must have a vector with the actual (ordered)
#' coordinates used in the experiment. \code{NA} may be used
#' to indicate coordinates that were unchanged.
#' @inheritParams generateVertex
#'
#' @return A 'smplx' type object with the modified simplex information.
#' @examples
#' simplex <- labsimplex(N = 3, start = c(7, 25, 0.15),
#' stepsize = c(0.2, 5, 0.02))
#' adjustVertex(simplex = simplex, newcoords = list(Vertex.1 = c(7, NA, NA),
#' Vertex.3 = c(7.2, NA, NA)),
#' overwrite = TRUE)
#' @author Cristhian Paredes, \email{craparedesca@@unal.edu.co}
#' @author Jesús Ágreda, \email{jagreda@@unal.edu.co}
#' @export
adjustVertex <- function(simplex, newcoords, overwrite = FALSE) {
name <- deparse(substitute(simplex))
checkMain(simplex = simplex)
if (any(summary(newcoords)[, 3] != "numeric")) {
stop("Only numeric or NA values are allowed for the adjusted coordinates")
}
# Vertices to be adjusted:
VerTBA <- attr(summary(newcoords), "dimnames")[[1]]
for (i in 1:length(VerTBA)) {
pos.RM <- match(tolower(gsub('\\.', '', VerTBA[i])),
tolower(gsub('\\.', '', attr(simplex$coords,
"dimnames")[[1]])))
if (is.na(pos.RM)) {
stop("At least one of submited vertex (list elements names) is not in the
simplex object to be adjusted")
}
VerTBA[i] <- pos.RM
}
VerTBA <- as.numeric(VerTBA)
for (i in 1:length(VerTBA)) {
# Coordinates that remain unchanged
newcoords[[i]][is.na(newcoords[[i]])] <- simplex$coords[VerTBA[i],
is.na(newcoords[[i]])]
# Replacement:
simplex$coords[VerTBA[i], ] <- newcoords[[i]]
}
if (overwrite) {
assign(name, simplex, envir = parent.frame())
} else {
cat("\n")
return(simplex)
}
}
|
83583cd53250b7e661f8946bd49b28a35c7c86c8 | ddbc421408e76f182518f36430e17468997a128b | /man/IdentifyIndelSignatures.Rd | 273068e253673b74d07a201e8f44a7ff6475cf97 | [] | no_license | Honglab-Research/MutaliskR | 25b9f05a7b93bb2d7d6e352065aacb9788c9adf4 | 9a4393b38489d2220068448861b02555dc6323be | refs/heads/main | 2023-04-05T05:04:49.239648 | 2021-03-30T06:44:07 | 2021-03-30T06:44:07 | 351,077,262 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,090 | rd | IdentifyIndelSignatures.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/main.R
\name{IdentifyIndelSignatures}
\alias{IdentifyIndelSignatures}
\title{Identify indel mutational signatures}
\usage{
IdentifyIndelSignatures(
input,
bsg,
sample.id = "Sample",
reference = GetPcawgIndelSignaturesData(version = "SigProfiler"),
target.signatures = GetPcawgIndelSignaturesNames(version = "SigProfiler"),
plot.theme = GetIndelSignaturesPlotTheme(),
analyze.variants.column.gene,
analyze.variants.column.group,
analyze.variants = FALSE,
n.cores = 2,
max.len = 25,
padding.len = 80,
combn.m = 3,
n.max.signatures = 7,
min.probability = 0.01,
zeta.value = 1e-10,
save = TRUE,
save.dir = NULL
)
}
\arguments{
\item{input}{Either VCF file path or data.frame. If the input is a data.frame, it must include the following columns: Chr, Pos, Ref, Alt.}
\item{bsg}{BSgenome object.}
\item{sample.id}{Sample ID that will be used to name output files (default: 'Sample').}
\item{reference}{A data.frame with the following columns: Mutation_Type, and names of indel signatures
(default: a data.fram returned from \code{\link{GetPcawgIndelSignaturesData(version = "SigProfiler")}}).}
\item{target.signatures}{Signatures to be considered for identification (default: an array returned from \code{\link{GetPcawgIndelSignaturesNames(version = "SigProfiler")}}).}
\item{plot.theme}{A data.frame returned from \code{\link{GetIndelSignaturesPlotTheme}}.}
\item{analyze.variants.column.gene}{Name of column in the data.frame corresponding to the gene name or ID (e.g. "Gene.refGene" if using ANNOVAR data).}
\item{analyze.variants.column.group}{Name of column in the data.frame corresponding to the variant group for plotting purposes (e.g. "Func.refGene" if using ANNOVAR data).}
\item{analyze.variants}{A boolean value that indicates whether variant-level signature analysis should be performed (default: FALSE).}
\item{n.cores}{Number of cores to use.}
\item{max.len}{Maximum number of bases allowed for a small insertion and deletion (indels bigger than this will be excluded; default: 25).}
\item{padding.len}{Number of bases to use for upstream and downstream sequences (default: 80).}
\item{combn.m}{Number of signatures to consider in each step. 'm' parameter in combn function (default: 3).}
\item{n.max.signatures}{Maximum number of signatures to identify. Recommended: n.max.signatures >= combn.m (default: 7).}
\item{min.probability}{Minimum probability to attribute to a signature (default: 0.01).}
\item{zeta.value}{A float value that is added to the data frequency (default: 1e-10).}
\item{save}{Save resulting files if TRUE, otherwise do not save (default: TRUE).}
\item{save.dir}{Save directory path (default: NULL).}
}
\value{
A list with the following elements:
results: a data.frame with the following columns:
\item{Mutations_Count}{Number of mutations.}
\item{Signatures}{Identified mutational signatures separated by comma.}
\item{Signatures_Count}{Number of identified mutational signatures.}
\item{Signatures_Weights}{Normalized (0 to 1) weights of identified mutational signatures separated by comma.}
\item{Mutation_Types}{Mutation types separated by comma.}
\item{Mutation_Types_Groups}{Mutation type groups separated by comma.}
\item{Observed_Spectrum}{Normalized spectrum (frequency) of observed mutations separated by comma.}
\item{Reconstructed_Spectrum}{Normalized spectrum (frequency) of MLE reconstructed mutations separated by comma.}
\item{Residual_Spectrum}{Normalized spectrum (frequency) of residual mutations separated by comma.}
\item{Cosine_Similarity}{Cosine similarity score between Observed_Spectrum and Reconstructed_Spectrum.}
\item{RSS}{Raw residual sum of squares (derived from Residual_Spectrum).}
\item{RSS_Normalized}{Normalized residual sum of squares (derived from Residual_Spectrum).}
\item{BIC}{Bayesian information criterion of the identified model.}
results.variants: a data.frame with the following columns:
}
\description{
This function identifies indel mutational signatures.
}
|
b6e6e8bcbfe53a95023209e3dbce9cac9f2b48ef | 816247c509847002300485ff792778d607a7c119 | /man/gen_cambio_futuro.Rd | 936298602a5a26ebf1453ed8fd1acc02e0e8089b | [] | no_license | mgaldino/line4PPPsim | 93d93c593e22583e4d00df5371d4b93c9850c2cd | a026c6d5f83ffc25712eb3b6f11d13a7f530c98e | refs/heads/master | 2020-04-02T02:29:55.803353 | 2019-02-11T15:28:31 | 2019-02-11T15:28:31 | 153,912,413 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 566 | rd | gen_cambio_futuro.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gen_cambio_futuro.R
\name{gen_cambio_futuro}
\alias{gen_cambio_futuro}
\title{Taxa de cambio futura}
\usage{
gen_cambio_futuro(start_year = 2005, amp_erro = 2, mu = 0)
}
\arguments{
\item{start_year}{A number. the year the series starts}
\item{amp_erro}{A number. Amplitude of error}
\item{mu}{mean of normal error}
}
\value{
data frame with real yearly exchange rate and a column of date
}
\description{
gera projecao de taxa de cambio real futura
}
\examples{
gen_cambio_futuro()
}
|
cd580216897df5ffc3deaebdb00db03530c1b50c | 1f02b3d83bbdfb02a92fee402e2db63f446cf413 | /OLD/OLD IGNORE.R | 61304a6370000bcc681a2ed5e404aca8cdb10f6e | [] | no_license | gberg1303/goldbeRg | ec9f62817fcc71b84864d56d8d0a4d26068c3379 | ec5c662f19a2fdd4e5cec0fb6ec318cba0402eeb | refs/heads/master | 2023-04-03T03:40:05.153150 | 2021-04-08T22:38:39 | 2021-04-08T22:38:39 | 283,044,058 | 7 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,123 | r | OLD IGNORE.R | ### OLD Playoffs
# NOTE(review): dead scrap kept for reference (file is "OLD IGNORE"). It is
# not runnable as-is: the pipeline below starts mid-pipe (no data frame on
# the left-hand side) and references objects defined elsewhere
# (division, wins, conference, Model_Dataset, starters).
# Intent (from the code): rank teams within each division with random
# tie-breaks, flag division winners, give two wild cards per conference to
# the best non-winners, and mark playoff teams.
dplyr::group_by(division) %>%
  dplyr::mutate(
    division_rank = rank(-wins, ties.method = "random"),
    division_winner = ifelse(division_rank == 1, 1, 0)) %>%
  dplyr::group_by(conference, division_winner) %>%
  dplyr::mutate(
    rest_conference_rank = rank(-wins, ties.method = "random")) %>%
  dplyr::ungroup() %>%
  dplyr::mutate(
    wild_card = ifelse(division_winner == 0 & rest_conference_rank == 1 | division_winner == 0 & rest_conference_rank == 2, 1, 0),
    playoff = ifelse(division_winner == 1 | wild_card == 1, 1, 0)
  ) %>%
  dplyr::select(-rest_conference_rank, -division_rank)
#####
### Add QB Ratings to Dataset
Model_Dataset_New <- Model_Dataset %>%
  # Add Home QB
  dplyr::left_join(
    starters %>% dplyr::ungroup() %>% dplyr::select(game_id, posteam, Composite) %>% dplyr::rename(home_qb = Composite),
    by = c("home_team" = "posteam", "game_id")
  ) %>%
  # Add Away QB
  dplyr::left_join(
    # NOTE(review): `dplyr::starters` looks like a typo — `starters` is
    # (presumably) a local data frame, not a dplyr export; compare the
    # home-QB join above, which uses plain `starters`.
    dplyr::starters %>% dplyr::ungroup() %>% dplyr::select(game_id, posteam, Composite) %>% dplyr::rename(away_qb = Composite),
    by = c("away_team" = "posteam", "game_id")
  )
|
c07ae77fe76f4db3fb0ddd79d54754ff5c16f408 | 2285ca4bcd0e51d8723a60ec0799558558c3bd98 | /Week4/Hypothesis_testing_demo.R | 5650b593bc2344ad18b85b353275308e8072fc6c | [] | no_license | ssj1739/BSRP2020 | 322876b1632d06717f188ca1022c5aab352c2bab | 767f0d19d11526fab84130adbcc68c57b1261d7a | refs/heads/master | 2022-11-26T06:54:06.328803 | 2020-08-06T18:20:22 | 2020-08-06T18:20:22 | 283,871,281 | 0 | 1 | null | 2020-08-06T18:10:38 | 2020-07-30T20:33:25 | R | UTF-8 | R | false | false | 5,794 | r | Hypothesis_testing_demo.R | #STATISTICS AND HYPOTHESIS TESTING DEMO
#Motivation: What is a population? What is a sample? Why might a sample not be the "same" as the population?
#suppose that you are doing a study on sepal length measurements of iris plants in Boston
#what is a population distribution?
iris_population <- iris
dim(iris_population)
hist(iris_population$Sepal.Length, 100)
#as scientists, what are some ways to describe/summarize a population's distribution?
mean(iris_population$Sepal.Length)
sd(iris_population$Sepal.Length)
median(iris_population$Sepal.Length)
#what is a sample distribution?
#how do we get a sample distribution from a population distribution?
iris_sample = iris_population[sample(1:nrow(iris_population), 20) ,]
#what are some ways to summarize/describe a sample distribution? is it a random variable?
##what are we trying to understand with this value?
mean(iris_sample$Sepal.Length)
iris_sample = iris_population[sample(1:nrow(iris_population), 20) ,]
mean(iris_sample$Sepal.Length)
iris_sample = iris_population[sample(1:nrow(iris_population), 20) ,]
mean(iris_sample$Sepal.Length)
iris_sample = iris_population[sample(1:nrow(iris_population), 20) ,]
mean(iris_sample$Sepal.Length)
#what are some factors that can change the sample mean?
iris_sample = iris_population[sample(1:nrow(iris_population), 50) ,]
mean(iris_sample$Sepal.Length)
#Let's do this a bunch of times: want to see whether the sample mean is consistently similar to the population mean
#
set.seed(0123)
n_expts = 10000
sample_size = 20
sample_means = numeric()
for(i in 1:n_expts) {
iris_sample = iris_population[sample(1:nrow(iris_population), sample_size) ,]
sample_means <- c(sample_means, mean(iris_sample$Sepal.Length))
}
iris_sample = iris_population[sample(1:nrow(iris_population), sample_size) ,]
hist(sample_means, 100)
#Does the peak of the sample mean distribution correspond to the population mean?
abline(v = mean(iris_sample$Sepal.Length), col = "red", lwd = 3)
abline(v = mean(iris_population$Sepal.Length), col = "blue", lwd = 3)
abline(v = mean(sample_means), col = "green", lwd = 3)
mean(iris_population$Sepal.Length)
mean(sample_means)
#in reality, we only get the sample ONCE (usually),
#and we don't know what the population mean is.
#so, we can't do the following:
#compare the sample mean to population mean
#compare the sample mean to the distribution of the sample means.
#it seems that we can't easily figure out what the population mean is going to be,
#given the sample mean.
#A fact from statistics: given a population mean, with a sample size,
#we can predict what the sample mean distribution looks like.
#For instance, a population mean of 0, take a sample size of 20:
t_distribution <- rt(n = 10000, df=20)
hist(t_distribution, 100)
#then, suppose we observe a mean of 2 in our sample:
abline(v = 2, col = "red", lwd = 3)
#what is the probability of observing 2 or any value more extreme?
mean(t_distribution > 2)
#not very likely (< .05), so the population mean is not very likely to be 0.
#We have, instead, worked backwards: instead of staring at the sample mean and trying
#to deduce the population mean, we assumed what the population mean is a particular value,
#deduced its properties, and see whether the sample mean fit the property.
#This is the concept of hypothesis testing.
#Have a hypothesis what the population (mean) is.
#The sample mean follows a distribution: the null distribution.
#Take a sample and look at it: where does it fall in the in the null distribution?
#If it is not a very likely value in the null distribution, then it is likely that
#the population mean is not that hypothesis.
#Back to the iris example.
#Suppose null hypothesis: population mean is 5.
null_hypothesis <- 5
t_distribution <- rt(10000, df = nrow(iris_sample) - 1)
hist(t_distribution, 100)
t_statistic <- (mean(iris_sample$Sepal.Length) - null_hypothesis) / sqrt(var(iris_sample$Sepal.Length) / nrow(iris_sample))
abline(v = t_statistic, col = "red", lwd = 3)
mean(t_distribution > t_statistic)
t.test(iris_sample$Sepal.Length, mu = 5)
#try another null hypothesis!
#Now, what if the null hypothesis is the population mean of 5.84?
null_hypothesis <- 5.84
t_statistic <- (mean(iris_sample$Sepal.Length) - null_hypothesis) / sqrt(var(iris_sample$Sepal.Length) / nrow(iris_sample))
t_distribution <- rt(10000,df = nrow(iris_sample) - 1)
hist(t_distribution, 100)
abline(v = t_statistic, col = "red", lwd = 3)
mean(t_distribution > t_statistic)
#Is it possible that by chance we observe something that will happen less than 5% of the time?
#############
#IN-CLASS EXERCISE:
#we came up with the following scientific hypothesis: there is a difference between sepal length between
#setosa and virginica.
library(tidyverse)
iris_setosa_population <- iris_population %>% filter(Species == "setosa")
iris_virginica_population <- iris_population %>% filter(Species == "virginica")
population_difference <- mean(iris_setosa_population$Sepal.Length) -
mean(iris_virginica_population$Sepal.Length)
#Take a sample of 20 from iris_setosa_population and iris_virginica_population.
#What is the difference in means?
#The difference in population means actually follows a T-distribution also.
#Based on our original scientific hypothesis, what should be the null hypothesis be?
#Here is the way to generate the null hypothesis and t_statistic
#(with variables iris_setosa_sample, iris_virginica_sample):
t_distribution <- rt(n = 10000, df = 76)
t_statistic <- (mean(iris_setosa_sample$Sepal.Length) - mean(iris_virginica_sample$Sepal.Length)) /
sqrt(var(iris_setosa_sample$Sepal.Length) / 20 + var(iris_virginica_sample$Sepal.Length) / 20)
#what is the p-value?
#verify with t.test()!
#Look back at Q4 of the homework. Can you do it now? |
674af00655cccb8f248f2bcf9fe1870433e193f1 | 5b9be397239dfb3383313d86be9b806792377f8a | /helperFunctions/NearestStdRuns.R | ff422e44ada050c3c4abe12c2b12630ab08307fc | [] | no_license | jgiacomo/AMSanalysis | f20df8698906618c1d224932db5d22ba1893e1b1 | 3efb11c404fe10a265dac5bc8c92d39546359712 | refs/heads/master | 2020-05-21T13:48:58.332486 | 2019-09-01T12:29:49 | 2019-09-01T12:29:49 | 61,062,731 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,615 | r | NearestStdRuns.R | NearestStdRuns <- function(df, Run, standardPos, n=6){
# Find the n standard runs nearest in time to the given run.
#
# Inputs
#   df          = data frame of AMS run data; must contain the columns
#                 `run`, `pos`, `active`, and `dateTime` (POSIXct).
#   Run         = run number of the unknown sample.
#   standardPos = vector of wheel positions occupied by standards.
#   n           = number of nearest standard runs to return (default 6).
#
# Outputs
#   A data frame with columns `run` and `timeDiff` (difftime, seconds),
#   ordered nearest first, containing at most n rows.
#
# Implementation note: rewritten in base R so the helper no longer attaches
# dplyr as a side effect; inherits() replaces methods::is().
# Check that run times are POSIXct date-times and exit if not.
if (!inherits(df$dateTime, "POSIXct")) {
  stop("Error: dateTime not of class POSIXct in function NearestStdRuns.")
}
# Keep active runs on standard positions, excluding the run of interest
# itself (in case it is a standard). which() drops rows where any test
# evaluates to NA, matching dplyr::filter()'s NA handling.
keep <- which(df$pos %in% standardPos & df$active & df$run != Run)
stdRuns <- df[keep, , drop = FALSE]
# Run time of the run in question.
smplRT <- df$dateTime[df$run == Run]
# Absolute time difference (in seconds) from each standard run to Run.
stdRuns$timeDiff <- abs(difftime(stdRuns$dateTime, smplRT, units = "secs"))
# Order by proximity (order() is stable, so ties keep their original file
# order) and return the n nearest run/timeDiff pairs.
nearestRuns <- stdRuns[order(stdRuns$timeDiff), c("run", "timeDiff"), drop = FALSE]
return(head(nearestRuns, n))
}
080d090cc8b7811c1712a0eb7d6d89c93166b820 | e8c93f0897c372b73be474041b8ec209605595d0 | /runMe.R | 17f531e5d712db3c754823e06ba6427cda5d4e25 | [] | no_license | lixun910/smacof | 94c5177cd84ad6ab2716c304e3dfbc1eec343abd | dbac961f87ee9697837a4b7e3a1cdfd26916e97c | refs/heads/main | 2023-02-24T00:08:53.871955 | 2021-01-27T22:10:03 | 2021-01-27T22:10:03 | 333,573,964 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 657 | r | runMe.R | dyn.load("smacof.so")
# Benchmark driver comparing three SMACOF implementations (presumably
# defined in the sourced files / the compiled smacof.so loaded above):
#   smacofR   - pure R, full-matrix weights and dissimilarities
#   smacofRC  - C-backed, lower-triangle vector inputs
#   smacofRCU - C-backed variant, lower-triangle vector inputs
source("smacofR.R")
source("smacofRC.R")
source("utilsRC.R")
m <- 100   # number of benchmark repetitions
n <- 50    # number of points to embed in 2 dimensions
# Complete design with unit weights/dissimilarities, stored both as
# lower-triangle vectors (wv, dv) and as full matrices (wm, dm).
wv <- dv <- rep (1, n * (n - 1) / 2)
wm <- dm <- 1 - diag (n)
set.seed(12345)  # reproducible random starting configurations
# Preallocated per-repetition user CPU times for each implementation.
user1 <- rep(0, m)
user2 <- rep(0, m)
user3 <- rep(0, m)
for (j in 1:m) {
xold <- rnorm(2*n)  # random 2-D starting configuration, as a length-2n vector
t1 <- system.time(h1 <- smacofR(wm, dm, 2, xold = matrix(xold, n, 2), eps = 1e-6, itmax = 1000), gcFirst = TRUE)
t2 <- system.time(h2 <- smacofRC(wv, dv, 2, xold = xold, eps = 1e-6, itmax = 1000), gcFirst = TRUE)
t3 <- system.time(h3 <- smacofRCU(wv, dv, 2, xold = xold, eps = 1e-6, itmax = 1000), gcFirst = TRUE)
# system.time()[1] is the "user.self" CPU component.
user1[j] <- t1[1]
user2[j] <- t2[1]
user3[j] <- t3[1]
}
f815ccced99696412587106a35563fc9211a009c | b4b5a5998f2fecc590a5df4026c0d12d83d46cf7 | /_examples/earthquakes.R | 4ab97441b5ac8e88a254fa7c59ed791b34580275 | [
"MIT",
"BSD-3-Clause"
] | permissive | crazycapivara/deckgl | 50335a155c99307e40b21b2f82561f3cf15afb17 | 1741c8bc84d69e26694d670879911a0d2bb2c5c2 | refs/heads/master | 2023-04-03T06:19:35.377655 | 2023-03-26T10:46:36 | 2023-03-26T10:46:36 | 145,043,708 | 83 | 10 | NOASSERTION | 2023-03-26T10:46:41 | 2018-08-16T22:07:48 | R | UTF-8 | R | false | false | 557 | r | earthquakes.R | library(sf)
library(deckgl)
# 2015 significant-earthquakes demo data set from the Mapbox GL JS docs.
url <- "https://docs.mapbox.com/mapbox-gl-js/assets/significant-earthquakes-2015.geojson"
earthquakes <- st_read(url) %>%
  dplyr::select(mag, geometry)  # keep only magnitude + point geometry
# Render the quakes as a deck.gl scatterplot. The "@=" strings appear to be
# per-feature JavaScript accessors (radius ~ mag^2, red channel ~ mag^3) —
# confirm against the deckgl package docs.
deckgl(
  latitude = 20.7927,
  longitude = 31.4606,
  zoom = 0.5
) %>%
  add_scatterplot_layer(
    data = earthquakes,
    get_position = ~geometry,
    get_radius = "@=10000 * Math.pow(mag, 2)",
    get_fill_color = "@=[Math.pow(mag, 3), 140, 10, 160]",
    radius_min_pixels = 1,
    # NOTE(review): argument names mix snake_case and camelCase
    # (radius_min_pixels vs radiusMaxPixels) — confirm both are accepted.
    radiusMaxPixels = 100,
    radiusScale = 1,
    filled = TRUE
  ) %>%
  add_basemap()
|
d1cc568872b442cb832868d5e25b9beb93930451 | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/gtkToolbarSetOrientation.Rd | c7f5fbb3f3869a527305fe112cbbcb94ee0719ae | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 624 | rd | gtkToolbarSetOrientation.Rd | \alias{gtkToolbarSetOrientation}
\name{gtkToolbarSetOrientation}
\title{gtkToolbarSetOrientation}
\description{
Sets whether a toolbar should appear horizontally or vertically.
\strong{WARNING: \code{gtk_toolbar_set_orientation} has been deprecated since version 2.16 and should not be used in newly-written code. Use \code{\link{gtkOrientableSetOrientation}} instead.}
}
\usage{gtkToolbarSetOrientation(object, orientation)}
\arguments{
\item{\verb{object}}{a \code{\link{GtkToolbar}}.}
\item{\verb{orientation}}{a new \code{\link{GtkOrientation}}.}
}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.