blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
511124e5c9061dbd6ffed97ad37ba65a1b768161 | 2f0816e6c17f2286aee8eeacead002ac9434621e | /man/ts.sum.Rd | 45207593a6111d8122a965e61347226afe1168a9 | [] | no_license | DanielOllech/dsa | d0b6a5aeff09c1fa88246005079dcff55cfbe035 | 76eadbcf3325c815c27c3061f835fae5ae7ce07e | refs/heads/master | 2020-04-14T16:17:54.568443 | 2019-01-03T09:00:46 | 2019-01-03T09:00:46 | 163,948,049 | 4 | 2 | null | 2019-01-03T08:58:54 | 2019-01-03T08:58:53 | null | UTF-8 | R | false | true | 480 | rd | ts.sum.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ts_sum.R
\name{ts.sum}
\alias{ts.sum}
\title{Add time series}
\usage{
ts.sum(...)
}
\arguments{
\item{...}{list of ts time series that are added together}
}
\description{
Sequentially add a set of time series
}
\details{
This function is used internally in dsa()
}
\examples{
ts.sum(list(ts(rnorm(100,10,1)), ts(rnorm(100,10,1)), ts(rnorm(100,10,1))))
}
\author{
Daniel Ollech
}
|
bddcb0da3c7dc3a44ccd07b1fccb1a9112426a31 | d48b835b2f129026bfb6fcab4f21a11c4d6bdc47 | /aoc_22.R | 17f73af4949437b28594357d499d3ccf66c5ca35 | [] | no_license | alex-raw/adventofcode_2020 | 075c37d0eb78c54a64e3d851a3b8cd0cd6b1bf48 | d366405006a63bfe7c2b218d63def8e25fef83a6 | refs/heads/master | 2023-05-28T09:59:28.881114 | 2021-06-18T10:15:21 | 2021-06-18T10:15:21 | 318,232,713 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 530 | r | aoc_22.R | parse_cards <- function(x) {
d <- scan(x, comment.char = "P", quiet = TRUE) |> matrix(ncol = 2)
list(d[, 1], d[, 2])
}
combat <- function(a, b) {
n <- seq_along(b)
ans <- cbind(a[n], b)
win <- ans[, 1] > ans[, 2]
list(a = c(a[-n], t(ans[win, ])),
b = c(t(ans[!win, 2:1])))
}
play <- function(x) repeat {
n <- lengths(x)
if (any(n == 0)) return(x[[i]])
i <- which.max(n)
x <- combat(x[[i]], x[[-i]])
}
solve <- function(x) sum(x * length(x):1)
# One
parse_cards("data/aoc_22") |> play() |> solve()
|
90fae57a6a2a8c3f9379a68ed5030d1dd21984c8 | e555d5b2e5eaa8476fff16131f6671c46ed946d6 | /R programming/corr.R | 37a02ffae00fa925e0d8b4b97d5f30a37a399f0e | [] | no_license | ahsanGoheer/datasciencecoursera | 2eac3cfa9da1c68117ead40beca6e84e44de32c4 | b9cabf0bcf182f0eba35cc7b288d4503d1ceb46a | refs/heads/master | 2022-11-23T17:44:35.503204 | 2020-07-21T22:46:03 | 2020-07-21T22:46:03 | 280,212,832 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 670 | r | corr.R | corr<-function(directory,threshold=0)
{
currentWorkingDirectory<-paste('./',directory,sep="")
filesInDirectory<-list.files(currentWorkingDirectory)
complete_cases=complete(directory)
complete_cases<-complete_cases[complete_cases$nob>=threshold,]
result<-numeric(0)
if(nrow(complete_cases)>0)
{
for(i in complete_cases$id)
{
loaded_data<-read.csv(paste(currentWorkingDirectory,'/',filesInDirectory[i],sep=""))
filtered_data<-loaded_data[(!is.na(loaded_data$nitrate)),]
filtered_data<-filtered_data[(!is.na(filtered_data$sulfate)),]
result<-c(result,cor(filtered_data['sulfate'],filtered_data['nitrate']))
}
}
result
} |
2e8d5b77cf4ad96c827f0e6b8a8c014bd339e684 | fcd23fcee0a3db07627bc2edf6ccb9fb8503661c | /R/kcsmooth.R | f95749e285139c7c6f22e04695710486f7a79fd0 | [
"MIT"
] | permissive | arnijohnsen/arjtools | 1bacb9df0095dad157d575e5df921890192bd52f | a92fbd868961f5ea1155c9a458000b241b995a11 | refs/heads/master | 2021-01-10T13:58:55.279889 | 2017-04-17T10:32:16 | 2017-04-17T10:32:16 | 51,922,814 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,446 | r | kcsmooth.R | #' Smooth samples with KCsmart algorithm
#'
#' Wrapper to smooth aCGH data with algorithm from KCsmart.
#'
#' @param dat data.table with aCGH data. First two colums should be names chrom and pos
#' and contain information about the location of input probes. Chromosomes should be
#' names "1", "Y", etc.
#' @param mirrorLocs List containing the chromosome start, centromere and end positions
#' @param sigma The kernel width
#' @param sampleDensity The sample point matrix resolution
#' @param verbose If set to false, no progress information is displayed
#' @param maxmem This parameter controls memory usage, set to lower value to lower memory consumption
#' @param what character, determines what data should be returned. "pos" for only positive KCscore,
#' "neg" for only negative KCscore, "both" for both positive and negative KCscore.
#' @param groups Numeric vector. If NULL is passed (default), each sample is smoothed seperately.
#' If a numeric vector is passed, it indicates which samples should be grouped together before
#' smoothing. For example, groups = c(1,1,2) indicates that the first two samples will be grouped
#' before smothing and the third will be smoothed seperately.
#' If a non-NULL value is passed, the return data frame will have columns equal to the number of groups passed.
#' (This is the same as passing group = c(1,2,3,...)).
#' @return data.table with smoothed values. First column is chromosome, second column indicates if value is from positive
#' or negative smoothed values. All other columns are smoothed values for each sample or group.
#' @import data.table
#' @export
kcsmooth <- function(dat, mirrorLocs, sigma = 1e+06, sampleDensity = 50000, maxmem = 1000, verbose = T, what = "both", groups = NULL){
if(!(what %in% c("pos", "neg", "both"))){
stop("what must be pos, neg or both")
}
if(!(all(names(dat)[1:2] == c("chrom", "maploc")))){
setnames(dat, names(dat)[1:2], c("chrom", "maploc"))
}
if(stringr::str_detect(dat[1]$chrom, "^chr")){
dat[,chrom := gsub("chr", "", chrom)]
}
n <- dim(dat)[2]
if(is.null(groups)){
groups <- 1:(n-2)
}
n_groups <- length(unique(groups))
spm_to_dat <- function(spm, what){
spm_unlist <- unlist(spm@data)
dat <- data.table(
chrom = stringr::str_replace(names(spm_unlist), "\\..*", ""),
posneg = stringr::str_extract(names(spm_unlist), "[a-z]+"),
num = stringr::str_extract(names(spm_unlist), "[0-9]+$"),
value = spm_unlist)
if(what != "both"){
dat <- dat[posneg == what]
}
return(dat)
}
# Smooth first group
spm <- KCsmart::calcSpm(dat[,c(1:2, which(groups == 1)+2), with = F], hsMirrorLocs, sigma, sampleDensity, verbose)
all_dat <- spm_to_dat(spm, what)
# Smooth groups 2, 3, ...
if (n_groups > 1){
for(i in 2:n_groups){
spm <- KCsmart::calcSpm(dat[,c(1:2, which(groups == i)+2), with = F], hsMirrorLocs, sigma, sampleDensity, verbose)
all_dat[,paste("col", i, sep = ""):=spm_to_dat(spm, what)$value]
}
}
if(identical(groups, 1:(n-2))){
setnames(all_dat, c("chrom", "what", "num", names(dat)[3:n]))
} else {
setnames(all_dat, c("chrom", "what", "num", paste("group", 1:n_groups, sep = "")))
}
return(all_dat)
}
#' Create index from sig_region object
#'
#' Function to extract information from a sig_region object, to a list of all kcprobes within
#' those regions
#'
#' @param sig_regions A sig_regions object from kcsmart
#' @return data.table with 4 columns: number of gain/loss, chrom(osome) of region, whether its a gain or loss (pos/neg)
#' and the num(ber) of kcprobe (genomic location of kcprobes is just sampleDensity*(number-1))
#' @import data.table
#' @export
kcmakefilter <- function(sig_regions){
dat_list <- list()
n_pos <- length(sig_regions@gains)
for(i in 1:n_pos){
dat_list[[i]] <- data.table(region = i,
chrom = sig_regions@gains[[i]]$chromosome,
what = "pos",
num = sig_regions@gains[[i]]$x)
}
n_neg <- length(sig_regions@losses)
for(i in 1:n_neg){
dat_list[[i+n_pos]] <- data.table(region = i,
chrom = sig_regions@losses[[i]]$chromosome,
what = "neg",
num = sig_regions@losses[[i]]$x)
}
dat <- rbindlist(dat_list)
dat$num <- as.numeric(dat$num)
return(dat)
}
|
f5a3e3d2bc8708d6f55ab6fc8dd42aaa315f0c1e | c65c0bab4d633385efa249e38cf45818754afaff | /shinyapps/app27/app.R | a84c87519f0f73bca4fdfe9e34da8695196c854d | [] | no_license | imcullan/Shiny-Tutorial-Rgitbook | 06762c2ea4cf9900401f68a0c3a9964615376b93 | 1db7814f45b5e10846876a493d4946c73d270aa3 | refs/heads/master | 2021-01-22T18:32:35.081445 | 2016-08-26T17:40:30 | 2016-08-26T17:40:30 | 66,667,107 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 370 | r | app.R | library(shinyjs)
server <- function(input, output, session) {
observeEvent(input$button, {
toggle("myslider")
})
}
ui <- fluidPage(
useShinyjs(),
h3("A quick demonstration of shinyjs functionality"),
actionButton("button", "Toggle slider"),
sliderInput("myslider", "A slider:", min=0, max=1000, value=500)
)
shinyApp(ui = ui, server = server) |
f2841e5ac9de3dea5455906c90469ebda9247453 | 8b78eb1725b05a9fb8edd97d65efef62c22e4c4a | /perf_tests/indelible/indelible_configure.R | fcb6a0f2292ede183d37f0250f44c09c6a0aec1c | [] | no_license | lucasnell/jlp_ms | e728f5cf5eca69dc4ffdb43480e4fba7e71f1377 | 07328fa914397390f42e55f238c6bc4024bc6513 | refs/heads/master | 2020-04-30T18:41:41.268084 | 2020-04-17T14:43:21 | 2020-04-17T14:43:21 | 177,016,376 | 1 | 0 | null | 2020-02-28T14:54:16 | 2019-03-21T20:08:34 | TeX | UTF-8 | R | false | false | 3,126 | r | indelible_configure.R |
#'
#' This script is used inside `test.R` to make the trees and `configure.txt`
#' for indelible.
#' It's not meant to be run directly.
#'
#' The objects gsize, mdepth, and dir should exist in global environment before
#' running this script.
#'
stopifnot("gsize" %in% ls())
stopifnot("mdepth" %in% ls())
stopifnot("dir" %in% ls())
# Start new environment to do all this stuff without cluttering global:
env <- new.env()
# Set parameters
env$gsize <- gsize
env$mdepth <- mdepth
env$dir <- dir
# Do the configuring:
with(env, {
library(ape)
source("write_tree.R")
fn <- sprintf("../in_files/scrm_%i.tree", gsize %/% 1e6L)
full_file <- readLines(fn)
# numbers of bp per tree
nbp <- full_file[grepl("^\\[", full_file)]
nbp <- sapply(strsplit(nbp, "\\("), `[`, i = 1)
# Create vector to group gene trees into chromosomes
chroms <- numeric(sum(full_file == "//"))
j = 0
for (i in 1:length(full_file)) {
if (full_file[i] == "//") {
j = j + 1
next
}
if (grepl("^\\[", full_file[i])) chroms[j] <- chroms[j] + 1
}
# Starting and ending points for groups:
chroms <- cbind(c(1, 1 + head(cumsum(chroms), -1)), cumsum(chroms))
# Trees themselves:
trees <- read.tree(fn)
# Scale to have max depth of mdepth
trees <- lapply(trees, function(x) {
x$edge.length <- mdepth * x$edge.length / max(node.depth.edgelength(x))
return(x)
})
names(trees) <- NULL
# For jackalope version:
tree_strings <- apply(chroms, 1,
function(.xy) {
inds <- (.xy[1]):(.xy[2])
string <- paste0(nbp[inds], sapply(trees[inds], write_tree),
collapse = "\n")
paste0("//\n", string, "\n\n")
})
writeLines(paste0(tree_strings, collapse = "\n\n"),
paste0(dir, "tree.tree"))
# INDELible version:
n_trees <- length(trees)
tree_strs <- sapply(1:n_trees, function(i) {
sprintf("[TREE] t%02i %s\n", i, write_tree(trees[[i]]), mdepth)
})
part_strs <- sapply(1:n_trees, function(i) {
partition_size <- as.integer(gsub("\\[|\\]", "", nbp[[i]]))
sprintf("[PARTITIONS] region%02i [t%02i hky_model %i]\n", i, i, partition_size)
})
evolve_strs <- c("[EVOLVE] region01 1 out01\n",
sapply(2:n_trees,
function(i) sprintf(" region%02i 1 out%02i\n",
i, i)))
# cat(paste(tree_strs, collapse = ""))
# cat(paste(part_strs, collapse = ""))
# cat(paste(evolve_strs, collapse = ""))
hdr <- readLines("indelible_header.txt")
hdr <- sapply(hdr, function(x) paste0(x, "\n"))
lines <- c(hdr, "\n\n", tree_strs, "\n\n", part_strs, "\n\n", evolve_strs)
out_fn <- paste0(dir, "control.txt")
writeLines(paste(lines, collapse = ""), out_fn)
})
# Now remove the environment and everything inside:
rm(env)
|
a0452007300c6d34a809f76617b234bf3447d513 | c5c37812cb4d710bee0ff3b95a92cb43cba074a1 | /combined_gsea_plot.R | 0c06350d49751b36c7a2c7d02804472dca41a673 | [] | no_license | almreis/PTSD_RNA_mods | 954f4872d3f3f44ba944ff2ac236b2c3dda2208b | 2c929000847c16b184f41753f6cf0bd1ffc4097a | refs/heads/main | 2023-04-16T22:01:07.695615 | 2022-06-30T07:29:03 | 2022-06-30T07:29:03 | 508,541,223 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 975 | r | combined_gsea_plot.R | library(data.table)
library(readxl)
library(reshape2)
male <- data.table(read_excel("/Users/andre/Documents/GenomeTech/PTSD_experiment/gsea_analysis_biological_process.xlsx",sheet = "Male"))
female <- data.table(read_excel("/Users/andre/Documents/GenomeTech/PTSD_experiment/gsea_analysis_biological_process.xlsx",sheet = "Female"))
all <- rbind(male[,.(Term,Adjusted.P.value,Sex="Male")],female[,.(Term,Adjusted.P.value,Sex="Female")])
all <- data.table(dcast(all,Term~Sex,value.var = "Adjusted.P.value"))
all[,Term:=tstrsplit(Term,"(",fixed=TRUE)[1]]
all[,Mean:=ifelse(Male,Male+Female)/2]
all <- all[order(Mean,Male,Female)]
top10 <- all[1:10]
top10 <- data.table(melt(top10[,.(Term,Male,Female)],id.vars = "Term",variable.name = "Sex",value.name = "Adjuste.P.Value"))
top10$Term <- factor(top10$Term,levels=rev(all[1:10,Term]))
ggplot(top10,aes(Sex,Term,fill=-log10(Adjuste.P.Value)))+
geom_tile()+
theme_bw()+
coord_equal()+
labs(x=NULL,y=NULL,fill="P-value")
|
cea313598caecb2b970993902f5e18c5207991e0 | 3080a9517c95a23265a3cb5f5eb477cf8eb4bfae | /WeekendGraphs.R | a1be58671dbefc0b1708bff34d85db7c085973a8 | [] | no_license | MGreels/RepData_PeerAssessment1 | a93584f219a4d60ba064dbb4b67252c945758a0e | 336768fc6c8b8195758bec463d70276c212863a3 | refs/heads/master | 2020-07-06T07:45:08.804073 | 2019-08-20T11:20:02 | 2019-08-20T11:20:02 | 202,944,756 | 0 | 0 | null | 2019-08-18T00:50:19 | 2019-08-18T00:50:18 | null | UTF-8 | R | false | false | 500 | r | WeekendGraphs.R | library(ggplot2)
library(chron)
ImpIAWE <- aggregate(ImpDat$steps,
list(Interval = ImpDat$interval,
weekend = is.weekend(as.Date(ImpDat$date))),
mean)
ImpIAWE$WE <- factor(ImpIAWE$weekend, labels = c("Weekday", "Weekend"))
WEplot <- ggplot(data=ImpIAWE, aes(x = Interval, y = x)) +
geom_bar(stat = "identity") +
facet_grid(WE~.,)
WEplot
## Find Maximum interval average
IntAves$Interval[which.max(IntAves$x)] |
8026ac7465b2f871ed5f0141a3121055e967883b | d1be2734671691af7f260664d9a546f69440886f | /ReadFCS.R | 59c07887ebd78d68ad80ef3ef1be1433489c59fb | [
"MIT"
] | permissive | nrweir/FCanalysis | b73d5f0193dba223d25930b0420aff172e300635 | 56704c053978c55c15693921ea4703f659990f51 | refs/heads/master | 2021-01-15T15:31:04.012290 | 2016-06-27T02:03:22 | 2016-06-27T02:03:22 | 40,327,807 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 540 | r | ReadFCS.R | ReadFCS <- function(path){
# Loads an FCS file as a data.frame with measurements as headers.
#
# Args:
# path: The file path containing the FCS file to be read.
#
# Returns:
# A data.frame with columns being the different measured parameters and
# rows being individual measured events.
require(flowCore)
sample_ff <- read.FCS(path) # returns initial flowFrame object to reshape
sample_df <- data.frame(exprs(sample_ff))
sample_df$sample_ID <- description(sample_ff)$'TUBE NAME'
return(sample_df)
} |
b4b9816c3271ae429bc205a34d5b87a9d72811b6 | a37a96762298b306925e151679d7ab660a773aa0 | /R/archive/test_refDmati.R | d43ed21e4380af8bc9778e0e3191ae3ccc5524a3 | [] | no_license | chrissuthy/telemetry-informed-cost | 59033894593518c0813deab3e59743cc769b018c | 390a46482b7a1f3a98637aafe63137386938742a | refs/heads/master | 2023-06-22T20:23:21.405508 | 2021-07-14T21:48:15 | 2021-07-14T21:48:15 | 277,119,694 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 805 | r | test_refDmati.R | test_sbar_cell <- extract(local_ss_r, sbar, cellnumbers=T)[1]
Dac <- costDistance(
tr1Corr,
c(sbar_x, sbar_y),
local_ss %>% select(x,y) %>% as.matrix())
table(Dmat_i[test_sbar_cell,] - Dac)
ref.Dmat_i <- function(from, to, local_ss_r, Dmat_i){
from_cell <- as.numeric(raster::extract(x = local_ss_r, y = from, cellnumbers=T)[,1])
to_cell <- as.numeric(raster::extract(x = local_ss_r, y = to, cellnumbers=T)[,1])
result <- matrix(Dmat_i[from_cell, to_cell], nrow = nrow(from))
return(result)
}
ref.Dmat_i(
from = rbind(sbar,
to = local_ss %>% select(x,y) %>% as.matrix(),
local_ss_r, Dmat_i)
ref.Dmat_i(
from = local_ss %>% filter(cell == s.grid[i-1]) %>% select(x,y) %>% as.matrix(),
to = local_ss %>% select(x,y) %>% as.matrix(),
local_ss_r,
Dmat_i
)
|
3549937db9052e64e61ec84124fd3f676bf46bef | b51dc0b6dfbcbdbc6db973ea26d5483ea6719671 | /R/estimate_Rs.R | 7de5453cccac9e0cc0927122e19fe4bc3c7c7e7b | [] | no_license | Bigger-Physics/EpiEstim | a46ca13e569d1eea8da487c041758ee7bd108c1d | 05ad4b17a9b08764f0af0c9e16280d3246848fd5 | refs/heads/master | 2022-04-26T22:34:03.967862 | 2020-04-28T11:41:39 | 2020-04-28T11:41:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,023 | r | estimate_Rs.R | #' Estimated Instantaneous Reproduction Numbers by distinguishing
#' imported, infected by imported, and infected by local cases
#'
#' \code{estimate_Rs} estimates the reproduction numbers of an epidemic, given
#' the incidence time series and the serial interval distribution. By default,
#' the incidences are divided into three groups: imported, infected by imported,
#' and infected by local cases. Therefore the function needs three time series
#' and estimates two Instantaneous Reproduction Numbers. In a more general
#' framework, the incidences are divided into five groups: imported from a
#' designated region, imported from other regions, infected by imported from the
#' designated region, infected by imported from other regoins, and infected by
#' local cases. Therefore the function needs five time series and estimates
#' three Instantaneous Reproduction Numbers.
#'
#' @param incid One of the following
#' \itemize{
#'
#' \item{A dataframe of non-negative integers with three columns, so that
#' \code{incid$D_im} contains the incidence of imported cases, \code{incid$I_im}
#' contains the incidence infected by the imported cases, and \code{incid$I_lc}
#' contains the incidence infected by the local cases. If the dataframe
#' contains a column \code{incid$dates}, this is used for plotting.
#' \code{incid$dates} must contains only dates in a row.}
#'
#' \item{A dataframe of non-negative integers with five columns, so that
#' \code{incid$D_im} contains the incidence imported from a designated region,
#' \code{incid$O_im} contains the incidence imported from a other regions,
#' \code{incid$I_imd} contains the incidence infected by the imported from the
#' designated region, \code{incid$I_imo} contains the incidence infected by the
#' imported from other regions, and \code{incid$I_lc} contains the incidence
#' infected by the local cases. If the dataframe contains a column
#' \code{incid$dates}, this is used for plotting. \code{incid$dates} must
#' contains only dates in a row.}
#'
#' \item{An object of class \code{\link{incidence}}}
#'
#' }
#'
#' @param method One of "non_parametric_si", "parametric_si", "uncertain_si",
#' "si_from_data" or "si_from_sample" (see details).
#'
#' @param si_sample For method "si_from_sample" ; a matrix where each column
#' gives one distribution of the serial interval to be explored (see details).
#'
#' @param si_data For method "si_from_data" ; the data on dates of symptoms of
#' pairs of infector/infected individuals to be used to estimate the serial
#' interval distribution (see details).
#'
#' @param config An object of class \code{estimate_R_config}, as returned by
#' function \code{make_config}.
#'
#' @return {
#' an object of class \code{estimate_Rs}, with components:
#' \itemize{
#'
#' \item{model}{: The model applied to the incidence data, one of "2-region",
#' "3-region"}
#'
#' \item{R_im}{: an object of class \code{estimate_R} (2-region model)}
#'
#' \item{R_imd}{: an object of class \code{estimate_R} (3-region model)}
#'
#' \item{R_imo}{: an object of class \code{estimate_R} (3-region model)}
#'
#' \item{R_lc}{: an object of class \code{estimate_R} (both models)}
#'
#' \item{method}{: the method used to estimate R, one of "non_parametric_si",
#' "parametric_si", "uncertain_si", "si_from_data" or "si_from_sample"}
#'
#' \item{si_distr}{: a vector or dataframe (depending on the method) containing
#' the discrete serial interval distribution(s) used for estimation}
#'
#' \item{SI.Moments}{: a vector or dataframe (depending on the method)
#' containing the mean and std of the discrete serial interval distribution(s)
#' used for estimation}
#'
#' \item{I}{: the time series of total incidence (sum of all the cases)}
#'
#' \item{D_im}{: the time series of incidence imported from a designated region
#' (in both models)}
#'
#' \item{O_im}{: the time series of incidence imported from a other regions
#' (3-region model)}
#'
#' \item{I_im}{: the time series of incidence infected by the imported cases
#' (2-region model)}
#'
#' \item{I_imd}{: the time series of incidence infected by the imported from the
#' designated region (3-region model)}
#'
#' \item{I_imo}{: the time series of incidence infected by the imported from the
#' other regions (2-region model)}
#'
#' \item{I_lc}{: the time series of incidence infected by the local cases
#' (in both model)}
#'
#' \item{dates}{: a vector of dates corresponding to the incidence time series}
#' }
#' }
#'
#' @details
#' This function extends the original \code{estimate_Rs} by introducing a
#' multi-region model on the incidence data. By default (two-region model), the
#' incidences are divided into three groups: imported, infected by imported,
#' and infected by local cases. Therefore the function needs three time series
#' and estimates two Instantaneous Reproduction Numbers. In a more general
#' framework (three-region model), the incidences are divided into five groups:
#' imported from a designated region, imported from other regions, infected by
#' imported from the designated region, infected by imported from other regoins,
#' and infected by local cases. Therefore the function needs five time series
#' and estimates three Instantaneous Reproduction Numbers.
#'
#' @seealso \code{\link{estimate_R}} \code{\link{discr_si}} \code{\link{make_config}}
#'
#' @author Jinshan Wu \email{jinshanw@bnu.edu.cn}
#'
#' @references {
#' Furthre improved EpiEstim by distinguishing imported,infected by imported,
#' infected by local cases and its application to COVID-19 in China.
#' Cori, A. et al. A new framework and software to estimate time-varying
#' reproduction numbers during epidemics (AJE 2013).
#' Wallinga, J. and P. Teunis. Different epidemic curves for severe acute
#' respiratory syndrome reveal similar impacts of control measures (AJE 2004).
#' Reich, N.G. et al. Estimating incubation period distributions with coarse
#' data (Statis. Med. 2009)
#' }
#'
#' @importFrom incidence incidence
#' @export
#' @examples
#' ##
# SI_MEAN = 8.4
# SI_STD = 3.8
# T <- 50
# R_im <- rep(6, T)
# R_lc <- rep(2, T)
# sim_data <- simulate_Is(R_im, R_lc,
# mean_im = 10,
# mean_si = SI_MEAN, std_si = SI_STD)
# incid <- data.frame(D_im = sim_data$D_im,
# I_im = sim_data$I_im,
# I_lc = sim_data$I_lc)
# result <- estimate_Rs(incid,
# method="parametric_si",
# config = make_config(list(
# mean_si = SI_MEAN,
# std_si = SI_STD)))
# plot(result)
#'
estimate_Rs <- function(incid,
method = c(
"non_parametric_si",
"parametric_si",
"uncertain_si",
"si_from_data",
"si_from_sample"
),
si_data = NULL,
si_sample = NULL,
config = make_config(incid = incid, method = method)) {
ret <- process_Is(incid)
incid <- ret$incid
model <- ret$model
if (model == "2-region") {
incid_im <- data.frame(
dates = incid$dates,
local = incid$I_im,
imported = incid$D_im
)
config$group <- "imported"
result_R_im <- estimate_R(
incid = incid_im,
method = method,
si_data = si_data,
si_sample = si_sample,
config = config
)
incid_lc <- data.frame(
dates = incid$dates,
local = incid$I_lc,
imported = incid$I_im
)
config$group <- "all"
result_R_lc <- estimate_R(
incid = incid_lc,
method = method,
si_data = si_data,
si_sample = si_sample,
config = config
)
results <- list(R_im = result_R_im)
results$R_lc = result_R_lc
results$model = model
results$method <- result_R_lc$method
results$si_distr <- result_R_lc$si_distr
results$SI.Moments <- result_R_lc$SI.Moments
results$dates <- result_R_lc$dates
results$I <- rowSums(incid[, c("D_im", "I_im", "I_lc")])
results$D_im <- incid$D_im
results$I_im <- incid$I_im
results$I_lc <- incid$I_lc
} else if (model == "3-region") {
incid_imd <- data.frame(
dates = incid$dates,
local = incid$I_imd,
imported = incid$D_im
)
config$group <- "imported"
result_R_imd <- estimate_R(
incid = incid_imd,
method = method,
si_data = si_data,
si_sample = si_sample,
config = config
)
incid_imo <- data.frame(
dates = incid$dates,
local = incid$I_imo,
imported = incid$O_im
)
config$group <- "imported"
result_R_imo <- estimate_R(
incid = incid_imo,
method = method,
si_data = si_data,
si_sample = si_sample,
config = config
)
incid_lc <- data.frame(
dates = incid$dates,
local = incid$I_lc,
imported = incid$I_imd + incid$I_imo
)
config$group <- "all"
result_R_lc <- estimate_R(
incid = incid_lc,
method = method,
si_data = si_data,
si_sample = si_sample,
config = config
)
results <- list(R_imd = result_R_imd)
results$R_imo = result_R_imd
results$R_lc = result_R_lc
results$model = model
results$method <- method
results$dates <- result_R_imd$dates
results$I <-
rowSums(incid[, c("D_im", "O_im", "I_imd", "I_imo", "I_lc")])
results$D_im <- incid$D_im
results$O_im <- incid$O_im
results$I_imd <- incid$I_imd
results$I_imo <- incid$I_imo
results$I_lc <- incid$I_lc
} else {
stop("model must be either '2-region' or '3-region'.")
}
class(results) <- "estimate_Rs"
return(results)
}
# check and process incidence data
process_Is <- function(incid) {
if (inherits(incid, "incidence")) {
I_inc <- incid
incid <- as.data.frame(I_inc)
incid$I <- rowSums(incidence::get_counts(I_inc))
}
if (!is.data.frame(incid)) {
stop("incid must be a dataframe.")
}
model = ""
if (all(c("D_im", "I_im", "I_lc") %in% names(incid))) {
model = "2-region"
} else if (all(c("D_im", "O_im", "I_imd", "I_imo", "I_lc") %in% names(incid))) {
model = "3-region"
} else {
stop(
"incid must be a dataframe with 3 columns called
'D_im', 'I_im' and 'I_lc', or with 5 columns called
'D_im', 'O_im', 'I_imd', 'I_imo', and 'I_lc'."
)
}
incid[which(is.na(incid))] <- 0
date_col <- names(incid) == "dates"
if (any(date_col)) {
if (any(incid[, !date_col] < 0)) {
stop("incid must contain only non negative integer values.")
}
} else {
if (any(incid < 0)) {
stop("incid must contain only non negative integer values.")
}
}
if (!is.null(incid$dates)) {
incid$dates <- check_dates(incid)
} else {
incid$dates <- as.numeric(seq_len(nrow(incid)))
}
return(list(incid = incid, model = model))
}
|
575aefeef85f7cb3a19df303b7aa0ba70db1d0be | 8f8eac85cfbf8d3bc768318848ec964cb297b1cb | /casen/r-script/scripts/2_fix_comunas_2.R | c5f53ee109079316e54aa87e6c572eb44a77c1a1 | [] | no_license | jnaudon/datachile-etl | 5231a3762dd32f3f3def4d568fc63934d603cf8b | 8fa577378d38f8d63f6dfdb00ed515bbb439f154 | refs/heads/master | 2023-03-23T00:36:35.698292 | 2019-03-23T03:30:16 | 2019-03-23T03:30:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,357 | r | 2_fix_comunas_2.R | nomatch_90_03 <- comunas90_03 %>%
left_join(comunas2, by = c("comu2" = "comuna_name")) %>%
filter(is.na(comuna_datachile_id)) %>%
arrange(comu2)
nomatch_06 <- comunas06 %>%
left_join(comunas2, by = c("comu2" = "comuna_name")) %>%
filter(is.na(comuna_datachile_id)) %>%
arrange(comu2)
nomatch_09 <- comunas09 %>%
left_join(comunas2, by = c("comu2" = "comuna_name")) %>%
filter(is.na(comuna_datachile_id)) %>%
arrange(comu2)
nomatch_11_15 <- comunas11_15 %>%
left_join(comunas2, by = c("comu2" = "comuna_name")) %>%
filter(is.na(comuna_datachile_id)) %>%
arrange(comu2)
###
match_90_03 <- nomatch_90_03 %>%
select(-comuna_datachile_id) %>%
mutate(comu3 = c("con cn", "coihaique", "nueva imperial", "llay llay", "los lamos",
"los ngeles", "marchige", "san francisco de mostazal", "aysn", "teodoro schmidt", "til-til")) %>%
left_join(comunas2, by = c("comu3" = "comuna_name"))
match_06 <- nomatch_06 %>%
select(-comuna_datachile_id) %>%
mutate(comu3 = c("aysn", "con cn", "coihaique", "calera", "los lamos", "los ngeles",
"marchige", "san francisco de mostazal", "teodoro schmidt", "til-til")) %>%
left_join(comunas2, by = c("comu3" = "comuna_name"))
match_09 <- nomatch_09 %>%
select(-comuna_datachile_id) %>%
mutate(comu3 = c( "con cn", "coihaique", "calera", "los lamos", "marchige",
"san francisco de mostazal", "ollague", "til-til")) %>%
left_join(comunas2, by = c("comu3" = "comuna_name"))
match_11_15 <- nomatch_11_15 %>%
select(-comuna_datachile_id) %>%
mutate(comu3 = c( "alto bo bo", "con cn", "coihaique", "llay llay", "marchige",
"san francisco de mostazal", "paihuano", "til-til", "trehuaco")) %>%
left_join(comunas2, by = c("comu3" = "comuna_name"))
###
comunas90_03 <- comunas90_03 %>%
left_join(match_90_03 %>% select(comu, comu3), by = "comu") %>%
mutate(comu4 = ifelse(is.na(comu3), comu2, comu3)) %>%
left_join(comunas2, by = c("comu4" = "comuna_name")) %>%
select(comu, comu4, comuna_datachile_id) %>%
rename(comuna_casen_id = comu, comuna_name = comu4)
comunas06 <- comunas06 %>%
left_join(match_06 %>% select(comu, comu3), by = "comu") %>%
mutate(comu4 = ifelse(is.na(comu3), comu2, comu3)) %>%
left_join(comunas2, by = c("comu4" = "comuna_name")) %>%
select(comu, comu4, comuna_datachile_id) %>%
rename(comuna_casen_id = comu, comuna_name = comu4)
comunas09 <- comunas09 %>%
left_join(match_09 %>% select(comu, comu3), by = "comu") %>%
mutate(comu4 = ifelse(is.na(comu3), comu2, comu3)) %>%
left_join(comunas2, by = c("comu4" = "comuna_name")) %>%
select(comu, comu4, comuna_datachile_id) %>%
rename(comuna_casen_id = comu, comuna_name = comu4)
comunas11_15 <- comunas11_15 %>%
left_join(match_11_15 %>% select(comu, comu3), by = "comu") %>%
mutate(comu4 = ifelse(is.na(comu3), comu2, comu3)) %>%
left_join(comunas2, by = c("comu4" = "comuna_name")) %>%
select(comu, comu4, comuna_datachile_id) %>%
rename(comuna_casen_id = comu, comuna_name = comu4)
try(dir.create("ids"))
fwrite(comunas90_03, "ids/comunas90_03.csv")
fwrite(comunas06, "ids/comunas06.csv")
fwrite(comunas09, "ids/comunas09.csv")
fwrite(comunas11_15, "ids/comunas11_15.csv")
rm(list = ls(pattern = "nomatch_"))
rm(list = ls(pattern = "match_"))
|
ba3ad78eda9c53375dcc075ebd6e9dba010a5a21 | c55a8c78158fe88ee80a8db35bdbaa2e324ed0bb | /cachematrix.R | 1653f10fab1db40f489b7efda4a2c4269641175a | [] | no_license | Mary-Bobo/ProgrammingAssignment2 | c3abe32812315d42172f23b4fef8be0bb9a21375 | 669581a054375ad355709009be97e13c7acd25ee | refs/heads/master | 2021-05-03T05:05:30.911383 | 2018-02-13T13:56:37 | 2018-02-13T13:56:37 | 120,631,851 | 0 | 0 | null | 2018-02-07T15:19:53 | 2018-02-07T15:19:52 | null | UTF-8 | R | false | false | 2,307 | r | cachematrix.R | ## These two functions together serve to store the inverse matrix in cache.
## The first function creates a "matrix" - a list of functions that serve to operate with matrix and with its invertion.
## The second function searches for inverse for a "matrix" specified by argument.
## If the inverse hasn't been calculated yet, it will be calculated and stored to cache.
## Function makeCacheMatrix takes a "matrix" as an argument. After that, the inv variable is set to NULL value,
## because the inversion hasn't been calculated yet.
## The function returns a list of 4 functions to operate with a matrix and its inverse
makeCacheMatrix <- function(x = matrix()) {
inv <- NULL
set <- function(y) {
x <<- y
inv <<- NULL
}
get <- function() x
setinverse <- function(inverse) inv <<- inverse
getinverse <- function() inv
list(set = set, get = get,
setinverse = setinverse,
getinverse = getinverse)
}
## Function cacheSolve takes a "matrix" (created by makeCacheMatrix) and first tries to fetch the inverse from the cache.
## If that is successful (an inverse has already been calculated), the message "getting cached data" is displayed
## and the function returns the cached inverse matrix.
## Otherwise, the function checks whether the input matrix is square. If it is not, an error message is displayed
## and NULL is returned (note: because of the early return, NULL is not stored in the cache).
## If the matrix is square, the inverse is calculated with solve(), the result is saved in the cache and also
## returned by the function.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  # Reuse a previously computed inverse when one is available.
  cached <- x$getinverse()
  if (!is.null(cached)) {
    message("getting cached data")
    return(cached)
  }

  m <- x$get()

  # Inversion is only defined for square matrices; bail out with NULL.
  if (nrow(m) != ncol(m)) {
    message("impossible to calculate invertion of rectangular matrix")
    return(NULL)
  }

  # Compute, cache, and return the inverse.
  result <- solve(m, ...)
  x$setinverse(result)
  result
}
|
71d85ae1257af62944ec55962202c8d0743e1245 | 2ddd0d8d4fc8f009fa51d0fde61d1d40825d4d2d | /man/glm_poisson.Rd | 2b23d935e741459df39de6d991385bafa22478bb | [
"MIT",
"CC-BY-4.0",
"CC0-1.0"
] | permissive | januz/comsldpsy-1 | 7029dbc5c0dcbe7893ca5ee67c59b1f5f81c0018 | fdf6248c53fdbc7cef4885fb630d5d188d8ff081 | refs/heads/master | 2020-04-10T21:36:55.722283 | 2018-12-12T17:56:54 | 2018-12-12T17:56:54 | 161,301,150 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 569 | rd | glm_poisson.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/stats.R
\name{glm_poisson}
\alias{glm_poisson}
\title{Poisson regression}
\usage{
glm_poisson(data)
}
\arguments{
\item{data}{Input data}
}
\value{
tbl. A tidy data frame with p-value, coefficients, and CIs for
the model
}
\description{
Computes a generalized linear model with log link (Poisson regression)
with number of SLDs as predictor and the number of psychopathological
areas as outcome variable.
}
\seealso{
\code{\link[stats:glm]{stats::glm()}} \code{\link[=tidy_glm]{tidy_glm()}}
}
|
39f89afaff9380106f1270f43fb7b1a5cd6681e4 | 19145b264472edab8eda2cc85cc93722e4729dee | /Data Manipulation in R_Naveen.R | bb6444b5451598e3950bebd57bcd8858a49c3867 | [] | no_license | geeknaveen/Session-5-Data-Manipulation-in-R | 1b1aa5a06ee6cb7eb2bc63819df69be63a0ec1a0 | 8d1c11e7d0cfd6e45c38ada771acd5fa955389de | refs/heads/master | 2021-01-16T00:03:26.597511 | 2017-08-10T18:17:20 | 2017-08-10T18:17:20 | 99,952,428 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,917 | r | Data Manipulation in R_Naveen.R | # Session 5: Data Manipulation in R::::::::
# Session 5: Data Manipulation in R ------------------------------------------

# Column names for the Olympic data set (the raw file has no usable header).
# Note: read.csv() converts the spaces to dots, e.g. "Total Medals" becomes
# "Total.Medals" in the resulting data frame.
col_names <- c("Athlete", "Age", "Country", "Year", "Closing Date", "Sport",
               "Gold Medals", "Silver Medals", "Bronze Medals", "Total Medals")
View(col_names)

# Read the tab-separated Olympic data file; empty fields become NA.
setwd("F:/ACADGILD/Business Analytics With R/ASSIGNMENTS")
olympicdata <- read.csv("olympic_data.csv", stringsAsFactors = FALSE,
                        col.names = col_names, sep = "\t", na.strings = "")
View(olympicdata)

# 1) Consider only those participants who have all the data points.
new_olympic_data <- olympicdata[complete.cases(olympicdata), ]
View(new_olympic_data)

# 2) Rank the participants in terms of: Swimming, Table Tennis, Shooting,
#    Gymnastics and Total Medals.
install.packages("dplyr")   # fixed typo: was install.packAges()
library(dplyr)

# Ranking Swimming participants
swimming_rank <- aggregate(Total.Medals ~ Athlete + Sport, data = new_olympic_data, sum) %>%
  filter(Sport == "Swimming") %>%
  arrange(-Total.Medals) %>%
  mutate(Rank = seq_along(Total.Medals))
View(swimming_rank)

# Ranking Table Tennis participants
TT_rank <- aggregate(Total.Medals ~ Athlete + Sport, data = new_olympic_data, sum) %>%
  filter(Sport == "Table Tennis") %>%
  arrange(-Total.Medals) %>%
  mutate(Rank = seq_along(Total.Medals))
View(TT_rank)

# Ranking Shooting participants
# fixed: arrange() was called without a sort variable, so ranks were assigned
# in arbitrary order rather than by medal count.
Shooting_rank <- aggregate(Total.Medals ~ Athlete + Sport, data = new_olympic_data, sum) %>%
  filter(Sport == "Shooting") %>%
  arrange(-Total.Medals) %>%
  mutate(Rank = seq_along(Total.Medals))
View(Shooting_rank)

# Ranking Gymnastics participants
Gymnastics_rank <- aggregate(Total.Medals ~ Athlete + Sport, data = new_olympic_data, sum) %>%
  filter(Sport == "Gymnastics") %>%
  arrange(-Total.Medals) %>%
  mutate(Rank = seq_along(Total.Medals))
View(Gymnastics_rank)

# Ranking all participants in terms of total medals
Total_medals_rank <- aggregate(Total.Medals ~ Athlete, data = new_olympic_data, sum) %>%
  arrange(-Total.Medals) %>%
  mutate(Rank = seq_along(Total.Medals))
View(Total_medals_rank)

# 3) Rank the sport categories in terms of age (higher the age, higher the rank).
# fixed: referenced an undefined object "newdata"; use new_olympic_data.
quest_5.3 <- new_olympic_data %>%
  select(Sport, Age) %>%
  group_by(Sport) %>%
  filter(Age == max(Age)) %>%
  distinct() %>%
  summarise(Age = max(Age)) %>%
  arrange(desc(Age)) %>%
  mutate(Rank = row_number(desc(Age)))
View(quest_5.3)

# 4) Identify year-wise top participants in terms of: Swimming, Table Tennis,
#    Shooting, Gymnastics and Total Medals.

# Year-wise top participants: Swimming
Swimming_year <- aggregate(Total.Medals ~ Athlete + Sport + Year, data = new_olympic_data, max) %>%
  filter(Sport == "Swimming") %>%
  arrange(Year, -Total.Medals) %>%
  group_by(Year)
View(Swimming_year)

# Year-wise top participants: Table Tennis
TT_year <- aggregate(Total.Medals ~ Athlete + Sport + Year, data = new_olympic_data, max) %>%
  filter(Sport == "Table Tennis") %>%
  arrange(Year, -Total.Medals) %>%
  group_by(Year)
View(TT_year)

# Year-wise top participants: Shooting
Shooting_year <- aggregate(Total.Medals ~ Athlete + Sport + Year, data = new_olympic_data, max) %>%
  filter(Sport == "Shooting") %>%
  arrange(Year, -Total.Medals) %>%
  group_by(Year)
View(Shooting_year)

# Year-wise top participants: Gymnastics
Gymnastics_year <- aggregate(Total.Medals ~ Athlete + Sport + Year, data = new_olympic_data, max) %>%
  filter(Sport == "Gymnastics") %>%
  arrange(Year, -Total.Medals) %>%
  group_by(Year)
View(Gymnastics_year)

# Year-wise top participants: Total Medals
totalmedals_year <- aggregate(Total.Medals ~ Athlete + Year, data = new_olympic_data, sum) %>%
  arrange(Year, -Total.Medals) %>%
  group_by(Year)
View(totalmedals_year)
d1e5e7e7383956195d8664a65e9cd07b8308323c | d70c3584eef021e768cf745bee36e38724b9a292 | /man/OneWaySurvfitObject.Rd | e156e2583e9c7518d07435cc6bf117c95f2a4899 | [] | no_license | cran/TwoWaySurvival | 4b0aa66db91345c382a9ee5460c1f147cf1c6ebc | f99d15e9cb9eaf9829ce2822821a51b13fe1b5cf | refs/heads/master | 2020-04-20T18:37:26.606415 | 2007-11-26T00:00:00 | 2007-11-26T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,298 | rd | OneWaySurvfitObject.Rd | \name{OneWaySurvfitObject}
\alias{OneWaySurvfitObject}
\title{Fitted one-dimensional Object}
\description{An object of class 'OneWaySurvfit' returned as a list by the 'OneWaySurvfitCreate' function
and representing a fitted one-way hazard model with varying coefficients. It has methods for generic functions print, plot and summary.
}
\value{
\item{fix.coef}{named vector of not penalized parameters of the model. It has the value 'NULL' if B-spline bases are chosen for modelling.}
\item{random.coef}{named vector of penalized parameters of the model.}
\item{penalty}{named vector of penalty values for the random parts of the model.}
\item{var.fix}{estimated variances of not penalized parameters of the model. It has the value 'NULL' if B-spline bases are chosen for modelling.}
\item{var.random}{estimated variances of the predicted values of (penalized) random components of the model.}
\item{log.lik.margin.start}{initial value of the marginal log-likelihood.}
\item{log.lik.margin}{the value of the marginal log-likelihood at optimum.}
\item{df}{estimated degrees of freedom for the parts of the model.}
\item{df.total}{total or summed estimated degrees of freedom.}
\item{niter.epoch}{number of outer loops in optimization routine.}
\item{varying.frame}{data frame with named columns, according to the names of the varying coefficients.
It is needed for plotting of the smooth components.}
\item{deviation.frame}{data frame with named columns, according to the names of the varying coefficients.
It is needed for plotting of the confidence bands of the smooth components.}
\item{grid.frame}{data frame with just one column, for survival time, needed for plotting issues.}
\item{p}{number of covariates; for factors including their categories (excluding reference category).}
\item{factor.names}{covariate names; for factors the names of categories (excluding reference category).}
}
\references{Kauerman G. (2005). Penalised Spline Fitting in Multivariable Survival Models with Varying Coefficients
\emph{Computational Statistics and Data Analysis}, 49, 169-186.}
\author{Pavel Khomski <pkhomski@wiwi.uni-bielefeld.de>}
\seealso{\code{\link{OneWaySurv}}, \code{\link{OneWaySurvfitCreate}}}
\keyword{misc}
|
1c4b354edb53fd307eb994edac9d56e3afade318 | 70b77e98bd3c4aaf7dd1d41f1ee027f09f568f90 | /man/prsMultitest.Rd | d0bc345de3722e915adef4d619cbeaacf9771e98 | [] | no_license | andreyshabalin/simPRS | ecc3c590dafa54a075bb93a3def59ea6a2b43ae1 | 8992d040b926e98cb2ac9cfd327cb2fc9b0532ed | refs/heads/master | 2020-03-31T23:10:57.423984 | 2019-02-19T16:45:03 | 2019-02-19T16:45:03 | 152,645,353 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,770 | rd | prsMultitest.Rd | \name{prsMultitest}
\alias{prsMultitest}
\title{
Average Over Multiple PRS Simulations
}
\description{
Perform a number of PRS simulations
with \code{\link{gwasFast}} and \code{\link{prsInf}}
and average over the results.
Supports parallelization on multicore CPUs.
}
\usage{
prsMultitest(signal, N, Nsim, nthreads = 0, minpv = 1e-20)
}
\arguments{
\item{signal}{
Vector of effect sizes for all SNPs. \cr
Can be generated with \code{\link{genSignal}}.
}
\item{N}{
GWAS sample size.
}
\item{Nsim}{
Number of simulations to perform.
}
\item{nthreads}{
Number of parallel threads to use. \cr
Zero (default) sets it to the number of CPU cores. \cr
Setting \code{nthreads = 1} disables multithreading.
}
\item{minpv}{
The simulated PRS performance is recorded on the interval
from \code{minpv} to 1.
}
}
\details{
The function uses \code{parallel} package
for multithreaded calculations.
}
\value{
The function returns the vector of p-value thresholds
and corresponding average
asymptotic correlations of
the phenotype with PRS (\code{r}).
}
\author{
Andrey A Shabalin \email{andrey.shabalin@gmail.com}
}
%\seealso{
% See \code{\link[base]{order}}.
%}
\examples{
NTotalSNPs = 10000
NSignalSnps = 100
heritability = 0.2
signalDistr = "Same"
Ntrain = 10000
Ntest = 3000
signal = genSignal(
NSignalSnps = NSignalSnps,
NTotalSNPs = NTotalSNPs,
heritability = heritability,
signalDistr = signalDistr)
prsA = prsMultitest(signal = signal, N = Ntrain, Nsim = 30, nthreads = 1)
rci = rConfInt(r = prsA$r, N = Ntest)
prsPlot(pv = prsA$pv, r = prsA$r, rci)
}
|
07a0138bc864f65aee915b4906c37ce8f98d0f64 | 3120c1ba511efb1181f4a145d41d1a433f7e2de7 | /NextWord Predictor/ui.R | 9136479409eddfc4d92d188f2c0ef3f1cc179c9e | [] | no_license | mehtashubham/Data-Science-Specialisation---John-Hopkins | 38018b9523bae095a4eb51990084f9d4077324a3 | 2f9416f7977fe3e6d2dc48c8e7749144b4ebf18d | refs/heads/master | 2021-06-17T14:12:17.589366 | 2021-05-11T18:42:35 | 2021-05-11T18:42:35 | 205,664,674 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,683 | r | ui.R | ################# ~~~~~~~~~~~~~~~~~~~~~~~~~~ ######## ~~~~~~~~~~~~~~~~~~~~~~~~~~ #################
##
## Data Science Capstone Project -- Shiny UI definition
## Author: Shubham Mehta
## Github Repo: https://github.com/mehtashubham/Data-Science-Specialisation---John-Hopkins
##
## Layout: a navbar page with a next-word prediction tab, an "About" tab,
## and a shared footer.
##

# Load UI dependencies quietly.  Wrapping the library() calls in c() is
# unconventional but harmless: each call is evaluated while building the vector.
suppressPackageStartupMessages(c(
  library(shinythemes),
  library(shiny),
  library(tm),
  library(stringr),
  library(markdown),
  library(stylo)))

shinyUI(navbarPage("Coursera Data Science Capstone",
                   theme = shinytheme("flatly"),

                   ## Tab 1 - next-word prediction ------------------------------
                   tabPanel("Next Word Prediction",
                            # Google Analytics snippet for shinyapps.io tracking.
                            tags$head(includeScript("./js/ga-shinyapps-io.js")),
                            fluidRow(
                              column(3),
                              column(6,
                                     # Text box the user types into; `value =` is
                                     # intentionally left missing, so textInput's
                                     # default value is used.
                                     tags$div(textInput("text",
                                                        label = h3("Enter your text here:"),
                                                        value = ),
                                              tags$span(style="color:grey",("Only English words are supported.")),
                                              br(),
                                              tags$hr(),
                                              # Server-computed prediction output.
                                              h4("The predicted next word:"),
                                              tags$span(style="color:green",
                                                        tags$strong(tags$h3(textOutput("predictedWord")))),
                                              br(),
                                              tags$hr(),
                                              # Echo of what the user entered.
                                              h4("What you have entered:"),
                                              tags$em(tags$h4(textOutput("enteredWords"))),
                                              align="center")
                              ),
                              column(3)
                            )
                   ),

                   ## Tab 2 - about page ----------------------------------------
                   tabPanel("About This Application",
                            fluidRow(
                              column(2,
                                     p("")),
                              column(8,
                                     includeMarkdown("./about/about.md")),
                              column(2,
                                     p(""))
                            )
                   ),

                   ## Footer ----------------------------------------------------
                   tags$hr(),
                   tags$br(),
                   tags$span(style="color:#808080",
                             tags$footer(("Created in 2017 by "),
                                         tags$a(
                                           href="http://www.linkedin.com/",
                                           target="_blank",
                                           "Shubham Mehta."),
                                         tags$br(),
                                         ("Built with"), tags$a(
                                           href="http://www.r-project.org/",
                                           target="_blank",
                                           "R"),
                                         ("&"), tags$a(
                                           href="http://shiny.rstudio.com",
                                           target="_blank",
                                           "Shiny."),
                                         align = "center"),
                             # Site-wide link color override.
                             tags$head(tags$style(HTML("a {color: navy}"))),
                             tags$br()
                   )
)
)
|
plot3 <- function() {
  ## Plot 3 of the Exploratory Data Analysis assignment: the three energy
  ## sub-metering series for 2007-02-01 and 2007-02-02, saved to plot3.png.
  library(data.table)  # for fread(); the original relied on it being attached

  png("plot3.png")
  on.exit(dev.off(), add = TRUE)  # close the device even if an error occurs

  ## Read the data; "?" marks missing values in this file.
  dt <- fread("household_power_consumption.txt", na.strings = "?")
  dt$Date <- as.Date(dt$Date, format = "%d/%m/%Y")

  ## Keep only the two target days.
  relevantDt <- dt[dt$Date >= as.Date("2007-02-01"), ]
  relevantDt <- relevantDt[relevantDt$Date <= as.Date("2007-02-02"), ]

  ## Combine date and time into POSIX timestamps for the x axis.
  times <- strptime(paste(as.character(relevantDt$Date), relevantDt$Time),
                    format = "%Y-%m-%d %H:%M:%S")

  ## Draw the three sub-metering series and a legend.
  plot(times, as.numeric(relevantDt$Sub_metering_1), type = "n",
       ylab = "Energy sub metering", xlab = "")
  lines(times, as.numeric(relevantDt$Sub_metering_1), col = "black")
  lines(times, as.numeric(relevantDt$Sub_metering_2), col = "red")
  lines(times, as.numeric(relevantDt$Sub_metering_3), col = "blue")
  legend("topright", lty = 1, col = c("black", "red", "blue"),
         legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
}
be798bded1cf3b848463cd70669471ea1e8b169b | bb4d685579a3a250eb5da6f9db4b7ffa50fd1b63 | /R/flipstrand.R | 79d8a7a20df4af84d164f8535342f576cae5843b | [] | no_license | drveera/predixcantools | 8b89ed682ace2613dec8f3e1fccc1298176a25d7 | ff01e7516b5884314f2d1cff57fd56e5976471d1 | refs/heads/master | 2021-06-09T05:36:16.218892 | 2016-12-21T12:04:14 | 2016-12-21T12:04:14 | 76,954,201 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 229 | r | flipstrand.R | #' flipstrand
#'
#' flipstrand
#'
#' @param x a vector of alleles
#'
#' @export
flipstrand <- function(x){
  # Complement lookup table; indexing with a character vector works
  # element-wise, so `x` may be a single allele or a vector of alleles
  # (matching the documented @param, which the old scalar ifs did not).
  complement <- c(A = "T", T = "A", G = "C", C = "G")
  # Unknown alleles yield NA (the original fell through and returned NULL
  # invisibly, which silently dropped the value).
  unname(complement[x])
}
|
dc9d55f8d0fa5094f070c874849288d184e08fab | 1026b81a7ce98e2cb3ef70543d136d0b532877bb | /3.GettingAndCleaningData/Get-Clean-Data.R | 25099efc9481332b95734ce974cca74f14dc46af | [] | no_license | Fbarangan/Data-Science | 5cf4e00251fa7610e4c1253a7701354ea4e19c14 | ef20283c43e571b9b3c219f73a44015f84f190e5 | refs/heads/master | 2021-01-10T17:15:27.806805 | 2016-02-13T23:45:50 | 2016-02-13T23:45:50 | 51,669,591 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,047 | r | Get-Clean-Data.R | # Subsetting and Sorting Lecture
# Subsetting and Sorting lecture notes ----------------------------------------

# Build a small data frame with shuffled rows and some missing values.
set.seed(13435)
X <- data.frame("var1" = sample(1:5), "var2" = sample(6:10), "var3" = sample(11:15))
X <- X[sample(1:5), ]
X$var2[c(1:3)] <- NA
X

# Basic column subsetting -- run each line to see the result.
X[, 1]
X[, "var1"]
X[1:2, "var2"]

# Logical ANDs and ORs
X[X$var1 <= 3 & X$var3 > 11, ]
X[X$var1 <= 3 | X$var3 > 15, ]

# Dealing with missing values: which() drops the NAs from the result.
X[which(X$var2 > 8), ]   # fixed: was "X$var 2>8", a syntax error

# Sorting
sort(X$var1)
sort(X$var1, decreasing = TRUE)
# Sort and put NAs last
sort(X$var2, na.last = TRUE)

# Ordering (also by multiple columns): the first column passed to order()
# is the primary sort key.
X[order(X$var1, X$var3), ]

# Ordering with plyr
library(plyr)
arrange(X, var1)
# Descending order via the desc() helper
arrange(X, desc(var1))

# Adding rows and columns
X$var4 <- rnorm(5)
# cbind() appends on the side the new data is passed: with X on the left the
# original columns come first, and vice versa.
Y <- cbind(X, rnorm(5))
Y <- cbind(rnorm(5), X)
e18acb6f7c7ac84ae6b5b517037ae48ea74b558b | 45cbaf5a86a811772d06da092df37229bbcc6af2 | /Filtrando sinais biomédicos.R | 437f068e403093111842c07c0bbc03fee369f3d8 | [] | no_license | kerolaynesilva/PSB | 48038a90ca5f2ecca9d2aa20f0d69789b9331b2a | 068faf55782a67b8539c63e8a81bc48da029c7c4 | refs/heads/master | 2020-05-01T12:34:22.125419 | 2019-03-24T21:11:14 | 2019-03-24T21:11:14 | 171,525,244 | 0 | 0 | null | null | null | null | ISO-8859-1 | R | false | false | 6,129 | r | Filtrando sinais biomédicos.R |
library(dygraphs)#biblioteca para plotagem de séries temporais (gráfico interativo)
library(signal)
library(ggplot2)
library(REdaS)
# Definição do diretório de trabalho
getwd() # get current working directory
setwd("C:/Users/samsung/Documents/Universidade/PET EngBiomédica/Workshop PET/Programas/")
# Lendo o arquivo texto na forma de uma tabela
df1 <- read.table("a1.txt",header = TRUE, sep = " ", skip = 6)
# alteração do nome das variáveis do dataframe
names(df1) <- c("AccX", "AccY", "MuscAnterior","MuscPosterior")
#criando o vetor de tempo
fs <- 500 #Hz
# Definição intervalo entre as amostras em segundos
dt <- 1/fs
t <- seq(from=0, to = dt*(length(df1$AccX)-1), by=dt)
df1 <- cbind(t,df1)
dygraph(df1[c("t","MuscAnterior")]) %>% dyRangeSelector()
dygraph(df1[c("t","MuscPosterior")]) %>% dyRangeSelector()
dfpost <- data.frame(time=df1$t, MuscPosterior=df1$MuscPosterior)
dfant <- data.frame(time=df1$t, MuscAnterior=df1$MuscAnterior)
#Letra A:-----------------------------------
#Filtro Butterworth---------------------------------------------------------------------------------------------
Fs <- 500 # frequência de amostragem em Hz
#Primeiro filtro: passa alta para atenuar frequências abaixo de 0.2 Hz---------------------------
nh <- 3 # ordem do filtro
Fc_high <- 0.2 # frequência de corte em Hz
Wh <- Fc_high/(Fs/2)
bt_high <- butter(nh, Wh, type = "high") # cálculo dos coeficientes do fitro
freqz(filt = bt_high, Fs = Fs)
# fazendo a filtragem por meio do uso da função filtfilt
y_filt_high <- filtfilt(filt = bt_high, dfpost$MuscPosterior)
y_filt_high2 <- filtfilt(filt = bt_high, dfant$MuscAnterior)
dfplot1 <- data.frame(time = t, dfpost$MuscPosterior, yf = y_filt_high)
dygraph(dfplot1)
dfplot2 <- data.frame(time = t, dfant$MuscAnterior, yf = y_filt_high2)
dygraph(dfplot2)
#Segundo filtro: rejeita faixa para atenuar 60 Hz da rede elétrica---------------------------------
nr <- 4
Fc_l <- 59 # frequência de corte inferior em Hz
Fc_u <- 61 # frequência de corte superior em Hz
Wrej <- c(Fc_l, Fc_u)/(Fs/2)
bt_rej <- butter(nr, Wrej, type = "stop") # cálculo dos coeficientes do fitro
freqz(filt = bt_rej, Fs = Fs)
# fazendo a filtragem por meio do uso da função filtfilt
y_filt_rej <- filtfilt(filt = bt_rej, dfplot1$yf)
y_filt_rej2 <- filtfilt(filt = bt_rej, dfplot2$yf)
dfplot3 <- data.frame(time = t, dfplot1$yf, yf = y_filt_rej)
dygraph(dfplot3)
dfplot4 <- data.frame(time = t, dfplot2$yf, yf = y_filt_rej2)
dygraph(dfplot4)
#Retificando o sinal antes de passar pelo filtro passa baixa
dfplot3 <- abs(dfplot3)
dygraph(dfplot3)
dfplot4 <- abs(dfplot4)
dygraph(dfplot4)
#Terceiro filtro: passa baixa para atenuar frequências abaixo de 2 Hz---------------------------
nl <- 4 # ordem do filtro
Fc_low <- 2 # frequência de corte em Hz
Wl <- Fc_low/(Fs/2)
bt_low <- butter(nl, Wl, type = "low") # cálculo dos coeficientes do fitro
freqz(filt = bt_low, Fs = Fs)
# fazendo a filtragem por meio do uso da função filtfilt
y_filt_low <- filtfilt(filt = bt_low, dfplot3$yf)
y_filt_low2 <- filtfilt(filt = bt_low, dfplot4$yf)
dfplot5 <- data.frame(time = t, dfplot3$yf, yf = y_filt_low)
dygraph(dfplot5)
dfplot6 <- data.frame(time = t, dfplot4$yf, yf = y_filt_low2)
dygraph(dfplot6)
#Plotando a detecção de envoltório
dygraph(data.frame(time=dfplot5$time, envoltorio=dfplot5$yf, df1$MuscPosterior))
dygraph(data.frame(time=dfplot6$time, envoltorio=dfplot6$yf, dfant$MuscAnterior))
dygraph(data.frame(time=dfplot6$time, envoltorio=dfplot6$yf, dfplot2$yf))
#Filtro Chebyshev---------------------------------------------------------------------------------------------
Fs <- 500 # frequência de amostragem em Hz
#Primeiro filtro: passa alta para atenuar frequências abaixo de 0.2 Hz---------------------------
nc <- 3 # ordem do filtro
Fc <- 0.2 # frequência de corte em Hz
W <- Fc/(Fs/2)
Rp <- 0.5
cheb <- cheby1(nc, Rp, W, type = "high") # cálculo dos coeficientes do fitro
freqz(filt = cheb, Fs = Fs)
# fazendo a filtragem por meio do uso da função filtfilt
y_filt_high_cheb <- filtfilt(filt = cheb, dfpost$MuscPosterior)
y_filt_high_cheb2 <- filtfilt(filt = cheb, dfant$MuscAnterior)
dfplot5 <- data.frame(time = t, dfpost$MuscPosterior, yf = y_filt_high_cheb)
dygraph(dfplot5)
dfplot6 <- data.frame(time = t, dfant$MuscAnterior, yf = y_filt_high_cheb2)
dygraph(dfplot6)
#Segundo filtro: rejeita faixa para atenuar 60 Hz da rede elétrica---------------------------------
Ws <- c(Fc_l, Fc_u)/(Fs/2)
cheb_stop <- cheby1(nc, Rp, Ws, type = "stop") # cálculo dos coeficientes do fitro
freqz(filt = cheb_stop, Fs = Fs)
# fazendo a filtragem por meio do uso da função filtfilt
y_filt_cheb_stop <- filtfilt(filt = cheb_stop, dfplot5$yf)
y_filt_cheb_stop2 <- filtfilt(filt = cheb_stop, dfplot6$yf)
dfplot7 <- data.frame(time = t, dfplot5$yf, yf = y_filt_cheb_stop)
dygraph(dfplot7)
dfplot8 <- data.frame(time = t, dfplot6$yf, yf = y_filt_cheb_stop2)
dygraph(dfplot8)
#Retificando o sinal antes de passar pelo filtro passa baixa
dfplot7 <- abs(dfplot7)
dygraph(dfplot7)
dfplot8 <- abs(dfplot8)
dygraph(dfplot8)
#Terceiro filtro: passa baixa para atenuar frequências abaixo de 2 Hz---------------------------
Fc_low <- 2 # frequência de corte em Hz
Wcl <- Fc_low/(Fs/2)
Rp <- 0.5
cheb_low <- cheby1(nc, Rp, Wcl, type = "low") # cálculo dos coeficientes do fitro
freqz(filt = cheb_low, Fs = Fs)
# fazendo a filtragem por meio do uso da função filtfilt
y_filt_low_cheb <- filtfilt(filt = cheb_low, dfplot7$yf)
y_filt_low_cheb2 <- filtfilt(filt = cheb_low, dfplot8$yf)
dfplot9 <- data.frame(time = t, dfplot7$yf, yf = y_filt_low_cheb)
dygraph(dfplot9)
dfplot10 <- data.frame(time = t, dfplot8$yf, yf = y_filt_low_cheb2)
dygraph(dfplot10)
#Plotando a detecção de envoltório
dygraph(data.frame(time=dfplot9$time, envoltorio=dfplot9$yf, df1$MuscPosterior))
dygraph(data.frame(time=dfplot10$time, envoltorio=dfplot10$yf, dfant$MuscAnterior))
|
f997cb5133360ed905a9ce634d43fc4bc88be92e | 7764cb88a1bd855e85f95e9dccd11fa218781a37 | /tests/testthat/test-tttable.R | 23d6ca644b87905e17769fbd353b15815b309873 | [
"GPL-3.0-or-later",
"LicenseRef-scancode-warranty-disclaimer",
"LGPL-2.1-or-later",
"GPL-1.0-or-later",
"LGPL-2.0-or-later",
"GPL-3.0-only",
"AGPL-3.0-or-later",
"LicenseRef-scancode-other-copyleft",
"LicenseRef-scancode-unknown-license-reference",
"CC-BY-4.0"
] | permissive | jalsalam/tttable | b9c750c2d3a6205ae0df0055b2a6a2068c16f329 | 942407c79807b242518090e3fbe4e48cbd38f1e8 | refs/heads/master | 2020-03-22T14:45:06.621032 | 2018-07-21T17:23:45 | 2018-07-21T17:23:45 | 140,202,724 | 0 | 0 | CC-BY-4.0 | 2018-07-08T21:07:10 | 2018-07-08T21:07:09 | null | UTF-8 | R | false | false | 152 | r | test-tttable.R | context("test-tttable.R")
test_that("basic tttable construction and printing 'works' ", {
t1 <- expect_silent(tttable(toy, arr("V1", "V2")))
t1
})
|
0d44b9725b1a0b684e8034cf16a6f8c0929d8392 | 6664dde83396ead890728581a4ff4624ea762f5a | /ncdf4/man/ncdim_def.Rd | fa8d86fe932dd422efaec0f73697c7918324ad3f | [] | no_license | pmjherman/r-ncdf4-build-opendap-windows | c12834081dcb2db2ea127b47716e25c3a3e2abd2 | aa451e9ecb34c072bde96ad3f73d405280dfdd01 | refs/heads/master | 2022-01-22T00:56:39.183344 | 2019-07-15T07:21:05 | 2019-07-15T07:21:05 | 125,648,978 | 5 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,977 | rd | ncdim_def.Rd | \name{ncdim_def}
\alias{ncdim_def}
\title{Define a netCDF Dimension}
\description{
Defines a netCDF dimension. This dimension initially only exists in memory.
The dimension is later added to a netCDF variable using a call to \code{ncvar_def()},
and written to disk using \code{nc_create()}.
}
\usage{
ncdim_def( name, units, vals, unlim=FALSE,
create_dimvar=TRUE, calendar=NA, longname=name )
}
\arguments{
\item{name}{Name of the dimension to be created (character string).
The dimension name can
optionally have forward slashes in it, in which case the dimension will be defined
in the indicated group. For example, a dimension
named \code{model3/run1/Longitude} will define a group named \code{model3}, with a
subgroup named \code{run1}, which will hold a dimension named \code{Longitude}. Using
groups forces a netcdf version 4 file to be written. Note that older software might not
be able to read netcdf version 4 files.}
\item{units}{The dimension's units (character string).}
\item{vals}{The dimension's values (vector of numeric type). If integers are passed, the associated dimensional variable will be integer type; otherwise, it will be double precision.}
\item{unlim}{If TRUE, this dimension is unlimited. Unlimited dimensions are convenient for storing, for example, data that extends over time;
the time dimension can be made unlimited, and extended as needed. Or, an unlimited dimension could be the number of stations, and extended
as more stations come on-line. Note that in netCDF version 4, multiple dimensions can be unlimited. In netCDF version 3, there could
only be one unlimited dimension, typically the time dimension.}
\item{create_dimvar}{If TRUE, a dimensional variable (aka coordinate variable) will be created for this dimension. Note: if this is set to FALSE, then 'units' must be an empty string. It is good practice to always leave this as TRUE.}
\item{calendar}{If set, the specified string will be added as an attribute named "calendar" to the dimension variable. Used almost exclusively with unlimited time dimensions. Useful values include "standard" (or "gregorian"), "noleap" (or "365_day"), and "360_day"). }
\item{longname}{If set, AND create_dimvar is TRUE,
then the created dimvar will have a long_name attribute with this value.}
}
\value{
An object of class \code{ncdim4} that can later be passed to
\code{ncvar_def()}.
}
\references{
http://dwpierce.com/software
}
\details{
This routine creates a netCDF dimension in memory. The created dimension can then
later be passed to the routine \code{ncvar_def()} when defining a variable.
Note that this interface to the netCDF library by default includes that more than the
minimum required by the netCDF standard. I.e., the netCDF standard allows
dimensions with no units or values. This call encourages creating dimensions
that have units and values,
as it is useful to ensure that all dimensions have units and
values, and considerably easier to include them in this call than it is
to add them later. The units and values are implemented through "dimensional
variables," which are variables with the same name as the dimension. By default, these
dimensional variables are created automatically -- there is no need for the
user to create them explicitly. Dimensional variables are standard practice
in netCDF files. To suppress the creation of the dimensional variable for
the dimension, set passed parameter create_dimvar to FALSE. As a
check, if create_dimvar is FALSE, you must ALSO pass an empty string ('') as the unit, and the values
must be simple integers from 1 to the length of the dimension (e.g., 1:10 to
make a dimension of length 10).
This empahsizes that without a dimensional variable, a netCDF file cannot
store a dimension's units or values.
The dimensional variable is usually created as a double precision floating
point. The other possibility is to pass integer values (using \code{as.integer},
for example), in which case the dimensional variable with be integer.
The return value of this function is an object of class \code{ncdim4}, which
describes the newly created dimension.
The \code{ncdim} object is used for more than just creating a new
dimension, however.
When opening an existing file, function \code{\link[ncdf4]{nc_open}} returns a
\code{ncdf4} class object, which itself has a list of \code{ncdim} objects
that describe all the dimensions in that existing file.
The \code{ncdim} object has the following fields, which are all read only:
1) name, which is a character string containing the name of the dimension;
2) units, which is a character string containing the units for the dimension,
if there are any (technically speaking, this is the "units" attribute of the
associated coordinate variable); 3) vals, which is a vector containing the
dimension's values (i.e., the values of the associated coordinate variable,
or, if there is none, an integer sequence from 1 to the length of the dimension);
4) len, which is the length of this dimension; 5) unlim, which is a boolean
indicating whether or not this is an unlimited dimension; 6) (optional) calendar,
which is set if and only if the on-disk dimvar had an attribute named
"calendar" (in which case, it is set to the value of that attribute).
}
\author{David W. Pierce \email{dpierce@ucsd.edu}}
\seealso{
\code{\link[ncdf4]{ncvar_def}}, \code{\link[ncdf4]{nc_create}}
}
\examples{
# Define some straightforward dimensions
x <- ncdim_def( "Lon", "degreesE", 0.5:359.5)
y <- ncdim_def( "Lat", "degreesN", as.double(-89:89))
t <- ncdim_def( "Time", "days since 1900-01-01", 1:10, unlim=TRUE)
# Make a variable with those dimensions. Note order: time is LAST
salinity <- ncvar_def("Salinity", "ppt", list(x,y,t), 1.e30 )
# Create a netCDF file with this variable
ncnew <- nc_create( "salinity.nc", salinity )
nc_close(ncnew)
# Now, illustrate some manipulations of the ncdim object.
filename <- "salinity.nc"
nc <- nc_open( filename )
print(paste("File",filename,"contains",nc$ndims,"dimensions"))
for( i in 1:nc$ndims ) {
print(paste("Here is information about dimension number",i,":"))
d <- nc$dim[[i]]
print(paste(" Name :",d$name))
print(paste(" Units :",d$units))
print(paste(" Length:",d$len))
print(" Values:")
print(d$vals)
print(paste(" Unlimited:",d$unlim))
}
}
\note{It is good practice, but not necessary, to pass the dimension's values
to this routine when the dimension is created. It is also possible to
write them later with a call to 'ncvar_put', using as the dimension
name as the 'varid' in the call. This is useful when creating large variables
with long unlimited dimensions; it can take a long time to write out the
unlimited dimension's values. In this case, it can be more efficient to
step through the file, writing one timestep at a time, and write that
timestep's dimensional value at the same time.
}
\keyword{utilities}
|
c2e47355dd814bf180a288a7fa9db2db322bf7d9 | 3af4cf29ebe0b92f6096d2b292ac2f72fbfc3249 | /R/explore_distance_matrix.R | 0c2b9c6413441645aaa6a0f67a2b7a7eb4e2cfcd | [] | no_license | hapebe/Partitioning2D | 64100c58c2680cb644beb49c354f24c4f7087f27 | ff9d5df39de7bf6268e943bb9b20589bc86fb9e2 | refs/heads/main | 2023-06-01T22:46:14.984971 | 2021-06-20T16:39:10 | 2021-06-20T16:39:10 | 347,616,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,605 | r | explore_distance_matrix.R | datafilepath <- "D:/home/hapebe/self-made/coding/Partitioning2D/Data/"
# Choose the subject label used in plot titles and axis labels; the unused
# alternatives are kept commented out for quick switching.
# subject <- "11-square ODD"
# subject <- "30-square ODD"
subject <- "30-square EUC"
# subject <- "HP-Test3 ODD"
# subject <- "HP-Test3 EUC"
# Load the matching tab-separated distance file (datafilepath is defined above).
# df <- read.csv(paste0(datafilepath, "11-square-oddDistances.txt"), header=TRUE, sep="\t", na="")
# df <- read.csv(paste0(datafilepath, "30-square-oddDistances.txt"), header=TRUE, sep="\t", na="")
df <- read.csv(paste0(datafilepath, "30-square-eucDistances.txt"), header=TRUE, sep="\t", na="")
# df <- read.csv(paste0(datafilepath, "HP-Test3-oddDistances.xl.txt"), header=TRUE, sep="\t", na="")
# df <- read.csv(paste0(datafilepath, "HP-Test3-eucDistances.xl.txt"), header=TRUE, sep="\t", na="")
str(df) ; df$distance
# Histogram of the inter-point distances: first on the count scale, then on
# the density scale (freq=FALSE) so a density curve can be overlaid.
hist(df$distance, breaks = 400, main = "Histogram of Inter-Point Distance", xlab = subject, col = "lightblue")
hist(df$distance, main = "Histogram of Inter-Point Distance", xlab = subject, col = "lightblue", freq=FALSE)
summary(df$distance)
# Basic location/spread statistics (each printed by the trailing expression).
median1 <- median(df$distance) ; median1
mean1 <- mean(df$distance) ; mean1
sd1 <- sd(df$distance) ; sd1
# Overlay a normal density curve on the density-scaled histogram, see
# https://stackoverflow.com/questions/35403643/multiple-histogram-with-overlay-standard-deviation-curve-in-r
curve(dnorm(x, mean=mean1, sd=sd1), col="darkblue", lwd=2, add=TRUE, yaxt="n")
# Mark the median (red), the mean (solid), and mean +/- one SD (dashed).
abline(v = median1, col="red")
abline(v = mean1)
abline(v = mean1+sd1, lty = 2)
abline(v = mean1-sd1, lty = 2)
# Frequency table of the distinct distance values.
freqs <- as.data.frame(table(df$distance))
colnames(freqs) <- c("Point-to-Point ODD", "Frequency")
freqs
# write.table(freqs, paste0(datafilepath, "11-square-oddDistances-freqs.txt"), sep="\t")
0312e0b361442f8cc3e6b4bcc7556e9de709a84b | 5dad0fbb98b20d4675ef673d26fff97d0da2b725 | /project3/deepbayes/R/num_node_example.R | db609546deb0e9ee9165921b3bde6c11c486263a | [] | no_license | mutual-ai/thesis | 8ded08d18824f4b50da301f12b5097f0acf5c522 | b005fdbc7407ee0ad8de0e10c392d72294eeca78 | refs/heads/master | 2020-12-25T23:18:50.164402 | 2016-04-06T20:58:27 | 2016-04-06T20:58:27 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,983 | r | num_node_example.R | #####
# This example function is written to demonstrate interfacing BayesOpt as a
# hyperparameter tuning framework for (deep) neural networks with Caffe.
# We take the simple set of nets where we have a 3-d input layer, a hidden layer
# with n nodes where n is the hyperparameter to be tuned, and a 2-d output layer.
#####
# End-to-end demo: tune the hidden-layer size of a fully-connected net
# (3 inputs, 2 outputs, one hidden layer) via k-fold cross validation,
# comparing the BayesOpt search against brute force over the same grid.
# Relies on package functions simulate_fc_data(), kfold_split(), optimize()
# and brute_force(), and on package-level solver_path / log_path variables.
num_node_example <- function() {
    N <- 10000                     # dataset size
    k <- 5                         # number of CV folds
    in_dim <- 3                    # input layer width
    out_dim <- 2                   # output layer width
    num_nodes <- 10                # hidden width passed to the data simulator
    num_node_vals <- seq(1, 1000)  # candidate hidden widths to search over
    prefix <- "num_node"
    cat("Simulating data...\n")
    sim <- simulate_fc_data(in_dim, out_dim, num_nodes, # See data_util.R
                            N, 3333, TRUE)
    X <- sim[[1]]
    y <- sim[[2]]
    cat("Splitting into folds...\n")
    kfold_split(k, X, y, prefix) # See data_util.R
    cat("Creating net and solver protos...\n")
    create_protos_num_node(N, k, num_node_vals, prefix) # See below
    # Time the Bayesian-optimization search.
    t <- proc.time()
    budget <- 25  # search budget -- presumably max evaluations; see optimize()
    opt_out <- optimize(matrix(num_node_vals,
                               1, length(num_node_vals)),
                        k,
                        budget,
                        prefix,
                        solver_path,
                        log_path)
    T <- proc.time()
    # Time the exhaustive search over the same candidates.
    t2 <- proc.time()
    brute_out <- brute_force(matrix(num_node_vals,
                                    1, length(num_node_vals)),
                             k,
                             prefix,
                             solver_path,
                             log_path)
    T2 <- proc.time()
    # Report elapsed timings for both strategies.
    cat("bayesopt:\n")
    cat(T - t, "\n")
    cat("brute force:\n")
    cat(T2 - t2, "\n")
}
# This function takes in a dataset size N, number of folds k, range of
# num_nodes values, and prefix name. Creates the net and solver proto files
# corresponding to this example for tuning num_nodes.
# Write the Caffe net and solver .prototxt files needed to tune the
# hidden-layer size with k-fold cross validation.
#
# Args:
#   N:             dataset size; used to derive the solver's test_iter
#                  (test_iter * batch size of 100 = one fold's validation size).
#   k:             number of cross-validation folds.
#   num_node_vals: candidate hidden-layer sizes (the tuned hyperparameter).
#   prefix:        file-name prefix of the fold data sets written earlier.
#
# The tuned hyperparameter is embedded in the network structure, not in the
# solver, so one net proto is written per (fold, num_nodes) pair and one
# solver proto per net proto.  Relies on the package-level path variables
# net_path, dataset_path, solver_path and model_path.
create_protos_num_node <- function(N, k, num_node_vals, prefix) {
    # Read both templates once up front (the original re-read them on every
    # loop iteration and never closed the connections); on.exit guarantees
    # the connections are released even if an error occurs.
    net_template_file <- file(paste(net_path,
                                    '/num_node_train_valid_template.prototxt',
                                    sep=''))
    on.exit(close(net_template_file), add = TRUE)
    net_template <- readLines(net_template_file)

    solver_template_file <- file(paste(solver_path,
                                       '/num_node_m_fold_k_solver.prototxt',
                                       sep=''))
    on.exit(close(solver_template_file), add = TRUE)
    solver_template <- readLines(solver_template_file)

    # Loop-invariant: test_iter * batch_size (100) = size of one fold.
    test_iter <- (N / k) / 100

    # One net proto per (fold, hidden size) pair.
    for (i in seq_len(k)) {
        for (n in num_node_vals) {
            template_lines <- net_template
            # Name the net after its fold.
            template_lines[1] <- paste('name: "fold', i, '"', sep='')
            # Point at this fold's training split.
            template_lines[8] <- paste('  source: "',
                                       dataset_path,
                                       '/',
                                       prefix,
                                       '_train',
                                       i, '.txt"', sep='')
            # Point at this fold's validation split.
            template_lines[21] <- paste('  source: "',
                                        dataset_path,
                                        '/',
                                        prefix,
                                        '_valid',
                                        i, '.txt"', sep='')
            # Set the hidden layer's width (the tuned hyperparameter).
            template_lines[34] <- paste('  num_output: ', n, sep='')

            this_fold_net_filename <- paste(net_path,
                                            '/fold', i,
                                            '_num_node_', n,
                                            '_train_valid.prototxt', sep='')
            file.create(this_fold_net_filename)
            this_fold_net_file <- file(this_fold_net_filename)
            writeLines(template_lines, this_fold_net_file)
            close(this_fold_net_file)
        }
    }

    # One solver proto per net proto; only the referenced net, the test_iter
    # and the snapshot prefix differ between solvers.
    for (n in num_node_vals) {
        for (i in seq_len(k)) {
            template_lines <- solver_template
            # Point the solver at the matching fold/size net proto.
            template_lines[1] <- paste('net: "',
                                       net_path,
                                       '/fold', i,
                                       '_num_node_', n,
                                       '_train_valid.prototxt"', sep='')
            # Change the test_iter (test_iter * batch_size = test_size)
            template_lines[2] <- paste('test_iter: ', test_iter, sep='')
            # Change the snapshot prefix
            template_lines[13] <- paste('snapshot_prefix: "',
                                        model_path,
                                        '/fold', i,
                                        '_num_node_', n, '"', sep='')

            this_solver_filename <- paste(solver_path,
                                          '/num_node_',
                                          n,
                                          '_fold_',
                                          i,
                                          "_solver.prototxt",
                                          sep='')
            file.create(this_solver_filename)
            this_solver_file <- file(this_solver_filename)
            writeLines(template_lines,
                       this_solver_file)
            close(this_solver_file)
        }
    }
}
fc417de6864dd14a6a4f2f284e7212dc5e2e1d86 | 6cbb51fe996e65a51a8d9f2f35e3159721933f25 | /man/runSeuratScaleData.Rd | 17dc7d1b6bdb7536eceeddb7e128ac346ec9bdcf | [
"MIT"
] | permissive | compbiomed/singleCellTK | 927fb97e257ba89cddee9a90f9cb7cb375a5c6fb | 990e89e7ccfbf663f23c793454f72fb8c6878a32 | refs/heads/master | 2023-08-11T09:17:41.232437 | 2023-07-26T20:43:47 | 2023-07-26T20:43:47 | 68,756,293 | 144 | 89 | NOASSERTION | 2023-09-06T18:22:08 | 2016-09-20T21:50:24 | R | UTF-8 | R | false | true | 1,457 | rd | runSeuratScaleData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/seuratFunctions.R
\name{runSeuratScaleData}
\alias{runSeuratScaleData}
\title{runSeuratScaleData
Scales the input sce object according to the input parameters}
\usage{
runSeuratScaleData(
inSCE,
useAssay = "seuratNormData",
scaledAssayName = "seuratScaledData",
model = "linear",
scale = TRUE,
center = TRUE,
scaleMax = 10,
verbose = TRUE
)
}
\arguments{
\item{inSCE}{(sce) object to scale}
\item{useAssay}{Assay containing normalized counts to scale.}
\item{scaledAssayName}{Name of new assay containing scaled data. Default
\code{seuratScaledData}.}
\item{model}{selected model to use for scaling data. Default \code{"linear"}.}
\item{scale}{Logical value indicating whether the data should be scaled. Default \code{TRUE}.}
\item{center}{Logical value indicating whether the data should be centered. Default \code{TRUE}.}
\item{scaleMax}{maximum numeric value to return for scaled data. Default
\code{10}.}
\item{verbose}{Logical value indicating if informative messages should
be displayed. Default is \code{TRUE}.}
}
\value{
Scaled \code{SingleCellExperiment} object
}
\description{
runSeuratScaleData
Scales the input sce object according to the input parameters
}
\examples{
data(scExample, package = "singleCellTK")
\dontrun{
sce <- runSeuratNormalizeData(sce, useAssay = "counts")
sce <- runSeuratFindHVG(sce, useAssay = "counts")
sce <- runSeuratScaleData(sce, useAssay = "counts")
}
}
|
0704cc300c19d94f2c0a8997b26ef4641e717d7d | 6fc9b991042c5719e174fd02a5ebff5cebc38194 | /plot2.r | d0c659aa88872069d7bf17d11148ac117275c2e7 | [] | no_license | cthetford/ExData_Plotting1 | c7bc18128f7136183bdc447f0ebd1aa7c3735563 | 235e780ebe66f652c393b4e7bb45de1733b6cbe8 | refs/heads/master | 2020-12-11T03:34:13.794476 | 2015-04-11T20:50:31 | 2015-04-11T20:50:31 | 33,779,074 | 0 | 0 | null | 2015-04-11T14:38:50 | 2015-04-11T14:38:49 | null | UTF-8 | R | false | false | 499 | r | plot2.r |
# Plot 2: line plot of Global Active Power for 2007-02-01 and 2007-02-02.

# Read the full household power dataset; "?" marks missing values and the
# first two columns (Date, Time) are kept as character for later parsing.
power_raw <- read.table(
  "household_power_consumption.txt",
  header = TRUE,
  sep = ";",
  na.strings = "?",
  colClasses = c("character", "character", rep("numeric", 7))
)

# Keep only the two target days (dates are stored as d/m/Y strings).
target_days <- c("1/2/2007", "2/2/2007")
power_sub <- power_raw[power_raw$Date %in% target_days, ]

# Build a full date-time from the Date and Time columns, then parse Date.
power_sub$Time <- strptime(
  paste(power_sub$Date, power_sub$Time),
  format = "%d/%m/%Y %H:%M:%S", tz = ""
)
power_sub$Date <- as.Date(power_sub$Date, "%d/%m/%Y")

# Line plot of global active power across the two days.
plot(
  power_sub$Time, power_sub$Global_active_power,
  type = "l",
  ylab = "Global Active Power (kilowatts)",
  xlab = ""
)
|
cc6f60e0bdcd6d2976844a08fc55a0a1ea851ac1 | 112664a804d0caaeef62bb432bfe973521c1f100 | /man/validate_age_bp_cp.Rd | 8fbc8c061c6dd54ed444d2d13a107f42984b134f | [] | no_license | ttsukianto/birdproofr | c88737568a06464727e7676676825f90c062e339 | c0b897d13cf705e8b85f8e517641c59d935da5ba | refs/heads/master | 2020-04-12T05:17:50.294351 | 2019-02-03T19:42:39 | 2019-02-03T19:42:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 474 | rd | validate_age_bp_cp.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data_validation.R
\name{validate_age_bp_cp}
\alias{validate_age_bp_cp}
\title{Check that age and BP/CP match. Age 2, 4, and 0 should always have 0 for both
BP and CP}
\usage{
validate_age_bp_cp(df)
}
\arguments{
\item{df}{bird data frame}
}
\value{
data frame of rows with age/BP/CP issues
}
\description{
Check that age and BP/CP match. Age 2, 4, and 0 should always have 0 for both
BP and CP
}
|
09284389e7685800926721ccb2daccfd5643964a | 4c4b5fbb7f9a0fef790364eba4b92bb7a578c9d5 | /man/OS_type.Rd | 08f2a6624da4da6c2adee3714bf765a0c34bfc58 | [] | no_license | renozao/repotools | b559bca28d571a1310c9ecf77b355010f46eb8cb | 0f76e52253c08063084074061f8ca2c05e8a4818 | refs/heads/master | 2021-05-16T03:08:28.467104 | 2018-11-19T20:20:09 | 2018-11-19T20:20:09 | 18,404,374 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 452 | rd | OS_type.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/api.R
\name{OS_type}
\alias{OS_type}
\title{Determine Type of Operating System}
\usage{
OS_type()
}
\value{
a single character string: \code{'unix'}, \code{'mac'}, or \code{'windows'}
depending on the OS of the calling machine.
}
\description{
Returns the type of OS.
}
\examples{
\dontshow{
options(R_CHECK_RUNNING_EXAMPLES_=TRUE) ## roxygen generated flag
}
OS_type()
}
|
6e288fc61f67065a3614cd5f5bdfea1a93ccc1a9 | 878aa28161ed778da05902113a9a18fbb2738319 | /Data manage/Make DLM figures.R | a6fd74188d5cbc319f78ecd7c248ab10fc0fdf18 | [] | no_license | klwilson23/Keogh | a6f9f3ccb24d10ce08d694eaa8cdecae8dd06dbf | e499c087c267d3e3a89c8edfe4b088248f6338ec | refs/heads/master | 2023-04-11T02:09:36.009864 | 2022-01-21T21:39:32 | 2022-01-21T21:39:32 | 152,797,463 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 20,520 | r | Make DLM figures.R | source("some functions.R")
library(reshape2)
library(gridExtra)
library(ggpubr)
library(ggplot2)
library(MARSS)
library(broom)
library(wesanderson)
keoghDLM <- readRDS("Results/keoghDLM.rds")
wesAnderson <- "Darjeeling1"
keogh <- readRDS("Keogh_newJuv_enviro.rds")
keogh_long <- subset(keogh,Year<=2015 & Year>=1976)
keogh_long <- subset(keogh_long,Species!="Chum")
keogh_long$oceanSalmon <- residuals(lm(oceanSalmon~seals:Species,data=keogh_long))
#keogh_long$seals <- log(keogh_long$seals)
keogh_long$b_yr <- as.vector(matrix(t(keoghDLM$states),nrow=1,byrow=TRUE))
keogh_long$b_UI <- as.vector(matrix(t(keoghDLM$states+2*keoghDLM$states.se),nrow=1,byrow=TRUE))
keogh_long$b_LI <- as.vector(matrix(t(keoghDLM$states-2*keoghDLM$states.se),nrow=1,byrow=TRUE))
Nspecies <- length(unique(keogh_long$Species))
adults <- subset(keogh_long,select = c(Year,Species,Stock))
adults <- reshape(adults,direction = "wide",idvar="Year",timevar="Species")
recruits <- subset(keogh_long,select = c(Year,Species,Recruits))
recruits <- reshape(recruits,direction = "wide",idvar="Year",timevar="Species")
years <- recruits$Year
Nyears <- nrow(recruits)
colr <- wes_palette(wesAnderson,Nspecies,type=c("discrete"))
juv_enviro <- subset(keogh_long,select = c(Year,Species,sumTemp,sumRain,winTemp,winRain,freshCoho,freshSteel,freshCutt,freshDolly,freshPink))
fresh_enviro <- reshape(juv_enviro,direction = "wide",idvar="Year",timevar="Species")
adult_enviro <- subset(keogh_long,select = c(Year,Species,seals,npgo,mei,oceanSalmon,juvCohort))
ocean_enviro <- reshape(adult_enviro,direction = "wide",idvar="Year",timevar="Species")
freshEnviroNew <- fresh_enviro
sdCovarsFresh <- attr(scale(fresh_enviro[,-1],center=TRUE,scale=TRUE),"scaled:scale")
mnCovarsFresh <- attr(scale(fresh_enviro[,-1],center=TRUE,scale=TRUE),"scaled:center")
freshCovarScale <- scale(fresh_enviro[,-1],center=TRUE,scale=TRUE)
#covarScale[is.na(covarScale)] <- 0
oceanEnviroNew <- ocean_enviro
sdCovarsOcean <- attr(scale(ocean_enviro[,-1],center=TRUE,scale=TRUE),"scaled:scale")
mnCovarsOcean <- attr(scale(ocean_enviro[,-1],center=TRUE,scale=TRUE),"scaled:center")
oceanCovarScale <- scale(ocean_enviro[,-1],center=TRUE,scale=TRUE)
# make plot of environment
margins <- c(0.5,0.5,0.5,1.1)
p1 <- ggplot(data = keogh_long) +
geom_line(aes(x=Year, y=Recruits,colour=Species)) +
geom_point(aes(x=Year, y=Recruits,colour=Species)) +
#geom_smooth(data=keogh_long,aes(x=Year, y=Recruits))+
xlab("Year") + ylab("Recruits") +
facet_wrap(~Species,scales="free") +
scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
theme_minimal() +
theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
p2 <- ggplot(data = keogh_long) +
geom_line(aes(x=Year, y=Stock,colour=Species)) +
geom_point(aes(x=Year, y=Stock,colour=Species)) +
#geom_smooth(data=keogh_long,aes(x=Year, y=Stock))+
xlab("Year") + ylab("Adults") +
facet_wrap(~Species,scales="free") +
scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
theme_minimal() +
theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
megaP <- ggarrange(p1,p2,ncol=1,nrow=2,legend="top",common.legend=TRUE)
pAnnotated <- annotate_figure(megaP,bottom=text_grob(wrapper("Recruits and spawners since 1976 on the Keogh River",width=125),color="black",hjust=0,x=0.01,face="italic",size=10))
ggsave("Figures/stock and recruitment trends.jpeg",plot=pAnnotated,units="in",height=6,width=9,dpi=800)
# make plot of environment
margins <- c(0.5,0.5,0.5,1.1)
p1 <- ggplot(data = keogh_long) +
geom_line(aes(x=Year, y=seals,colour=Species)) +
geom_smooth(data=keogh_long,aes(x=Year, y=seals))+
xlab("Year") + ylab("Seal densities") +
scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
theme_minimal() +
theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
p2 <- ggplot(data = keogh_long) +
geom_line(aes(x=Year, y=oceanSalmon,colour=Species)) +
geom_smooth(data=keogh_long,aes(x=Year, y=oceanSalmon))+
xlab("Year") + ylab("North Pacific salmon (mt)") +
scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
theme_minimal() +
theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
p3 <- ggplot(data = keogh_long) +
geom_line(aes(x=Year, y=npgo,colour=Species)) +
geom_smooth(data=keogh_long,aes(x=Year, y=npgo))+
xlab("Year") + ylab("North Pacific gyre oscillation") +
scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
theme_minimal() +
theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
megaP <- ggarrange(p1,p2,p3,ncol=1,nrow=3,legend="top",common.legend=TRUE)
pAnnotated <- annotate_figure(megaP,bottom=text_grob(wrapper("Trends in marine and coastal conditions for Pacific salmonids in the Keogh River",width=125),color="black",hjust=0,x=0.01,face="italic",size=10))
ggsave("Figures/Seal and salmon rebuild.jpeg",plot=pAnnotated,units="in",height=7,width=6,dpi=800)
# make plot of environment
margins <- c(0.5,0.5,0.5,1.1)
p1 <- ggplot(data = keogh_long) +
geom_line(aes(x=Year, y=sumTemp,colour=Species)) +
geom_smooth(data=keogh_long,aes(x=Year, y=sumTemp))+
xlab("Year") + ylab("Summer air temperatures") +
scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
theme_minimal() +
theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
p2 <- ggplot(data = keogh_long) +
geom_line(aes(x=Year, y=winRain,colour=Species)) +
geom_smooth(data=keogh_long,aes(x=Year, y=winRain))+
xlab("Year") + ylab("Winter rainfall (mm)") +
scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
theme_minimal() +
theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
p3 <- ggplot(data = keogh_long) +
geom_line(aes(x=Year, y=freshCoho,colour=Species)) +
geom_smooth(data=keogh_long,aes(x=Year, y=freshCoho))+
xlab("Year") + ylab("Competing Coho salmon") +
scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
theme_minimal() +
theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
megaP <- ggarrange(p1,p2,p3,ncol=1,nrow=3,legend="top",common.legend=TRUE)
pAnnotated <- annotate_figure(megaP,bottom=text_grob(wrapper("Trends in freshwater conditions for Pacific salmonids in the Keogh River",width=125),color="black",hjust=0,x=0.01,face="italic",size=10))
ggsave("Figures/freshwater trends.jpeg",plot=pAnnotated,units="in",height=7,width=6,dpi=800)
# --- Figures: estimated density-dependence (b_yr) vs. each covariate --------
# Five parallel scatter + smoother figures, one covariate each, faceted by
# species.  b_yr is the DLM state flattened in the setup section above.
# make plot of trends in recruitment
margins <- c(0.5,0.5,0.5,1.1)
# Covariate 1: seal densities.
p1 <- ggplot(data = keogh_long) +
  geom_point(aes(x=seals, y=b_yr,colour=Species)) +
  geom_smooth(data=keogh_long,aes(x=seals, y=b_yr))+
  #geom_ribbon(aes(x=seals, ymin=b_LI, ymax=b_UI), linetype=2, alpha=0.5) +
  xlab("Seal densities (N/km)") + ylab("Strength of density-dependence") +
  scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
  facet_wrap(~Species,scales="free") +
  theme_minimal() +
  theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
megaP <- ggarrange(p1,ncol=1,nrow=1,legend="top",common.legend=TRUE)
pAnnotated <- annotate_figure(megaP,bottom=text_grob(wrapper("Trends in density-dependence along seal densities.",width=125),color="black",hjust=0,x=0.01,face="italic",size=10))
ggsave("Figures/recruitment trends with seals.jpeg",plot=pAnnotated,units="in",height=5.5,width=8.5,dpi=800)
# Covariate 2: summer air temperatures.
margins <- c(0.5,0.5,0.5,1.1)
p1 <- ggplot(data = keogh_long) +
  geom_point(aes(x=sumTemp, y=b_yr,colour=Species)) +
  geom_smooth(data=keogh_long,aes(x=sumTemp, y=b_yr))+
  #geom_ribbon(aes(x=seals, ymin=b_LI, ymax=b_UI), linetype=2, alpha=0.5) +
  xlab("Summer air temperatures") + ylab("Strength of density-dependence") +
  scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
  facet_wrap(~Species,scales="free") +
  theme_minimal() +
  theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
megaP <- ggarrange(p1,ncol=1,nrow=1,legend="top",common.legend=TRUE)
pAnnotated <- annotate_figure(megaP,bottom=text_grob(wrapper("Trends in density-dependence with summer air temperatures.",width=125),color="black",hjust=0,x=0.01,face="italic",size=10))
ggsave("Figures/recruitment trends with summer air temperatures.jpeg",plot=pAnnotated,units="in",height=5.5,width=8.5,dpi=800)
# Covariate 3: winter rainfall.
margins <- c(0.5,0.5,0.5,1.1)
p1 <- ggplot(data = keogh_long) +
  geom_point(aes(x=winRain, y=b_yr,colour=Species)) +
  geom_smooth(data=keogh_long,aes(x=winRain, y=b_yr))+
  #geom_ribbon(aes(x=seals, ymin=b_LI, ymax=b_UI), linetype=2, alpha=0.5) +
  xlab("Winter rainfall (mm)") + ylab("Strength of density-dependence") +
  scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
  facet_wrap(~Species,scales="free") +
  theme_minimal() +
  theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
megaP <- ggarrange(p1,ncol=1,nrow=1,legend="top",common.legend=TRUE)
pAnnotated <- annotate_figure(megaP,bottom=text_grob(wrapper("Trends in density-dependence with winter rainfall.",width=125),color="black",hjust=0,x=0.01,face="italic",size=10))
ggsave("Figures/recruitment trends with winter rainfall.jpeg",plot=pAnnotated,units="in",height=5.5,width=8.5,dpi=800)
# Covariate 4: North Pacific salmon biomass (residualized in setup).
margins <- c(0.5,0.5,0.5,1.1)
p1 <- ggplot(data = keogh_long) +
  geom_point(aes(x=oceanSalmon, y=b_yr,colour=Species)) +
  geom_smooth(data=keogh_long,aes(x=oceanSalmon, y=b_yr))+
  #geom_ribbon(aes(x=seals, ymin=b_LI, ymax=b_UI), linetype=2, alpha=0.5) +
  xlab("North Pacific salmon (mt)") + ylab("Strength of density-dependence") +
  scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
  facet_wrap(~Species,scales="free") +
  theme_minimal() +
  theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
megaP <- ggarrange(p1,ncol=1,nrow=1,legend="top",common.legend=TRUE)
pAnnotated <- annotate_figure(megaP,bottom=text_grob(wrapper("Trends in density-dependence with North Pacific salmon biomass.",width=125),color="black",hjust=0,x=0.01,face="italic",size=10))
ggsave("Figures/recruitment trends with pacific salmon.jpeg",plot=pAnnotated,units="in",height=5.5,width=8.5,dpi=800)
# Covariate 5: freshwater coho densities.
margins <- c(0.5,0.5,0.5,1.1)
p1 <- ggplot(data = keogh_long) +
  geom_point(aes(x=freshCoho, y=b_yr,colour=Species)) +
  geom_smooth(data=keogh_long,aes(x=freshCoho, y=b_yr))+
  #geom_ribbon(aes(x=seals, ymin=b_LI, ymax=b_UI), linetype=2, alpha=0.5) +
  xlab("Coho densities in freshwater") + ylab("Strength of density-dependence") +
  scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
  facet_wrap(~Species,scales="free") +
  theme_minimal() +
  theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
megaP <- ggarrange(p1,ncol=1,nrow=1,legend="top",common.legend=TRUE)
pAnnotated <- annotate_figure(megaP,bottom=text_grob(wrapper("Trends in density-dependence with freshwater coho abundance.",width=125),color="black",hjust=0,x=0.01,face="italic",size=10))
ggsave("Figures/recruitment trends with freshwater coho.jpeg",plot=pAnnotated,units="in",height=5.5,width=8.5,dpi=800)
# --- Figures: conceptual Ricker curves with illustrative parameters ---------
# Three cumulative figures: (1) a baseline Ricker curve, (2) baseline plus a
# halved-productivity (alpha) curve, (3) those two plus a doubled
# density-dependence (beta) curve.  Each figure pairs the recruitment curve
# (top) with the linearized log(R/S) form (bottom).  Dots mark (Smax, Rmax),
# the spawner abundance maximizing recruitment and the recruitment there.
# make Ricker plot
jpeg("Figures/Ricker model 1.jpeg",width=5,height=6,units="in",res=800)
layout(matrix(1:2,nrow=2))
par(mar=c(5,4,1,1))
alpha <- 4          # productivity (recruits per spawner at low density)
beta <- -0.002      # density-dependence
Smax <- 1/-beta     # spawners at peak recruitment
Rmax <- alpha*Smax*exp(beta*Smax)  # peak recruitment
K <- -log(alpha)/beta              # equilibrium (replacement) abundance
curve(alpha*x*exp(beta*x),from=0,to=2*Smax,xlab="Spawner abundance",ylab="Recruits",lwd=2,col=colr[1],ylim=c(0,Rmax))
points(Smax,Rmax,pch=21,bg=colr[1])
curve(log(alpha)+beta*x,from=0,to=2*Smax,xlab="Spawner abundance",ylab="log(Recruits/Spawner)",lwd=2,col=colr[1],ylim=range(log(alpha)+beta*2*Smax,log(alpha)+beta*1e-3))
dev.off()
jpeg("Figures/Ricker model 2.jpeg",width=5,height=6,units="in",res=800)
layout(matrix(1:2,nrow=2))
par(mar=c(5,4,1,1))
alpha <- 4
beta <- -0.002
Smax <- 1/-beta
Rmax <- alpha*Smax*exp(beta*Smax)
K <- -log(alpha)/beta
# Second curve: productivity halved, same density-dependence.
alpha2 <- 4*0.5
beta2 <- -0.002
Smax2 <- 1/-beta2
Rmax2 <- alpha2*Smax2*exp(beta2*Smax2)
K2 <- -log(alpha2)/beta2
curve(alpha*x*exp(beta*x),from=0,to=2*Smax,xlab="Spawner abundance",ylab="Recruits",lwd=2,col=colr[1],ylim=c(0,Rmax))
curve(alpha2*x*exp(beta2*x),from=0,to=2*Smax,lwd=2,col=colr[2],ylim=c(0,Rmax2),add=TRUE)
points(Smax,Rmax,pch=21,bg=colr[1])
points(Smax2,Rmax2,pch=21,bg=colr[2])
curve(log(alpha)+beta*x,from=0,to=2*Smax,xlab="Spawner abundance",ylab="log(Recruits/Spawner)",lwd=2,col=colr[1],ylim=range(log(alpha)+beta*2*Smax,log(alpha)+beta*1e-3))
curve(log(alpha2)+beta2*x,from=0,to=2*Smax,lwd=2,col=colr[2],ylim=range(log(alpha2)+beta2*2*Smax2,log(alpha2)+beta2*1e-3),add=TRUE)
dev.off()
jpeg("Figures/Ricker model 3.jpeg",width=5,height=6,units="in",res=800)
layout(matrix(1:2,nrow=2))
par(mar=c(5,4,1,1))
alpha <- 4
beta <- -0.002
Smax <- 1/-beta
Rmax <- alpha*Smax*exp(beta*Smax)
K <- -log(alpha)/beta
alpha2 <- 4*0.5
beta2 <- -0.002
Smax2 <- 1/-beta2
Rmax2 <- alpha2*Smax2*exp(beta2*Smax2)
K2 <- -log(alpha2)/beta2
# Third curve: baseline productivity with doubled density-dependence.
alpha3 <- 4
beta3 <- -0.002*2
Smax3 <- 1/-beta3
Rmax3 <- alpha3*Smax3*exp(beta3*Smax3)
K3 <- -log(alpha3)/beta3
curve(alpha*x*exp(beta*x),from=0,to=2*Smax,xlab="Spawner abundance",ylab="Recruits",lwd=2,col=colr[1],ylim=c(0,Rmax))
curve(alpha2*x*exp(beta2*x),from=0,to=2*Smax,lwd=2,col=colr[2],ylim=c(0,Rmax2),add=TRUE)
curve(alpha3*x*exp(beta3*x),from=0,to=2*Smax,lwd=2,col=colr[3],ylim=c(0,Rmax2),add=TRUE)
points(Smax,Rmax,pch=21,bg=colr[1])
points(Smax2,Rmax2,pch=21,bg=colr[2])
points(Smax3,Rmax3,pch=21,bg=colr[3])
curve(log(alpha)+beta*x,from=0,to=2*Smax,xlab="Spawner abundance",ylab="log(Recruits/Spawner)",lwd=2,col=colr[1],ylim=range(log(alpha)+beta*2*Smax,log(alpha)+beta*1e-3))
curve(log(alpha2)+beta2*x,from=0,to=2*Smax,lwd=2,col=colr[2],add=TRUE)
curve(log(alpha3)+beta3*x,from=0,to=2*Smax,lwd=2,col=colr[3],add=TRUE)
dev.off()
# multiple species: dolly varden, cutthroat trout, pink salmon, coho salmon
# including process and observation error
# precipitation covariates only affect observation model
# time-varying beta & alpha
# run a DLM on stock-recruitment for steelhead only
#
# Per-species log(recruits-per-spawner) series (the DLM response data).
ln_RS_sh <- log(recruits$Recruits.Steelhead/adults$Stock.Steelhead)
ln_RS_dv <- log(recruits$`Recruits.Dolly Varden`/adults$`Stock.Dolly Varden`)
ln_RS_ct <- log(recruits$Recruits.Cutthroat/adults$Stock.Cutthroat)
ln_RS_pk <- log(recruits$Recruits.Pink/adults$Stock.Pink)
ln_RS_co <- log(recruits$Recruits.Coho/adults$Stock.Coho)
dat <- rbind(ln_RS_sh,ln_RS_dv,ln_RS_ct,ln_RS_pk,ln_RS_co)
# Interactive inspection of the fitted model (auto-printed at the console).
keoghDLM$model
keoghDLM$states
mean(colSums(keoghDLM$states))
rowMeans(keoghDLM$states)
species <- unique(keogh_long$Species)
# Ricker parameters implied by the DLM: alpha from the D (intercept) terms;
# beta from the time-varying states plus the summed covariate (C) effects per
# species.  NOTE(fix): the original had a dangling `refYear <-` immediately
# above `alphas` -- which silently made refYear an alias of alphas -- plus an
# unused `x <- species[4]` scratch assignment; both have been removed.
alphas <- exp(coef(keoghDLM)$D)
betas <- keoghDLM$states + sapply(species,function(x){sum(coef(keoghDLM)$C[grep(paste(x,"_",sep=""),row.names(coef(keoghDLM)$C))])})
names(alphas) <- names(betas) <- species
# Stock-recruit scatter per species, overlaid with the year-specific Ricker
# curves implied by the time-varying density-dependence states.
jpeg("Figures/recruitment.jpeg",width=7.5,height=5,units="in",res=800)
layout(matrix(1:6,nrow=2,ncol=3,byrow=TRUE))
par(mar=c(5,4,1,1))
for(i in seq_along(species))
{
  sp_stock <- keogh_long$Stock[keogh_long$Species==species[i]]
  sp_recruits <- keogh_long$Recruits[keogh_long$Species==species[i]]
  plot(sp_stock,sp_recruits,pch=21,bg=colr[i],
       xlab=paste(species[i],"adults",sep=" "),
       ylab=paste(species[i],"recruits",sep=" "),
       ylim=range(c(0,sp_recruits),na.rm=TRUE),
       xlim=c(0,max(sp_stock,na.rm=TRUE)))
  for(t in seq_len(Nyears))
  {
    alpha <- alphas[i]
    # Loop-local name so the `betas` matrix above is not clobbered
    # (the original overwrote `betas` with a scalar here).
    beta_t <- keoghDLM$states[i,t]
    curve(alpha*x*exp(beta_t*x),add=TRUE,
          xlab=paste(species[i],"adults",sep=" "),
          ylab=paste(species[i],"recruits",sep=" "),
          ylim=range(c(0,sp_recruits),na.rm=TRUE),
          col=adjustcolor(colr[i],alpha=0.5))
  }
}
dev.off()
# --- Figure: MARSS fitted values with confidence ribbons --------------------
keoghAllfit <- augment(keoghDLM, interval="confidence")
keoghAllfit$Year <- keoghAllfit$t + 1975  # model time index -> brood year
# The second assignment overwrites the first; presumably keoghAllfit rows are
# in the same species-by-year order as keogh_long -- TODO confirm.
keoghAllfit$Species <- keoghAllfit$.rownames
keoghAllfit$Species <- keogh_long$Species
margins <- c(0.5,0.5,0.5,1.1)
p <- ggplot(data = keoghAllfit) +
  geom_line(aes(Year, .fitted)) +
  geom_ribbon(aes(x=Year, ymin=.conf.low, ymax=.conf.up), linetype=2, alpha=0.5) +
  geom_point(data=keoghAllfit, mapping = aes(x=Year, y=y,colour=Species)) +
  xlab("Year") + ylab("ln (recruits per spawner)") + facet_wrap(~Species,scales="free") +
  scale_colour_manual(values=wes_palette(n=Nspecies, name=wesAnderson)) +
  theme_minimal() +
  theme(legend.position="none",strip.text.x = element_blank(),plot.margin=unit(margins,"line"))
megaP <- ggarrange(p,ncol=1,nrow=1,legend="top",common.legend=TRUE)
pAnnotated <- annotate_figure(megaP,bottom=text_grob(wrapper("MARSS model fits to Keogh River stock-recruitment data",width=125),color="black",hjust=0,x=0.01,face="italic",size=10))
# NOTE(review): unlike the other figures this one saves to the working
# directory rather than Figures/ -- confirm whether that is intentional.
ggsave("Model fits.jpeg",plot=pAnnotated,units="in",height=5,width=7,dpi=800)
# --- Figures: per-species temporal trends in density-dependence and the
# implied freshwater capacity.  Vertical line at 1991 marks a reference
# brood year used throughout; curves show the state +/- 2 SE.
# temporal trends:
jpeg("Figures/temporal trends.jpeg",width=8,height=6,units="in",res=800)
m <- 1  # NOTE(review): unused below -- candidate for removal
titles <- c("Steelhead","Dolly Varden","Cutthroat","Pink","Coho")
layout(matrix(1:6,nrow=2,ncol=3,byrow=TRUE))
states <- 1:5  # row index of each species in keoghDLM$states
for(j in 1:Nspecies)
{
  state <- states[j]
  # temporal trends in beta
  par(mar=c(5,5,3,1))
  mn <- keoghDLM$states[state,]     # state estimate (density-dependence)
  se <- keoghDLM$states.se[state,]  # its standard error
  plot(years,mn,xlab="",ylab="Density-dependence",bty="n",xaxt="n",type="n",ylim=c(min(mn-2*se),max(mn+2*se)),cex.lab=1.1)
  lines(years, rep(0,Nyears), lty="dashed")
  lines(years, mn, col=colr[j], lwd=3)
  lines(years, mn+2*se, col=colr[j])
  lines(years, mn-2*se, col=colr[j])
  abline(v=1991,lwd=3)
  axis(1,at=seq(min(years),max(years),5),cex=2)
  mtext("Brood year", 1, line=2,cex=0.9)
  title(titles[j],font=2,cex=0.9,line=1)
}
dev.off()
# Capacity trends: Rmax implied by the year-specific Ricker parameters
# (uses `alphas` computed in the recruitment-figure section above).
jpeg("Figures/capacity trends.jpeg",width=8,height=6,units="in",res=800)
layout(matrix(1:6,nrow=2,ncol=3,byrow=TRUE))
states <- 1:5
for(j in 1:Nspecies)
{
  state <- states[j]
  # temporal trends in beta
  par(mar=c(5,5,3,1))
  # temporal trends in carrying capacity
  Smax <- pmax(0,1/-(keoghDLM$states[state,]),na.rm=TRUE)
  Rmax <- alphas[j,1]*Smax*exp(keoghDLM$states[state,]*Smax)
  Kt <- -log(alphas[j,1])/keoghDLM$states[state,]
  # Mask nonsensical capacities (negative, or implausibly large) as NA.
  mn <- ifelse(Rmax<0,NA,Rmax)
  mn <- ifelse(mn >800000,NA,mn)
  plot(years,mn,xlab="",ylab="Freshwater capacity",bty="n",xaxt="n",type="n",cex.lab=1.1,ylim=range(c(0,mn),na.rm=TRUE))
  lines(years, rep(0,Nyears), lty="dashed")
  lines(years, mn, col=colr[j], lwd=3)
  axis(1,at=seq(min(years),max(years),5),cex=2)
  mtext("Brood year", 1, line=2,cex=0.9)
  abline(v=1991,lwd=3)
  title(titles[j],font=2,cex=0.9,line=1)
}
dev.off()
# --- Figure: seal densities vs. estimated density-dependence, per species ---
# Filled points mark brood years after 1990.
# NOTE(fix): the original indexed species[1] / states[1,] inside the loop
# (every panel re-plotted the steelhead series) and referenced an object
# named `keoghDLMspecies` that is never created in this script; both are
# corrected to use the loop index and the `keoghDLM` fit loaded at the top.
# plot seals:
jpeg("Figures/effect of seals.jpeg",width=7,height=5.5,units="in",res=800)
species <- c("Steelhead","Dolly Varden","Cutthroat","Pink","Coho")
layout(matrix(1:6,nrow=3,ncol=2,byrow=TRUE))
par(mar=c(5,4,1,1))
for(i in seq_along(species)){
  plot(keogh_long$seals[keogh_long$Species==species[i]],keoghDLM$states[i,],
       pch=21,
       bg=ifelse(years>1990,adjustcolor(colr[i],1),adjustcolor(colr[i],0.5)),
       xlab="Seal densities",ylab="Strength of density-dependence")
}
dev.off()

# Process-error correlation matrix among species, derived from the estimated
# covariance matrix Q.  cov2cor() replaces the hand-rolled double loop.
corr_mat <- cov2cor(coef(keoghDLM,type="matrix")$Q)

# --- Figure: steelhead Ricker curves at three points in the series ----------
# Brood years 1, 20, and 40 share the DLM's estimated alpha but use the
# year-specific density-dependence state; dots mark (Smax, Rmax).
jpeg("Figures/steelhead recruitment trends.jpeg",width=7,height=5,units="in",res=800)
layout(matrix(1,nrow=1))
par(mar=c(5,4,1,1))
alpha <- exp(coef(keoghDLM)$D[1,1])
beta <- keoghDLM$states[1,1]
Smax <- 1/-beta
Rmax <- alpha*Smax*exp(beta*Smax)
K <- -log(alpha)/beta
alpha2 <- exp(coef(keoghDLM)$D[1,1])
beta2 <- keoghDLM$states[1,20]
Smax2 <- 1/-beta2
Rmax2 <- alpha2*Smax2*exp(beta2*Smax2)
K2 <- -log(alpha2)/beta2
alpha3 <- exp(coef(keoghDLM)$D[1,1])
beta3 <- keoghDLM$states[1,40]
Smax3 <- 1/-beta3
Rmax3 <- alpha3*Smax3*exp(beta3*Smax3)
K3 <- -log(alpha3)/beta3
curve(alpha*x*exp(beta*x),from=0,to=2*Smax,xlab="Spawner abundance",ylab="Recruits",lwd=2,col=colr[1],ylim=c(0,Rmax))
curve(alpha2*x*exp(beta2*x),from=0,to=2*Smax,lwd=2,col=colr[2],ylim=c(0,Rmax2),add=TRUE)
curve(alpha3*x*exp(beta3*x),from=0,to=2*Smax,lwd=2,col=colr[3],ylim=c(0,Rmax2),add=TRUE)
points(Smax,Rmax,pch=21,bg=colr[1])
points(Smax2,Rmax2,pch=21,bg=colr[2])
points(Smax3,Rmax3,pch=21,bg=colr[3])
dev.off()
648b394203cf4b77c5175812d75513ed9e33cdfb | a2b00b021859e3075b7ff8cfb698c797710ada82 | /phylogeny/manipulate_trees.R | 40275f577a6cb3581cc81676b3ad367b7d6ebf50 | [] | no_license | singhal/Spheno_Gene_Flow | fb9fd936a3970b74592ed0be22e28cdc1fc7cdb2 | b0056a3464d4a6cdfffba10037bfa679c3e5dbdd | refs/heads/master | 2020-03-20T22:41:50.035111 | 2018-06-18T22:01:43 | 2018-06-18T22:01:43 | 137,811,166 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,069 | r | manipulate_trees.R | library(ggtree)
# lineage data
d = read.csv("~/Dropbox/Sphenomorphine_Gene_Flow/data/metadata/sphenomorphine_species.csv", stringsAsFactors=F)
s = d[complete.cases(d$sanger_sample),]
# read in tree data
td = read.beast("/Users/Sonal/Dropbox/Sphenomorphine_Gene_Flow/data/phylogeny/beast_ucln_31July17/mtcds_pos1.sub.tre")
td = fortify(td)
# read in tree
t = read.nexus("/Users/Sonal/Dropbox/Sphenomorphine_Gene_Flow/data/phylogeny/beast_ucln_31July17/mtcds_pos1.sub.tre")
t$node.label = td[td$node > Ntip(t), "posterior"]
# drop outgroups
outs = d[d$outgroup == TRUE, "sanger_sample"]
outs = outs[complete.cases(outs)]
t = drop.tip(t, outs)
# drop tips either matched to 2 otus or 2 species
keep = s[s$SPECIES_KEEP == TRUE, "sanger_sample"]
t1 = drop.tip(t, setdiff(t$tip.label, keep))
t1$tip.label = d[match(t1$tip.label, d$sanger_sample), "SPECIES"]
write.tree(t1, "/Users/Sonal/Dropbox/Sphenomorphine_Gene_Flow/data/phylogeny/beast_ucln_31July17//species.tre")
# drop tips either matched to 2 otus or 2 species
keep = s[s$OTU_KEEP == TRUE, "sanger_sample"]
t1 = drop.tip(t, setdiff(t$tip.label, keep))
t1$tip.label = d[match(t1$tip.label, d$sanger_sample), "OTU"]
write.tree(t1, "/Users/Sonal/Dropbox/Sphenomorphine_Gene_Flow/data/phylogeny/beast_ucln_31July17//otu.tre")
# get sampling probs
length(unique(d[complete.cases(d$sanger_sample), "OTU"])) / length(unique(d$OTU))
length(unique(d[complete.cases(d$sanger_sample), "SPECIES"])) / length(unique(d$SPECIES))
trees = read.nexus("~/Dropbox/Sphenomorphine_Gene_Flow/data/phylogeny/beast_ucln_31July17/mtcds_pos1.sub.trees")
trees1 = trees[sample(1:length(trees), 100, replace=F)]
trees2 = vector('list', 100)
for (i in 1:length(trees1)) {
t = trees1[[i]]
t = drop.tip(t, outs)
keep = s[s$OTU_KEEP == TRUE, "sanger_sample"]
t1 = drop.tip(t, setdiff(t$tip.label, keep))
t1$tip.label = d[match(t1$tip.label, d$sanger_sample), "OTU"]
trees2[[i]] = t1
}
class(trees2) <- "multiPhylo"
write.tree(trees2, "~/Dropbox/Sphenomorphine_Gene_Flow/data/phylogeny/beast_ucln_31July17/otu_posterior.trees")
|
ee161cebf4f5b3cba78def95195611e21a0f0820 | 6401b1aa210eda691262cc363205a8eea01f50f0 | /man/fars_summarize_years.Rd | 6e2007649a3dc563f45f0e37be230b0832420d16 | [
"MIT"
] | permissive | davidrmh/week4DavidMontalvan | 2aae5985d8c88e8c18da0fd0bb1a59c84f3d4d51 | 2b3fd2c6b4ddd98e448f8bd87ff6e15b60bef59b | refs/heads/master | 2022-11-12T23:53:02.214107 | 2020-06-25T00:24:54 | 2020-06-25T00:24:54 | 274,786,386 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 508 | rd | fars_summarize_years.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fars_functions.R
\name{fars_summarize_years}
\alias{fars_summarize_years}
\title{Creates a summary for a set of years}
\usage{
fars_summarize_years(years)
}
\arguments{
\item{years}{Numeric (integer) vector with the years
you want to summarize.}
}
\value{
tibble with the summarized data
}
\description{
This function takes a numeric vector
that contains the years that you want
to summarize
}
\seealso{
\link{fars_read_years}
}
|
28b39ee98bed9db778ed8852f9c037451d04de42 | 6fd1034c746a1cd5367f17b36c2a90a0c04e9e5d | /man/is_quosureish.Rd | 0b2043c9c993fd2aef5b2f811b9c15ba55c0b1cb | [] | no_license | COMODr/rlang | cdd8ab6bbc50b86c640a66061adf456aa9d13e82 | 2276162a26ba48cf0932fc9514f7d1842d8f44bc | refs/heads/master | 2020-06-03T19:26:52.047101 | 2019-06-12T12:47:05 | 2019-06-12T12:47:05 | 191,701,920 | 1 | 0 | null | 2019-06-13T06:12:58 | 2019-06-13T06:12:57 | null | UTF-8 | R | false | true | 960 | rd | is_quosureish.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/lifecycle-retired.R
\name{is_quosureish}
\alias{is_quosureish}
\alias{as_quosureish}
\title{Test for or coerce to quosure-like objects}
\usage{
is_quosureish(x, scoped = NULL)
as_quosureish(x, env = caller_env())
}
\arguments{
\item{x}{An object to test.}
\item{scoped}{A boolean indicating whether the quosure is scoped,
that is, has a valid environment attribute. If \code{NULL}, the scope
is not inspected.}
\item{env}{The environment in which the expression should be
evaluated. Only used for symbols and calls. This should typically
be the environment in which the expression was created.}
}
\description{
\Sexpr[results=rd, stage=render]{rlang:::lifecycle("defunct")}
These functions are deprecated as of rlang 0.2.0 because they make
the assumption that quosures are a subtype of formula, which we are
now considering to be an implementation detail.
}
\keyword{internal}
|
8ebfc5d7d59a65a299bd20b3be42cf54a14e541e | 89b8646ff2f9f52549072a733cc0421591b866a5 | /hw/hw01/tests/p16.R | 209e8a6dfdcfd4426730087e625f9d30da2bc1ab | [] | no_license | ph142-ucb/ph142-su21 | 8307f46602223085a106565ed66419b681d36f55 | b172cc932c752aa76daa8aad98a884f698b8b529 | refs/heads/main | 2023-07-02T18:58:41.901353 | 2021-08-10T17:30:27 | 2021-08-10T17:30:27 | 368,049,908 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 674 | r | p16.R | library(testthat)
# Autograder metadata (YAML consumed by the grading tool): name, visibility,
# and point value of each sub-test. Do not edit the string contents.
test_metadata <- "
cases:
- hidden: false
  name: p16a
  points: 0.3333333333333333
- hidden: false
  name: p16b
  points: 0.3333333333333333
- hidden: false
  name: p16c
  points: 0.3333333333333333
name: p16
"
# The tests below check `avg_by_vore`, which must already exist in the
# grading environment (created by the student's solution code).
test_that("p16a", {
  expect_true(is.data.frame(avg_by_vore))
  print("p16a: Checking avg_by_vore is a dataframe.")
})
test_that("p16b", {
  expect_true(ncol(avg_by_vore) == 2 &&
              nrow(avg_by_vore) == 5)
  print("p16b: Checking avg_by_vore has 5 rows and 2 columns.")
})
test_that("p16c", {
  expect_true(identical(names(avg_by_vore), c("vore", "sleep_avg")))
  print("p16c: Checking column names are vore and sleep_avg.")
})
|
604bb2fef354c8a511d4295f4d342f751f204592 | f680ff6e25a828bbac8b244f9061a3fa71125836 | /man/commonSamples.Rd | 03c81718bb23b0335cb6eebb5b3f5116af3378b7 | [
"MIT"
] | permissive | isglobal-brge/MultiDataSet | db7348454ffc202469b7354d0b7252eedc7659a4 | c4bea804bd9b8b53b8d96928d9148f09787f475e | refs/heads/master | 2021-10-07T17:58:31.164306 | 2021-10-07T13:34:50 | 2021-10-07T13:34:50 | 80,110,013 | 2 | 0 | MIT | 2021-01-29T10:27:17 | 2017-01-26T11:45:31 | R | UTF-8 | R | false | true | 1,831 | rd | commonSamples.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1_MultiDataSet-ClassGenerics.R
\name{commonSamples}
\alias{commonSamples}
\title{Method to select samples that are present in all datasets.}
\usage{
commonSamples(object, unify.names = FALSE)
}
\arguments{
\item{object}{\code{MultiDataSet} that will be filtered.}
\item{unify.names}{Logical indicating if sample names of the sets should be unified.}
}
\value{
A new \code{MultiDataSet} with only the common samples.
}
\description{
This method subsets the datasets to only contain the samples that are in all datasets. All sets
will have the samples in the same order, taking into account that there can be duplicates.
}
\details{
If unify.names is TRUE, the sample names of the sets will be unified using the id column of
phenodata. This option is only possible when there are no duplicated ids.
}
\examples{
multi <- createMultiDataSet()
eset <- new("ExpressionSet", exprs = matrix(runif(9), ncol = 3))
fData(eset) <- data.frame(chromosome = c("chr1", "chr1", "chr1"),
start = c(1, 5, 10),end = c(4, 6, 14),
stringsAsFactors = FALSE)
sampleNames(eset) <- c("S1", "S2", "S3")
pData(eset) <- data.frame(id = c("S1", "S2", "S3"))
rownames(pData(eset)) <- c("S1", "S2", "S3")
multi <- add_genexp(multi, eset, dataset.name = "g1")
eset <- new("ExpressionSet", exprs = matrix(runif(8), ncol = 2))
fData(eset) <- data.frame(chromosome = c("chr1", "chr1", "chr1", "chr1"),
start = c(1, 14, 25, 104),end = c(11, 16, 28, 115),
stringsAsFactors = FALSE)
sampleNames(eset) <- c("S1", "G2")
pData(eset) <- data.frame(id = c("S1", "G2"))
rownames(pData(eset)) <- c("S1", "G2")
multi <- add_genexp(multi, eset, dataset.name="g2")
commonSamples(multi)
}
|
0f589da05bd5d17d93c3755debe7c15ae86eeb97 | aa9563fb769372b219e83b53430e0afc65fe02c0 | /factors.R | a67823ee8e5d9bd849dee28f8431bb137c3aaa9f | [] | no_license | peterkabai/dataScience | e357d35b01c610a38488109bb2ef4f197b15286c | 99652c4c05ca8f26df8760c67b5882d13935a324 | refs/heads/master | 2020-04-09T09:54:41.677461 | 2019-05-09T04:33:22 | 2019-05-09T04:33:22 | 152,819,355 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,312 | r | factors.R | # Factors, lists and matrices
# Let's start with factors
# Create a vector 'gear' of ordered factor values from the
# column 'gear' of the mtcars dataset.
gear <- ordered(mtcars$gear)
# Display the levels of the factor.
gear
# Now let's do some lists
# Create a list x having component 'a' of value "foo",
# component 'b' of value 1:4, and component 'c' of
# value function(x) 1.5 * x^2 to c.
x <- list(a="foo", b=1:4, c=function(x){1.5 * x^2})
x
# Get the length of the 'a' component of list x,
# selecting the component by name, then the 'b' component.
length(x$a)
length(x$b)
# Get the second and third elements of component b, accessing the
# list component by its index number.
x[[2]][c(2,3)]
# Apply function component 'c' of list x to value 2.5
x$c(2.5)
# Define a function avgsum that takes a numeric vector x, and
# returns a list with two elements: 'a', which is the average
# value of x, and 'b', which is the sum of x. Use 'stop' or
# 'stopifnot' to handle the case of an empty list.
avgsum <- function(x) {
  stopifnot(length(x) > 0)
  return( list(a=mean(x), b=sum(x) ))
}
# Apply the function to 1:1000, and get the second
# component of the result.
# NOTE(review): the call below does not actually extract the second
# component; avgsum(1:1000)$b would — confirm the intended answer.
avgsum(1:1000)
# Create a list y containing two vectors: 'a', with value 1:1000,
# and 'b', with value 1001:2000
y <- list(a=1:1000, b=1001:2000)
# Show the first elements of component b of list y.
head(y$b)
# Finally, here we'll do matrices
# Create a 100 x 3 matrix of random values between 1 and 10
x <- matrix(sample(1:10, 300, TRUE), nrow=100, ncol=3)
x
# Set the column names of x to "x1", "x2", "x3".
colnames(x) <- c("x1", "x2", "x3")
# Show the first rows of matrix x.
head(x)
# Compute the dimensions of matrix x.
dim(x)
# Create a 3 x 1 matrix b containing values 1.2, -0.24, 0.61
b <- matrix(c(1.2,-0.24,0.61))
b
# Confirm the dimensions of matrix b.
dim(b)
# Perform matrix multiplication of x and b. Before doing this,
# make sure you understand the dimension of the resulting matrix.
r <- x %*% b
r
# Check the dimensions of the resulting matrix.
dim(r)
# Look at the first values of the resulting matrix.
r[1]
# Compute the average value of matrix x.
mean(x)
# Figure out how to create a 3-dimensional plot of the
# feature vectors in matrix x.
library(scatterplot3d)
scatterplot3d(x, main="Plot of Matrix X")
|
f4fe739f4341928e92a0d16eb2a3b25452d58e78 | 327be677334f53b4658f33c33e909133e47a0dff | /cachematrix.R | 01d04d4720f07f5a93c4a7f41021f735bb51b7fd | [] | no_license | alishaqazi/ProgrammingAssignment2 | db6d6b719df7f82d87247786d791f0fadb4bdce7 | 9610d3e22e2f862b3040fe5cf2f75e94fd0b154d | refs/heads/master | 2021-05-16T04:00:47.135881 | 2017-10-03T17:50:02 | 2017-10-03T17:50:02 | 105,674,679 | 0 | 0 | null | 2017-10-03T16:25:08 | 2017-10-03T16:25:07 | null | UTF-8 | R | false | false | 1,026 | r | cachematrix.R | ## I have created makeCacheMatrix to make a special "matrix" object, so that cacheSolve can take makeCacheMatrix as an object.
## Therefore, this will lead cacheSolve to retrieve the inverse.
## makeCacheMatrix below will take a matrix as an argument, and will return a list that can be used by cacheSolve.
## Create a special "matrix" wrapper: a list of closures that store a matrix
## together with a cached value of its inverse.
##
## Returned components:
##   set(y)           replace the stored matrix and drop any cached inverse
##   get()            return the stored matrix
##   setinverse(inv)  cache the inverse
##   getinverse()     return the cached inverse, or NULL if not computed yet
makeCacheMatrix <- function(x = matrix()) {
        m <- NULL  # cached inverse; NULL until setinverse() is called
        set <- function(y) {
                x <<- y
                m <<- NULL  # invalidate the cache when the matrix changes
        }
        get <- function() x
        setinverse <- function(inverse) m <<- inverse
        getinverse <- function() m
        # The original code was missing the closing parenthesis on this call,
        # which made the file fail to parse.
        list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve below will compute the inverse of what is returned by makeCacheMatrix above.
## Return the inverse of the special "matrix" produced by makeCacheMatrix.
## On the first call the inverse is computed with solve() and cached on the
## object; subsequent calls return the cached value and announce it with a
## message. Extra arguments in `...` are forwarded to solve().
cacheSolve <- function(x, ...) {
        cached <- x$getinverse()
        if (is.null(cached)) {
                # Cache miss: compute, store, and return the inverse.
                inv <- solve(x$get(), ...)
                x$setinverse(inv)
                return(inv)
        }
        message("getting cached data")
        cached
}
|
81ac423e9fbdff9c6a59e7b13d81ed3f72cad18e | 453f8f4ad202fa9b9fe7da7690781c4b12d45216 | /GSDF.TWCR/man/TWCR.get.slice.at.month.Rd | 887aa7ea8310e4b00ebea7af5c234d8118e60ed6 | [
"Apache-2.0"
] | permissive | oldweather/GSDF | 96c7478c740028a3daac226d33a130811776a3ab | 1f7ae58c9395b96aadea68a2870507bf6cbffe6d | refs/heads/master | 2021-01-17T03:45:42.929022 | 2017-09-07T13:52:32 | 2017-09-07T13:52:32 | 16,053,979 | 0 | 1 | null | null | null | null | UTF-8 | R | false | true | 958 | rd | TWCR.get.slice.at.month.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GSDF.TWCR.R
\name{TWCR.get.slice.at.month}
\alias{TWCR.get.slice.at.month}
\title{Get slice at month.}
\usage{
TWCR.get.slice.at.month(variable, year, month, height = NULL,
opendap = TRUE, version = 2, type = "mean")
}
\arguments{
\item{variable}{'prmsl', 'prate', 'air.2m', 'uwnd.10m' or 'vwnd.10m' - or any 20CR variable}
\item{height}{Height in hPa - leave NULL for monolevel}
\item{opendap}{TRUE for network retrieval, FALSE for local files (faster, if you have them).}
\item{type}{- 'mean', 'spread', 'normal', or 'standard.deviation'.
Note that standard deviations are not available over opendap.}
}
\value{
A GSDF field with lat and long as extended dimensions
}
\description{
Get a 2D horizontal slice of a selected variable (as a GSDF field) for a given month.
}
\details{
Interpolates to the selected height when the selected height is not that of a 20CR level.
}
|
a87c87b718f73ea239ac463948835eacac3b3f7b | 79e00cf80dbbae367df735d746624153f2293895 | /run_genscan.R | 1a25dba8e0ccbe7f5a298dbe82a243ce4366f3f9 | [] | no_license | hippover/GI_a2 | 68a92a7c1a00a56d606ff8422f5067c0bf3b39ff | b24472890e6e52d26d6dfd51ddeb1beffb7d2285 | refs/heads/master | 2020-04-05T09:03:28.684855 | 2018-11-19T13:08:00 | 2018-11-19T13:08:00 | 156,740,801 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,387 | r | run_genscan.R | # Launch genscan on species chromosomess
## Run GENSCAN gene prediction over one chromosome of each of four Drosophila
## species. Each chromosome is cut into overlapping slices (GENSCAN cannot
## process a whole chromosome at once), GENSCAN annotates every slice, a perl
## script merges the per-slice output into one csv per species, and genes
## duplicated by the slice overlap are then removed.
library(seqinr)
library(data.table)
# Chromosome fasta file of each species (FlyBase release names).
Dsim <-'chromosomes/dsim-all-chromosome-r2.01.fasta'
Dmoj <- 'chromosomes/dmoj-all-chromosome-r1.04.fasta'
Dsec <- 'chromosomes/dsec-all-chromosome-r1.3.fasta'
Dere <- 'chromosomes/dere-all-chromosome-r1.05.fasta'
files=list(Dsim=Dsim,Dmoj=Dmoj,Dsec=Dsec,Dere=Dere)
# GENSCAN installation directory (binary and parameter matrices).
genscan <- '/local/data/public/genome_informatics_2018/programs/genscan/'
setwd('/local/data/public/g1/')
for (j in 1:length(files)){
  print("----------------------")
  f <- files[[j]]
  species <- names(files)[[j]]
  print(f)
  print(species)
  # We only take the first sequence,
  # which is much longer than all the others (I don't know what they are)
  read <-read.fasta(f)
  chr <- read[1][[1]]
  # Cut the chromosome into overlapping slices so genes falling on a slice
  # boundary are still seen whole in the neighbouring slice.
  seq_length <- 3e5
  seq_overlap <- 3e4
  n_slices <- as.integer(length(chr) / (seq_length-seq_overlap))
  print(paste(length(chr),"bases cut in",n_slices,"slices"))
  # For each slice: write it as a fasta file, run GENSCAN on it, and save
  # the raw GENSCAN output to a per-slice annotation file.
  for (i in 1:n_slices){
    write.fasta(chr[((i-1)*seq_length - (i-1)*seq_overlap):
                      ((i*seq_length-1)-(i-1)*seq_overlap)],
                paste((i-1)*seq_length - (i-1)*seq_overlap,":",i*seq_length-(i-1)*seq_overlap,sep=""),
                file.out=paste('chromosomes/',species,'/slices/slice_',i,'.fa',sep=""),
                open="w",as.string=FALSE)
    # Run GENSCAN with the human parameter matrix; -cds also reports the
    # predicted coding sequences. Output is captured via intern=TRUE.
    outex<-system(paste(genscan,'genscan',' ',
                        genscan,'lib/HumanIso.smat',' ',
                        'chromosomes/',species,'/slices/slice_',i,'.fa',' -cds',sep="")
                  ,intern=TRUE)
    if (i %% 10 == 0) print(paste("ran genscan on slice",i,"over",n_slices))
    out_file <- file(paste("chromosomes/",species,"/annotations/slice_",i,".txt",sep=""),open="w")
    write(outex,out_file)
    close(out_file)
  }
  # launch perl script
  # which parses the results and creates one csv containing all the lines
  print("-----------")
  print("Launching perl script")
  # Echo the command first so it can be re-run by hand if needed.
  print(paste("perl parse_genscan_1.pl",
              seq_length,
              seq_overlap,
              n_slices,
              paste0('/local/data/public/g1/Genscan/',species,'_genes_temp.csv'),
              species))
  # NOTE(review): the actual invocation below passes a hardcoded 3 where the
  # echoed command uses n_slices — presumably a debugging leftover; confirm
  # before a production run.
  perl<-system(paste("perl parse_genscan_1.pl",
                     seq_length,
                     seq_overlap,
                     3,#n_slices,
                     paste0('/local/data/public/g1/Genscan/',species,'_genes_temp.csv'),
                     species)
               ,intern=TRUE)
  print("Perl is done")
  # Then, load this table and remove duplicates or genes cut in the slicing process
  # (only leave their full version)
  seqs <- data.table(read.csv(paste0('/local/data/public/g1/Genscan/',species,'_genes_temp.csv')))
  print(paste(nrow(seqs),"sequences"))
  # One row per gene with its overall start/end/strand.
  genes <- seqs[,list(start=min(gene_start),end=max(gene_end),strand=max(strand)),by=gene_id]
  # NOTE(review): indexing by rank(genes$end) looks intended to order genes by
  # end position — verify the ranks are unique, otherwise rows would repeat.
  genes <- genes[rank(genes$end)]
  # Genes sharing the same (start, strand) are treated as duplicates produced
  # by the slice overlap; only the first gene id of each pair is kept.
  genes <- genes[,list(id=first(gene_id),num=length(gene_id)),by=list(start,strand)]
  print(paste("remove",sum(genes$num > 1),"overlaps"))
  seqs <- seqs[seqs$gene_id %in% genes$id]
  # FInally, save the table for analysis
  write.csv(seqs,file=paste0('/local/data/public/g1/Genscan/',species,'_genes.csv'))
  print(paste("wrote file containing",nrow(seqs),"sequences for",nrow(genes),"genes"))
}
3e1d8b44b8d68c79299fca85b41dbb62eb9000e0 | c8d87ef759a18c8eeb1eb2c5a77ea6bb9541df5c | /prob.012.R | 5ccfae940a17f0556c4643e0aa49177b6bfbb631 | [] | no_license | mittens-sk/study.R.euler | 6164a27789a5fa447988986f9ea59d23f8c84fc5 | c0fb2958ac88927bb30d6ca6d968de9e071d91c5 | refs/heads/master | 2020-07-28T18:17:18.280411 | 2019-10-07T07:50:45 | 2019-10-07T07:50:45 | 209,490,592 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 589 | r | prob.012.R | source('functions.R')
# Project Euler 12: find the first triangle number having at least `find`
# divisors. divisors() is provided by functions.R, sourced above.
d <- 0       # divisors of the current triangle number
i <- 0       # index of the triangle number
best <- 0    # highest divisor count seen so far (progress reporting only;
             # renamed from `max`, which shadowed base::max)
find <- 500  # target divisor count
a <- 0       # current triangle number, maintained incrementally
while (length(d) < find) {
  i <- i + 1
  # i-th triangle number; the running sum replaces the original
  # sum(seq(1, i)), which rebuilt the whole sequence every iteration.
  a <- a + i
  d <- divisors(a)
  l <- length(d)
  if (best < l) {
    best <- l
    print(paste0(i, " : ", l))
  }
}
print(a)
print(d)
#[1] "1 : 2"
#[1] "3 : 4"
#[1] "7 : 6"
#[1] "8 : 9"
#[1] "15 : 16"
#[1] "24 : 18"
#[1] "32 : 20"
#[1] "35 : 24"
#[1] "63 : 36"
#[1] "80 : 40"
#[1] "104 : 48"
#[1] "224 : 90"
#[1] "384 : 112"
#[1] "560 : 128"
#[1] "935 : 144"
#[1] "1224 : 162"
#[1] "1664 : 168"
#[1] "1728 : 192"
#[1] "2015 : 240"
#[1] "2079 : 320"
#[1] "5984 : 480"
#[1] "12375 : 576"
#> print(a)
#[1] 76576500 |
699a0760126177d5294e24aa455cdecfff046579 | 2ea5a78617175fb105f825e1be66ea97e69a8eb2 | /CDARS2/R/CDARSMerge.R | 126978c6848920cc46cc1d50c651905eff17b357 | [] | no_license | songssssss/CDARS | 8abe11850c3ccc34151ac5d0edd731ecea49d8c2 | e54849b0f2ed980c0e4d92c0214509ee1f5277f7 | refs/heads/main | 2023-03-20T19:33:33.322844 | 2021-03-18T04:44:06 | 2021-03-18T04:44:06 | 348,942,257 | 0 | 0 | null | 2021-03-18T04:37:01 | 2021-03-18T04:37:00 | null | UTF-8 | R | false | false | 2,221 | r | CDARSMerge.R | #' @title Quick merge CDARS files
#' @description Merge CDARS raw xlsx files of the same type (same column
#'   layout) found in the current working directory, and save the combined
#'   table to SPSS (default) or csv format.
#' @param class character vector of file-name patterns; every xlsx file whose
#'   name contains a pattern (case-insensitive) is merged into the
#'   corresponding output file
#' @param to_csv write a csv file if TRUE, otherwise an SPSS .sav file
#' @export
#' @import readxl
#' @seealso \code{\link[readxl]{read_xlsx}}
#' @seealso \code{\link[readxl]{excel_sheets}}
#' @return NULL, invisibly; called for its side effect of writing files
#' @details supports xlsx only
#' @examples \dontrun{
#' # CDARSMerge(class = c("Dx","Px"))
#' # CDARSMerge(class = c("alt","DeAth"),TRUE)
#' }
#'
CDARSMerge <- function(class, to_csv = FALSE) {
  for (i in seq_along(class)) {
    # All files whose (lower-cased) name contains both the class pattern and
    # "xlsx". The original line was corrupted: ">0®expr(" was ">0 & regexpr(".
    file <- dir()[regexpr(tolower(class[i]), tolower(dir())) > 0 &
                    regexpr("xlsx", tolower(dir())) > 0]
    print(paste("Number of", class[i], "files:", length(file)))
    dat <- data.frame()
    for (j in seq_along(file)) {
      # Iterate over every worksheet of the workbook.
      sheetName <- readxl::excel_sheets(file[j])
      for (k in seq_along(sheetName)) {
        tmp_dat <- as.data.frame(readxl::read_xlsx(file[j], sheet = sheetName[k]))
        if (nrow(tmp_dat) > 0) {
          # Coerce every column to character so sheets whose columns were
          # guessed with different types can be row-bound. (The original
          # piped this expression but discarded the result.)
          tmp_dat <- dplyr::mutate_all(tmp_dat, as.character)
          # Some sheets repeat the header inside the data: locate the
          # "Reference Key" row and reuse it as column names.
          if (length(grep("Reference Key", tmp_dat[, 1])) > 0) {
            tmp_dat_name <- gsub(" ", "", paste(tmp_dat[grep("Reference Key", tmp_dat[, 1]), ]))
            tmp_dat <- tmp_dat[(grep("Reference Key", tmp_dat[, 1]) + 1):dim(tmp_dat)[1], ]
            names(tmp_dat) <- tmp_dat_name
          }
          # Rows from "No. of Records" onward are a footer, not data.
          if (length(grep("No. of Records", tmp_dat[, 1])) > 0) {
            tmp_dat <- tmp_dat[1:(grep("No. of Records", tmp_dat[, 1]) - 1), ]
          }
          # Strip punctuation from column names before binding.
          names(tmp_dat) <- gsub("[.():,-]", "", names(tmp_dat))
          dat <- dplyr::bind_rows(dat, tmp_dat)
          print(paste("File", j, "processed:", file[j], "worksheet:", sheetName[k]))
        }
      }
    }
    print(paste("Name of columns:", names(dat)))
    if (to_csv) {
      write.csv(dat, file = paste(class[i], ".csv", sep = ""), row.names = FALSE, na = "")
    } else {
      # SPSS variable names cannot contain spaces.
      names(dat) <- gsub(" ", "", names(dat))
      haven::write_sav(dat, paste(class[i], ".sav", sep = ""))
      print(paste("Done with", length(file), "file of", class[i]))
    }
  }
}
|
061ccc9f58cc528fd2046392f3000d1c5da3e386 | c02038fc9c9c3a0f23a6df73dd220a4384610fcd | /plot4.R | aa8c4f34c91ef30a1b3993437cf2d6ac63c92d59 | [] | no_license | emilyyii/ExploratoryDataAnalysis_Project01 | e92124002f5353a2b42901c36ff67fd6af456a38 | 2b1f8d66f9f32af9d261b4ab934da07770204338 | refs/heads/master | 2021-04-28T20:05:39.347179 | 2018-02-18T20:38:04 | 2018-02-18T20:38:04 | 121,914,853 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 752 | r | plot4.R | # plot 4
# 2 x 2 panel of household power-consumption time series, written to
# plot4.png. Assumes `data` (with a DateTime column and the power columns
# used below) is already loaded in the workspace — presumably by an earlier
# data-preparation script; confirm before running standalone.
png("plot4.png", width = 480, height = 480)
par(mfrow = c(2,2))
# 1: global active power over time
plot(data$DateTime, data$Global_active_power, type = "l", xlab = "", ylab = "Global Active Power")
# 2: voltage over time
plot(data$DateTime, data$Voltage, type = "l", xlab = "datetime", ylab = "Voltage")
# 3: the three sub-metering series overlaid, with a borderless legend
plot(data$DateTime, data$Sub_metering_1, col = "black", type = "l", xlab = "", ylab = "Energy sub metering")
lines(data$DateTime, data$Sub_metering_2, col = "red", type = "l")
lines(data$DateTime, data$Sub_metering_3, col = "blue", type = "l")
legend("topright", col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty = 1, lwd = 2, bty = "n")
# 4: global reactive power over time
with(data, plot(DateTime, Global_reactive_power, xlab = "datetime", type = "l"))
# Close the png device so the file is flushed to disk.
dev.off()
7c5503d8a5d6a2de3b4192e4e37a61397a9bf17d | 13110ac3fe1f3de135975f586e3b995ecb4588d2 | /man/upmplot.Rd | d1fd745b1eefc75185fa37ca8eec7e0f330bebd6 | [] | no_license | biostata/tpidesigns | e933b32cd99cc522e9afdbdbf09210e1cc5e439b | 215a886f48d0dc7dd3ebd838e3f32fa1e1c73fa1 | refs/heads/master | 2022-03-15T23:15:50.532759 | 2019-12-04T04:07:13 | 2019-12-04T04:07:13 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 4,271 | rd | upmplot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/upm.R
\name{upmplot}
\alias{upmplot}
\title{Graphical plot of Unit Probability MASS}
\usage{
upmplot(x, n, pt, e1 = 0.05, e2 = 0.05, design = c("mtpi", "mmtpi"),
w, a1 = NULL, b1 = NULL, a2 = NULL, b2 = NULL)
}
\arguments{
\item{x}{Total Number of events (In Dose Escalation Oncology Trials, this may be defined as
number of people who have experienced Dose Limiting Toxicities through administration of current Dose Level)}
\item{n}{Trial size (In Dose Escalation Oncology Trials, this may be defined as
total number of people who have been administered current Dose Level (missing responses will be excluded). Necessarily n will be greater than or equal to x}
\item{pt}{Target toxicity proportion to achieve in current Dose Level (Less Toxicity means under- dosing, where as more toxicity means over - dosing)}
\item{e1}{Amount of variation that can be allowed to the left of the pt value to conclude that target toxicity has been achieved.
Default value is 0.05. This means that if a posterior toxicity (DLT) mean takes a value within the range (pt - e1, pt), toxicity for the cohort (of size >= 3) will be achieved.}
\item{e2}{Amount of variation that can be allowed to the right of the pt value to conclude that target toxicity has been achieved.
Default value is 0.05. This means that if a posterior toxicity (DLT) mean takes a value within the range (pt, pt + e2), toxicity for the cohort (of size >= 3) will be achieved.}
\item{design}{The Design that is implemented in the trials. This arguement includes values "mtpi" and "mmtpi"}
\item{w}{Weight on the first Beta distribution of the mixture Prior}
\item{a1}{alpha parameter ( > 0) for 1st Beta distribution, must be input properly when \eqn{w = 0 or 1}}
\item{b1}{beta parameter ( > 0) for 1st Beta distribution, must be input properly properly when \eqn{w = 0 or 1}}
\item{a2}{alpha parameter ( > 0) for 2nd Beta distribution, will not be used if \eqn{w = 0 or 1}}
\item{b2}{beta parameter ( > 0) for 2nd Beta distribution, will not be used if \eqn{w = 0 or 1}}
}
\value{
A graph that includes Probability Distributions of the Dose Limiting Toxocity Rate and value of Unit Probability Mass at corresponding intervals.
}
\description{
\code{upmplot} Produces a graphical plot of Unit Probability Mass for a given set of parameters.
}
\details{
Unit Probability MASS or UPM(a,b) = \eqn{(F(b) - F(a))/(b - a)}, defined for an interval (a,b), when X~F().
In this function, F() is assumed to be Cumulative Beta distribution function or mixture of two cumulative Beta distribution functions.
Hence, \eqn{F(x) = w * pbeta(x, a1, b1) + (1 - w) * pbeta(x, a2, b2)}, pbeta is cumulative Beta distribution.
If F() consists of a single Beta distribution, and not a mixture, then the convention here assumed is
to input \eqn{w = 1} and a1, b1 , or \eqn{w = 0} and a2,b2
}
\section{Decision Making Based on UPM values}{
For modified Toxicity Probability Interval (mTPI) Design, the toxicity range (0,1) is divided into
three ranges, (1) Under-Dosing Interval [0, pt - e1), (2) Target-Toxicity Interval [pt - e1, pt - e2], (3) Over-Dosing Interval (pt + e2, 1].
UPM is calculated for each of the above intervals and the decision is taken accordingly:\cr if the UPM is maximum for interval (1),
then the strength of the current Dosage is escalated,\cr if its maximum for Interval (2), then more patients are administered with
current dose,\cr if the UPM is maximum in interval (3), then strength of the current Dose is de-escalated.\cr For Modified Toxicity Interval Design-2 (mTPI -2, encoded as "mmtpi")
the intervals (1) and (3) are again divided into another sub- intervals and same steps are followed.\cr But, before that, we must ensure that the Dose is not severely toxic
and hence it is advised to run the \code{\link{decisiontpi}} function to know about the severity of current Dose.The graphical display will be meaningful only if \code{\link{decisiontpi}} does not return the value "DU"
}
\examples{
require(ggplot2)
n = 13 #must be a value >= 3
x = sample.int(n, 1)
upmplot(x = 5, n = 7, pt = 0.3, design = "mmtpi", w = 0.1, a1 = 1, a2 = 1, b1 = 4, b2 = 6)
}
\seealso{
\code{\link{UPM}}, \code{\link{weights_formulate}}
}
|
f0a4c6a5a974e40ff15f6d59a3d341c109c41086 | 6f7a46350db1faad9882ce7cdf4fce12ac9284fd | /R/get_exchange_rates.R | e5948b0786de84b7eaec4d2c39f2c464531c923a | [] | no_license | Deborah-Jia/m_r_skills | 2f28d8b0fa70b058b9b5edf9e998060aac1330f3 | a53658b3181028a32e78ec9e14c5dbade383d946 | refs/heads/main | 2023-05-26T21:40:56.387382 | 2021-06-10T19:36:23 | 2021-06-10T19:36:23 | 366,491,809 | 0 | 0 | null | 2021-05-31T21:30:18 | 2021-05-11T19:27:22 | R | UTF-8 | R | false | false | 1,621 | r | get_exchange_rates.R | #' Look up the historical values of base currency in another currency.
#' @param base currency symbol to convert from
#' @param symbols currency symbol to convert to
#' @param start_date first date of the series (Date)
#' @param end_date last date of the series (Date)
#' @inheritParams get_usdhuf
#' @return \code{data.table} object with \code{date} and \code{rate} columns
#' @export
#' @importFrom checkmate assert_numeric
#' @importFrom logger log_error log_info
#' @importFrom data.table data.table
#' @importFrom httr GET content
#' @examples
#' get_exchange_rates()
#' get_exchange_rates(base= 'USD', symbols='EUR')
#' get_exchange_rates(base= 'USD', symbols='GBP', start_date= "2021-04-24", end_date= "2021-05-10")
get_exchange_rates <- function(base = 'USD',
                               symbols = 'HUF',
                               start_date = Sys.Date() - 30,
                               end_date = Sys.Date(),
                               retried = 0) {
  # Assign the tryCatch result: the original discarded it, so after a retry
  # the outer call failed with "object 'rates' not found".
  rates <- tryCatch({
    response <- GET(
      'https://api.exchangerate.host/timeseries',
      query = list(
        base = base,
        symbols = symbols,
        start_date = start_date,
        end_date = end_date
      )
    )
    exchange_rates <- content(response)$rates
    rates <- data.table(
      date = as.Date(names(exchange_rates)),
      rate = as.numeric(unlist(exchange_rates)))
    # Fail (and trigger a retry) when the API returns anything non-numeric
    # or negative.
    assert_numeric(rates$rate, lower = 0)
    rates
  }, error = function(e) {
    log_error(e$message)
    # Quadratic back-off before retrying.
    Sys.sleep(1 + retried^2)
    # Forward the caller's arguments: the original hardcoded the defaults
    # here, silently ignoring any non-default request on retry.
    get_exchange_rates(base = base,
                       symbols = symbols,
                       start_date = start_date,
                       end_date = end_date,
                       retried = retried + 1)
  })
  # Most recent rate, interpolated into the glue template below (the
  # original template referenced an undefined `rate`).
  rate <- rates$rate[nrow(rates)]
  log_info('1 {base} = {rate} {symbols}')
  rates
}
|
2b142dfb80b12ce23aeb1e160f416685b6398d47 | 08cd2a1676d27ac01291479de37273998d017864 | /00_script_for_rasp.R | ed0bcc3744dd4b912efc97673b4a8f47dd4fb70c | [] | no_license | FDenker/Legacy-patent-crawler | cd45e437a22ce8a6c7fd21f945a21527c31571ee | 2d367f41fd6e37143af5d9026272e1965137654f | refs/heads/master | 2022-04-09T20:17:45.860588 | 2020-03-20T13:09:34 | 2020-03-20T13:09:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,051 | r | 00_script_for_rasp.R | #### script for raspberry pi###
# Batch driver: crawls USPTO patent pages in groups of 500 links and saves
# each group's scraped data as an .rds file. Meant to run unattended
# (e.g. on a Raspberry Pi).
#rstudioapi::jobRunScript("C:/Users/Frederic Denker/OneDrive - Zeppelin-University gGmbH/Dokumente/Semester 7/clean_patent_project/Patent_project/01_uspto_crawler_full_script.R", importEnv = TRUE)
# Load the crawler code straight from GitHub; linkmaker() and
# crawl_of_list_function() used below are presumably defined there — confirm.
devtools::source_url("https://github.com/FDenker/us_ger_patent_scraping/blob/master/01_uspto_crawler_full_script.R?raw=TRUE")
# First and last group number (inclusive) to process in this run.
start <- 44
end <- 45
##For which of the groups do want to run this##
for (i in start:end) {
  #source("01_uspto_crawler_full_script.R")
  group_number <- i
  # Line range covered by this group: 500 links per group.
  start_line <- (1+(group_number-1)*500)
  end_line <- ((group_number*500))
  print(start_line)
  print(end_line)
  # Crawl this group's links; system.time() also reports how long it took.
  system.time({ data <- crawl_of_list_function(linkmaker(start_line,end_line))})
  # Persist the group's results before moving on to the next group.
  saveRDS(data, file = paste0("/usr/share/R/data_group_",group_number, ".rds"))
  print(paste0("Saving group number: ",group_number, " successfull"))
}
#rstudioapi::jobRunScript("C:/Users/frede/OneDrive - Zeppelin-University gGmbH/Dokumente/Semester 7/us_ger_patent_scraping/00_script_for_rasp.R", importEnv = TRUE)
|
5c824ad7b8f1a8443388c7a4f788955250e6c474 | 1fdb812e8be81ba42963e82fff1e9201517c1a12 | /src/setup.R | 0535f91344ec21b665880a2008e76cda1efdc8c8 | [] | no_license | adamstuller/weather-data-analysis | dafe298041ef255de98103b93035b2641a7fb210 | 30e4c588c4e5637b524d6502eddf28f3b6413f75 | refs/heads/main | 2023-05-08T16:58:48.492254 | 2021-05-24T20:08:19 | 2021-05-24T20:08:19 | 345,736,727 | 0 | 0 | null | 2021-05-24T16:14:37 | 2021-03-08T17:21:24 | HTML | UTF-8 | R | false | false | 608 | r | setup.R | library(lubridate)
library(tidyverse)
library(Hmisc)
library(data.table)
library(funModeling)
library(corrplot)
library(tsibble)
library(fable)
library(imputeTS)
library(EnvStats)
library(feasts)
library(factoextra)
library(fpp3)
# mode function
getmode <- function(v) {
uniqv <- unique(v)
uniqv[which.max(tabulate(match(v, uniqv)))]
}
NA_values <- c("", '999', '9999', '99999', '999999', '+999', '+9999','+99999', '+999999', '009999')
process_col <- function(val, scaling = 1) {
if (is.na(as.numeric(val)) || as.numeric(val) %in% NA_values){
val <- NA
}
return (as.numeric(val)/scaling)
}
|
d0fae484fc75e7c43424bb9ab7f4596c29f19915 | 78feba5ff1ee498c62aa5540e68c7debb9194d82 | /man/cells.Rd | 6f3db83c73bc6e181a52340e5909847fb20d90c9 | [] | no_license | mhpedersen/tblr | 902e25901596beb4fa17ed0cad74f3148c9166c7 | 7c700df2bc30abf774c6cba1855ac5db4d202547 | refs/heads/master | 2021-01-18T18:37:18.662232 | 2016-05-30T21:15:32 | 2016-05-30T21:15:32 | 60,035,934 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 177 | rd | cells.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tbl.R
\name{cells}
\alias{cells}
\title{cells}
\usage{
cells(coords, ...)
}
\description{
cells
}
|
8b87d5ecdf86af54dcefe754d3ce303fac76fb60 | b8088b488a998bff1e92a42ae432e0823b4c3fb1 | /man/nltm-internal.Rd | be7576acaf6746e6f7a0dd2fe4859b2a0b0d72c3 | [] | no_license | cran/nltm | 2b49164db963f6967a8fc0c821e32b54566c6ba0 | ae661486d463b133e4cad46be19dcce0942cc68a | refs/heads/master | 2023-03-16T07:18:48.259534 | 2022-04-11T13:10:09 | 2022-04-11T13:10:09 | 17,719,132 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 716 | rd | nltm-internal.Rd | \name{nltm-internal}
\alias{nltm-internal}
\alias{nltm.fit}
\alias{profileLikR}
\alias{boundary}
\alias{counts}
\alias{eventTimes}
\alias{initSurvival}
\alias{nPredictor}
\alias{reverseCumsum}
\alias{cureModel}
\title{Internal non-linear transformation model functions}
\description{Internal non-linear transformation model functions.}
\usage{
nltm.fit(x1, x2, y, model, init, control, verbose)
profileLikR(beta, x1, x2, status, count, s0, model, cure, tol, nvar1,
nvar2, nobs, npred, verbose)
boundary(x1, x2, npred, cure, bscale)
counts(time, status)
eventTimes(y)
initSurvival(count, cure)
nPredictor(model)
reverseCumsum(a)
cureModel(model)
}
\details{These are not to be called by the user.}
\keyword{internal}
|
b480d27db7814ddda6a8312808bc87a668c585ac | 5868298703355b6796bfeeb8139ca518428b8d0f | /Print Test.R | dc3f5773c555f090005c4e4933de8e55ee3126b5 | [] | no_license | RayCatNineNine/TestRespo | d24342fd20dc6d397a25ca2e9ec27e61302bf453 | 98b709a36027b0ba521e0c41b76691c8eb0c899e | refs/heads/master | 2023-03-08T00:12:39.812396 | 2021-02-10T03:03:39 | 2021-02-10T03:03:39 | 337,599,894 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 77 | r | Print Test.R | print("This file was created within RStudio")
Print("And now it lives here") |
b45a86eff69ca9ce742d7b2da8dc626214e087c2 | 1ff3a51b463c951aa02ef40a89c5a884c94f9516 | /R/print.summary.covfm.R | 8f08a2dc9970415edc460fbc1ed69826cf2e126a | [] | no_license | cran/fit.models | 3a250a89603637cfd2296b4cf25f6bcc8e38eda6 | 2548545703702dbc11c8a2b9ceda8da77777386e | refs/heads/master | 2021-01-10T01:00:23.547075 | 2020-08-02T13:30:02 | 2020-08-02T13:30:02 | 17,696,066 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,721 | r | print.summary.covfm.R | #' @export
print.summary.covfm <- function(x, corr = FALSE, digits = max(3, getOption("digits") - 3),
print.distance = FALSE, ...)
{
n.models <- length(x)
mod.names <- names(x)
if(!("corr" %in% names(match.call())[-1]) && !is.null(ac <- attr(x, "corr")))
corr <- ac
acc <- if(corr) vcor else vcov
calls <- lapply(x, display.call)
cat("\nCalls: \n")
for(i in names(calls))
cat(paste0(i, ": ", calls[[i]], "\n"))
p <- dim(acc(x[[1]]))[1]
i1 <- rep(seq(p), times = p)
i2 <- rep(seq(p), each = p)
cov.index <- paste("[", paste(i1, i2, sep = ","), "]", sep = "")
cov.index <- matrix(cov.index, p, p)
cov.index <- cov.index[row(cov.index) >= col(cov.index)]
cov.unique <- t(sapply(x, function(u) (v <- acc(u))[row(v) >= col(v)]))
dimnames(cov.unique) <- list(mod.names, cov.index)
cat("\nComparison of Covariance/Correlation Estimates:\n")
cat(" (unique correlation terms) \n")
print(cov.unique, digits = digits, ...)
loc <- t(sapply(x, center))
loc.names <- names(center(x[[1]]))
dimnames(loc) <- list(mod.names, loc.names)
cat("\nComparison of center Estimates: \n")
print(loc, digits = digits, ...)
evals <- t(sapply(x, function(u) u$evals))
eval.names <- names(x[[1]]$evals)
dimnames(evals) <- list(mod.names, eval.names)
cat("\nComparison of Eigenvalues: \n")
print(evals, digits = digits, ...)
have.dist <- sapply(x, function(u) !is.null(u$dist))
if(print.distance && all(have.dist)) {
dists <- t(sapply(x, function(u) u$dist))
dimnames(dists) <- list(mod.names, names(x[[1]]$dist))
cat("\nComparison of Mahalanobis Distances: \n")
print(dists, digits = digits, ...)
}
invisible(x)
}
|
4d9755b48c80464861b74a0925d677c15fc075c2 | 75ec20adb7fc7f8df4bfc2a45a8d45a3da69d857 | /scripts/project/par.R | fdc5776aee70c31c0ad2a895a8bb73539225b7f8 | [] | no_license | dreanod/redseachl | a824ebfe8334b07f5ed7030aeaef486b44b262a0 | e0e89f4210199667740faddb66b85ee3270c4ae3 | refs/heads/master | 2016-09-05T14:47:11.006842 | 2015-05-07T05:53:32 | 2015-05-07T05:53:32 | 25,761,215 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,394 | r | par.R | library(raster)
library(yaml)
out_dir <- 'data/par/projected'
clusters <- raster('derived/clustering/with_seasonal_filling/clusters.grd')
par <- brick('data/par/aggregate/par.grd')
dir.create(out_dir, recursive=TRUE)
config <- yaml.load_file('scripts/load/config.yml')
ext <- config$red_sea_extent
ext <- extent(c(ext$lon_min, ext$lon_max, ext$lat_min, ext$lat_max))
par <- crop(par, ext)
N <- nlayers(par)
par_1 <- c()
par_2 <- c()
par_3 <- c()
par_4 <- c()
for (i in 1:N) {
r <- raster(par, layer=i)
par_1 <- c(par_1, mean(r[clusters == 1], na.rm=TRUE))
par_2 <- c(par_2, mean(r[clusters == 2], na.rm=TRUE))
par_3 <- c(par_3, mean(r[clusters == 3], na.rm=TRUE))
par_4 <- c(par_4, mean(r[clusters == 4], na.rm=TRUE))
}
# add dates
date_from_filename <- function(f) {
b <- basename(f)
b <- strsplit(b, '[.]')[[1]]
b <- b[1]
y <- substring(b, 2, 5)
d <- as.numeric(substring(b, 6, 8))
t <- as.Date(d - 1, origin=paste(y, '-01-01-', sep=''))
return(t)
}
FILES <- list.files('data/par/clean', full.names=TRUE,
pattern='*.grd')
d <- date_from_filename(FILES[1])
for (f in FILES) {
d <- c(d, date_from_filename(f))
}
d <- d[2:(N+1)]
y = format(d, '%Y')
w = c()
for (i in 1:N) {
w <- c(w, (i+23 - 1)%%46 + 1)
}
df <- data.frame(year=y, week=w, par_1, par_2, par_3, par_4, row.names=d)
write.csv(df, paste(out_dir, 'par.csv', sep='/')) |
5c01a60f51f985381232f9c93fbf67a17ec6fa5c | d01ef90f1b72ae02ac5055c7c93c69fcf5a7be5c | /pedigree/genotyped.relatives.R | 128ede744249e1f18f2c471bf8328a417f79b1fc | [] | no_license | ngon/LgSm-DataProcessing | 3b8dde9e4e75f1e2e6791ec342cda80b6d54070f | 37c816f5257dcc87802124e87dbd9e445f6d146f | refs/heads/master | 2020-07-04T15:07:14.876067 | 2015-11-21T02:52:33 | 2015-11-21T02:52:33 | 22,928,887 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,194 | r | genotyped.relatives.R | ### PURPOSE: MAKE A LIST OF SIB PAIRS GENOTYPED FROM F50-55, AND MAKE A LIST OF
### TRIOS/FAMILIES GENOTYPED FROM F39-43. THESE CAN BE USED FOR ERROR CHECKING
### FURTHER DOWN THE LINE.
### THIS WAS USED TO CREATE /PEDIGREE/GENOTYPEDRELATIVES.R
load("./pedigree/info.phenoSamples.RData")
info <- info[order(info$gen, info$dam),]
# get info for genotyped samples only (ids)
info$id <- info$id - 0.1
info <- info[info$id %in% ids,]
sibPairs <- tapply(info$id, INDEX=info$dam, FUN=as.vector, simplify=FALSE)
for (i in seq_along(sibPairs)) {
if (length(sibPairs[[i]]) < 2) {
sibPairs[i] <- NULL
}
}
# MORE VERBOSE THAN TAPPLY
# retain only mice with siblings
# sibs.tmp <- c()
# sibs <- list()
# for (i in 1:(length(info$dam)-1)) {
# if (info$dam[i] == info$dam[i+1]) {
# sibs.tmp <- rbind(sibs.tmp, info[i,], info[i+1,])
# } # gets df containing sib pair info
# if (info$dam[i] == info$dam[i+1]) {
# sibs[[i]] <- c(info[i,1], info[i+1,1])
# } # makes list of sibs for each dam
# }
# sibs <- sibs[!sapply(sibs, is.null)]
# names(sibs) <- unique(sibs.tmp$dam)
# get trios for F39-43
ped <- read.table("./pedigree/pedforQTLRel.txt", sep="\t", header=T)[1:5]
ids <- ids + 0.1
ped <- ped[ped$id %in% ids,]
sires <- ped[ped$sire %in% ids,]
dams <- ped[ped$dam %in% ids,]
trios <- dams[dams$id %in% sires$id,]
trios[c(1,2,3)] <- trios[c(1,2,3)] - 0.1
trios <- trios[1:3]
trios <- trios[order(trios$sire),]
fams <- tapply(trios$id, INDEX=trios$fams, FUN=as.vector, simplify=FALSE)
sires2 <- tapply(trios$sire, INDEX=trios$sire, FUN=as.vector, simplify=FALSE)
dams2 <- tapply(trios$dam, INDEX=trios$dam, FUN=as.vector, simplify=FALSE)
# sires to append
siresOrdered <- c()
for (i in seq_along(sires2)) {
siresOrdered <- c(siresOrdered, sires2[[i]][1])
}
# dams to append
damsOrdered <- c()
for (i in seq_along(dams2)) {
damsOrdered <- c(damsOrdered, dams2[[i]][1])
}
# list of families with all genotyped members
for (i in seq_along(fams)) {
fams[[i]] <- append(fams[[i]], c(siresOrdered[i], damsOrdered[i]))
}
# save(fams, sibPairs, file="./pedigree/genotypedRelatives.RData")
######## 7-8-15 ########
# I renamed all of the elements of fams after the dam (last ID in each vector).
# The code below describes how I did this. The reason I did it is to make
# running plot.haplotypes.R (plot.haplos) easier in the future. That is what
# the rest of this code pertains to. To see it in context, look in the comments
# of plot.haplotypes.R.
# ped<- read.table("./pedigree/pedforQTLRel.txt", header=T)
# mamaMouse <- c()
# for (family in seq_along(fams)){
# mamaMouse <- c(mamaMouse, fams[[family]][length(fams[[family]])])
# }
# mamaMouse <- mamaMouse + 0.1
# mmGen <- ped[ped$id %in% mamaMouse,]
# mmGen <- mmGen[c(1,5)]
# identical(mmGen$id, mamaMouse)
# test <- sort(mamaMouse, decreasing=FALSE)
# identical(test, mamaMouse) # yes, mamaMouse is in decreasing order
# mmGen <- mmGen[order(mmGen$id),]
# identical(mmGen$id, mamaMouse) # now TRUE
# mmGen$id <- mmGen$id - 0.1
# names(fams) <- mmGen$id
# # SAVING FAMS WITH DAMS AS LIST NAMES FOR FUTURE USE
# save(fams, sibPairs, file="./pedigree/genotypedRelatives.RData")
|
fcfc668b89b70c137a4390a5e8bfb6e7d6fb7d80 | 911fe2b00bd17e220c2116c292fe95603ffe1e7a | /preprocessing/BMIQ.R | 0cc9c7b8ecee0011f24ce63612fcb4da3bdf5b89 | [] | no_license | sunnytien/methyl_age | 3772b4eb1115de4a9697f57e4458b850b0c747ff | 07c35e202d56a06193c2a4270d1c0907e9bb4db0 | refs/heads/master | 2020-06-24T14:32:51.548812 | 2015-06-09T15:14:30 | 2015-06-09T15:14:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,093 | r | BMIQ.R | library("wateRmelon")
library("dplyr")
library(IlluminaHumanMethylation450kanno.ilmn12.hg19)
library("BatchJobs")
data(IlluminaHumanMethylation450kanno.ilmn12.hg19)
bmiq.normalization = function(filename, types, subsample=NULL){
require("wateRmelon")
# filename handling
betas = get(load(filename))
base = gsub(".Rdata$", "", filename)
outfile = paste(base, "_BMIQ.Rdata", sep="")
# sanity check
betas[betas < 0] = 0
betas[betas > 1] = 1
betas = betas[!is.na(betas)]
betas = betas[!is.null(betas)]
# making sure beta and types match
betas = betas[names(betas) %in% names(types)]
beta.types = types[names(betas)]
# conduct normalization and save
betas.normed = BMIQ(betas, beta.types)
save(betas.normed, file=outfile)
return(T)
}
## get files for normalization
files = list.files("~/data/methyl_age/GEO/GSM",
recursive=T,
full.names=T) %>%
grep("Rdata$", ., value=T) %>%
grep("BMIQ", ., value=T, invert=T)
cat(paste("Normalizing", length(files), "files\n"))
## getting types for sites
types = IlluminaHumanMethylation450kanno.ilmn12.hg19@data$Manifest$Type %>%
as.character %>%
factor %>%
as.numeric
names(types) = rownames(IlluminaHumanMethylation450kanno.ilmn12.hg19@data$Manifest)
## discarding sites in ChrX and ChrY
## BMIQ funtion will automatically discard sites
## for which there is no type
chrXY = IlluminaHumanMethylation450kanno.ilmn12.hg19@data$Locations %>%
as.data.frame %>%
mutate(probe=rownames(.)) %>%
filter(chr %in% c("chrY", "chrX"))
## discarding random probes
random = IlluminaHumanMethylation450kanno.ilmn12.hg19@data$Other %>%
as.data.frame %>%
mutate(probe=rownames(.)) %>%
filter(Random_Loci!="")
## discarding multiple matching probes
types = types[!(names(types) %in% chrXY$probe)]
types = types[!(names(types) %in% random$probe)]
## conduct normalization
reg = makeRegistry("BMIQ", packages=c("wateRmelon"))
batchMap(reg, bmiq.normalization, files, more.args=list(types=types))
submitJobs(reg, chunk(findNotSubmitted(reg), n.chunks=50))
|
211c29a662929aab55c66ba666daa921663f187f | da03bdcef07889532366cdb90c3ea64db0bf8a16 | /man/dfSatterthwaite.Rd | e4db27892c4c75f2138c8424e1f50b4a2768209a | [] | no_license | cran/equivalenceTest | 4e3bb00ded51d5bcb216d9c10d6304297b7cee66 | aae9bb4ee112a8fd7b69dc719e726b82c9ffd16a | refs/heads/master | 2020-04-27T00:02:30.395743 | 2019-03-04T17:30:03 | 2019-03-04T17:30:03 | 173,922,116 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 692 | rd | dfSatterthwaite.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/equivTest_FixedMargin.R
\name{dfSatterthwaite}
\alias{dfSatterthwaite}
\title{Compute the Satterthwaite approximation of degree of freedom for t distribution}
\usage{
dfSatterthwaite(s1, n1, n1s, s2, n2, n2s)
}
\arguments{
\item{s1}{sample standard deviation for group 1}
\item{n1}{sample size for group 1}
\item{n1s}{adjusted sample size for group 1}
\item{s2}{sample standard deviation for group 2}
\item{n2}{sample size for group 2}
\item{n2s}{adjusted sample size for group 2}
}
\value{
degree of freedom
}
\description{
Compute the Satterthwaite approximation of degree of freedom for t distribution.
}
|
1d175b21842021078478c804243b37f2eae62c50 | 1068d66263215693b773faaa4eda37e8fe733b39 | /make_incr_DEM_from_SRTM.R | fc6e3a8a813c24453b815a63ec80713b273caf3a | [] | no_license | bschumac/Analyse_netcdf_Modeloutput | 3b2794656aa5113ede46b334a240fa1a54274735 | 772fa718af19661fa4cdc85195a21db5a52c1bb6 | refs/heads/master | 2021-01-18T19:34:25.862760 | 2019-10-16T04:06:47 | 2019-10-16T04:06:47 | 72,073,808 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,002 | r | make_incr_DEM_from_SRTM.R | #### This script will calculate the RMSE of Precipitation of the TRMM Data compared
#### to the real measured data.
if (!require(raster)){install.packages('raster')}
library(sp)
library(raster)
if (!require(ncdf4)){install.packages("ncdf4", type = "source", configure.args="--with-netcdf-include=/usr/include")}
library(ncdf4)
library(rgdal)
if(!require(caret)){install.packages('caret')}
library(caret)
if(!require(mapview)){install.packages('mapview')}
library(mapview)
library(hydroGOF)
#install.packages("hydroGOF")
library(chron)
library(stringr)
#installed.packages("rgl")
#install.packages("RSAGA")
library(RSAGA)
library(RColorBrewer)
library(rgl)
library(maptools)
#####################################################
filebase_path <- "/media/dogbert/XChange/Masterarbeit/Analyse_Modeloutput/"
#####################################################
filebase_dem <- paste0(filebase_path, "DEM/")
filebase_dem_uz <- paste0(filebase_dem, "SRTM/")
filebase_model <- paste0(filebase_path, "raster")
filebase_code <- paste0(filebase_path, "code/Analyse_netcdf_Modeloutput/")
filebase_results <- paste0(filebase_path, "results/DEM")
source(paste0(filebase_code,"analyse_fun.R"))
fls_SRTM <- list.files(paste0(filebase_dem,"SRTM_ORG"), full.names = TRUE, pattern="")
for (i in (1:length(fls_SRTM))){
unzip(fls_SRTM[i], exdir = filebase_dem_uz, overwrite=TRUE)
}
# copy tif files by hand to SRTM/tif
fls_SRTM_tif <- list.files(paste0(filebase_dem_uz,"tif"), full.names = TRUE, pattern="")
for (i in seq(1,length(fls_SRTM_tif))){
SRTM_dem_act <- raster(fls_SRTM_tif[i])
vals_dem <- values(SRTM_dem_act)
vals_dem <- replace(vals_dem, vals_dem<= 0, NA)
values(SRTM_dem_act) <- vals_dem
SRTM_agg_act <- aggregate(SRTM_dem_act, fact=218, fun=mean, filename=paste0(paste0(filebase_dem_uz,"tif"),
"/Tile_agg", i,".tif"),
overwrite=TRUE)
}
# merge raster tiles with qgis by hand
SRTM_full_agg <- raster(paste0(filebase_dem_uz,"SRTM_Domain_kili_agg.tif"))
#replace vals with NA
vals_dem <- values(SRTM_full_agg)
vals_dem <- replace(vals_dem, vals_dem<= 0, NA)
values(SRTM_full_agg) <- vals_dem
netcdf_topo <- read_modeloutput(paste0(filebase_model,"/20km_gul_6_6/Kiliman_20km_Apr_May2014_DOMAIN000.nc" ), variable = "topo")
SRTM_full_agg_res <- resample(SRTM_full_agg, netcdf_topo, method="ngb")
writeRaster(SRTM_full_agg_res, filename= paste0(filebase_results,"/SRTM_20km.tif"), overwrite=TRUE)
# increase vals of SRTM
vals_dem <- values(SRTM_full_agg_res)
vals_dem[ vals_dem<=500 & !is.na(vals_dem)] <- vals_dem[ vals_dem<=500 & !is.na(vals_dem)]+((vals_dem[ vals_dem<=500 & !is.na(vals_dem)]/100)*15)
vals_dem[ vals_dem>500 & vals_dem<=1500 &!is.na(vals_dem)] <- vals_dem[ vals_dem>500 & vals_dem<=1500 &!is.na(vals_dem)]+((vals_dem[ vals_dem>500 & vals_dem<=1500 &!is.na(vals_dem)]/100)*20)
vals_dem[ vals_dem>1500 & vals_dem<=2500 &!is.na(vals_dem)] <- vals_dem[ vals_dem>1500 & vals_dem<=2500 &!is.na(vals_dem)]+((vals_dem[ vals_dem>1500 & vals_dem<=2500 &!is.na(vals_dem)]/100)*25)
vals_dem[ vals_dem>2500 &!is.na(vals_dem)] <- vals_dem[ vals_dem>2500 &!is.na(vals_dem)]+((vals_dem[ vals_dem>2500 &!is.na(vals_dem)]/100)*25)
#Replace NA with 0
values(SRTM_full_agg_res) <- vals_dem
vals_dem <- values(SRTM_full_agg_res)
vals_dem <- replace(vals_dem, is.na(vals_dem) , 0)
values(SRTM_full_agg_res) <- vals_dem
writeRaster(SRTM_full_agg_res, filename=paste0(filebase_results,"/SRTM_20km_incr.nc"), overwrite= TRUE)
writeRaster(SRTM_full_agg_res, filename=paste0(filebase_results,"/SRTM_20km_incr.tif"), overwrite= TRUE)
# COMPARISION
fld_lst_model <- list.files(filebase_model, full.names = TRUE, pattern="20")
temp <- paste0(fld_lst_model,"/Kiliman_20km_Apr_May2014_SRF.2014041500.nc")
netcdf_topo <- read_modeloutput(filepath = temp, variable = "topo")
plot(SRTM_full_agg_res)
plot(netcdf_topo)
dif <- SRTM_full_agg_res-netcdf_topo
plot(dif)
density(dif)
|
0ada7756d0c559b631bb8c16008efe78dbecc5df | 40a4312554df0d1e2e298b3da91612855859c894 | /R/RcppExports.R | 2290a7069c875777df67707cb1c7c2efdc235e63 | [] | no_license | gravesee/spamming | 0aae78201f2faaf2ef6396fc94f54e766fca7da8 | 01399e4ab13ea958c944e71e2146a71247f2bb59 | refs/heads/master | 2021-09-18T02:42:27.942594 | 2018-07-05T12:17:40 | 2018-07-05T12:17:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 831 | r | RcppExports.R | # Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
ngCMatrix_to_array_test <- function(obj) {
.Call('_spamming_ngCMatrix_to_array_test', PACKAGE = 'spamming', obj)
}
hamming_ngCMatrix_x_only <- function(obj) {
.Call('_spamming_hamming_ngCMatrix_x_only', PACKAGE = 'spamming', obj)
}
hamming_ngCMatrix_x_and_y <- function(objx, objy) {
.Call('_spamming_hamming_ngCMatrix_x_and_y', PACKAGE = 'spamming', objx, objy)
}
hamming_find_mode <- function(obj) {
.Call('_spamming_hamming_find_mode', PACKAGE = 'spamming', obj)
}
test_conversion <- function(obj) {
.Call('_spamming_test_conversion', PACKAGE = 'spamming', obj)
}
BMM <- function(obj, K, repetitions) {
.Call('_spamming_BMM', PACKAGE = 'spamming', obj, K, repetitions)
}
|
18b93c119d7e67f5f96d7895dfad8925869bc2e5 | 1acb22f859b99f7d459730ebe5ffe7cc879ff89f | /tests/testthat/test-areBracketsBalanced.R | b55ddea734051e8a5ac7d98e58a2fd86036021e3 | [] | no_license | sonejilab/FastWilcoxTest | 8f992a592a73dd6318cdac422ff6795c37b98db8 | c9ea65dcc41aa5f3403441899f7e558d2a7cbe7d | refs/heads/master | 2022-11-05T08:36:55.592201 | 2022-10-17T10:46:36 | 2022-10-17T10:46:36 | 173,142,681 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,283 | r | test-areBracketsBalanced.R | context( 'areBracketsBalanced')
expect_true(areBracketsBalanced("{}"), label="{}" )
expect_true(areBracketsBalanced("()"), label="()" )
expect_true(areBracketsBalanced("[]"), label="[]" )
expect_true(areBracketsBalanced("{{{
[[[
(((
)))
]]]
}}}"), label="{{{[[[((()))]]]}}}" )
expect_true(!areBracketsBalanced("{{{[[((()))]]]}}}"), label="{{{[[((()))]]]}}}" )
expect_true(!areBracketsBalanced("{{[[[((()))]]]}}}"), label="{{[[[((()))]]]}}}" )
expect_true(!areBracketsBalanced("{{{[[[(()))]]]}}}"), label="{{{[[[(()))]]]}}}" )
expect_true(!areBracketsBalanced("{{{[[[((())]]]}}}"), label="{{{[[[((())]]]}}}" )
expect_true(!areBracketsBalanced("{{{[[[((()))]]}}}"), label="{{{[[[((()))]]}}}" )
expect_true(!areBracketsBalanced("{{{[[[((()))]]]}}"), label="{{{[[[((()))]]]}}" )
expect_true(!areBracketsBalanced("{{[[[((()))]]}}}"), label="{{[[[((()))]]}}}" )
expect_true(areBracketsBalanced("try this{ using that[(1,2),(1,2),(1,2)] and that[ (1,2) ] }"), label="try this1" )
expect_true(!areBracketsBalanced("try this{ using that[(1,2,(1,2),(1,2)] and that[ (1,2) ] }"), label="try this2" )
expect_true(!areBracketsBalanced("{(})"), label="{(})" )
expect_error( areBracketsBalanced( paste(rep("(", 255), rep(")",255), collapse="") ), "too many brackets -> max 500 !" )
|
27826957feeb7e50a54fa9b73e3f6612e5dcadf0 | 2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0 | /fuzzedpackages/rotations/man/maxwell.kappa.Rd | 3c1c4310c44387121ef20019b70f7de301ac0ba2 | [] | no_license | akhikolla/testpackages | 62ccaeed866e2194652b65e7360987b3b20df7e7 | 01259c3543febc89955ea5b79f3a08d3afe57e95 | refs/heads/master | 2023-02-18T03:50:28.288006 | 2021-01-18T13:23:32 | 2021-01-18T13:23:32 | 329,981,898 | 7 | 1 | null | null | null | null | UTF-8 | R | false | true | 1,188 | rd | maxwell.kappa.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kappa.R
\name{maxwell.kappa}
\alias{maxwell.kappa}
\title{Circular variance and concentration parameter}
\usage{
maxwell.kappa(nu)
}
\arguments{
\item{nu}{circular variance}
}
\value{
Concentration parameter corresponding to nu.
}
\description{
Return the concentration parameter that corresponds to a given circular
variance.
}
\details{
The concentration parameter \eqn{\kappa} does not translate across circular
distributions. A commonly used measure of spread in circular distributions
that does translate is the circular variance defined as
\eqn{\nu=1-E[\cos(r)]}{\nu=1-E[cos(r)]} where \eqn{E[\cos(r)]}{E[cos(r)]} is
the mean resultant length. See \cite{mardia2000} for more details. This
function translates the circular variance \eqn{\nu} into the corresponding
concentration parameter \eqn{\kappa} for the modified Maxwell-Boltzmann
distribution. For numerical stability, a maximum \eqn{\kappa} of 1000 is
returned.
}
\examples{
# Find the concentration parameter for circular variances 0.25, 0.5, 0.75
maxwell.kappa(0.25)
maxwell.kappa(0.5)
maxwell.kappa(0.75)
}
\seealso{
\code{\link{Maxwell}}
}
|
238ababf730d1646ca448ed70f1e2ee3d8235d42 | 1dc1a1a4e717c20112517501c43f9a966ab0c0e5 | /tests/testthat/test-calc_catchment.R | 62b67cbd3e75497ca131b0ccef49e96788de36aa | [
"MIT"
] | permissive | Robinlovelace/stplanr | 5d11640b9b644e40d81b97ee1a2debb77ffb4e26 | d1f10fe2335c2494ba153fd09675756e2c1572b3 | refs/heads/master | 2021-01-23T09:02:00.128500 | 2018-09-13T08:19:36 | 2018-09-13T08:19:36 | 30,063,520 | 14 | 4 | null | null | null | null | UTF-8 | R | false | false | 741 | r | test-calc_catchment.R | context("Test the calc_catchment function")
test_that(
desc = "calc_catchment returns a SpatialPolygonsDataFrame",
code = {
data_dir <- system.file("extdata", package = "stplanr")
unzip(file.path(data_dir, 'smallsa1.zip'))
unzip(file.path(data_dir, 'testcycleway.zip'))
sa1income <- readOGR(".","smallsa1")
testcycleway <- readOGR(".","testcycleway")
t1 <- calc_catchment(
polygonlayer = sa1income,
targetlayer = testcycleway,
calccols = c('Total'),
distance = 800,
projection = 'austalbers',
dissolve = TRUE
)
expect_is(t1, "SpatialPolygonsDataFrame")
files_to_remove = list.files(pattern = "smallsa|testcycleway")
file.remove(files_to_remove) # tidy up
}) |
7dcfe9bb4e1668789742d5b34778d50226666d91 | ba8c93066b190808f70d54386359ee015639ca33 | /dc/man.r | 8a2e6b59824cb6d72267e44a1fd4d53a22c88fcf | [] | no_license | unix-history/tropix-cmd | 0c0b54ae35b9e48c63aca8a7ac06e7935dd5b819 | 244f84697354b1c0e495a77cdff98549875d1b9f | refs/heads/master | 2021-05-27T22:06:51.390547 | 2014-11-06T17:41:20 | 2014-11-06T17:41:20 | 26,281,551 | 1 | 2 | null | null | null | null | ISO-8859-1 | R | false | false | 1,879 | r | man.r | .bp
.he 'DC (cmd)'TROPIX: Manual de Referência'DC (cmd)'
.fo 'Atualizado em 03.11.04'Versão 4.6.0'Pag. %'
.b NOME
.in 5
.wo "dc -"
Calculador de mesa com aritmética de múmeros inteiros
.br
.in
.sp
.b SINTAXE
.in 5
.(l
dc
.)l
.in
.sp
.b DESCRIÇÃO
.in 5
O programa "dc" simula um calculador de mesa (muito simples) com
aritmética de múmeros inteiros.
.sp
Os números podem ser dados
nas bases 10 (decimal), 16 (hexadecimal) ou 8 (octal),
conforme a sintaxe da linguagem "C":
(0x.... para hexadecimal e 0... para octal; os demais são considerados decimais).
Para o caso hexadecimal, somente são aceitas
as letras maiúsculas 'A' a 'F' para representar os dígitos
10 a 15.
Um número pode ser seguido das letras 'K', 'M' ou 'G',
o que fará com que seu valor seja multiplicado por
1024 (KB), 1024^2 (MB) ou 1024^3 (GB), respectivamente.
.sp
Estão disponíveis 26 variáveis para conterem valores intermediários,
cujos nomes são "a" a "z". Estas variáveis são inicializadas com o
valor 0, mas no entanto, "dc" indica quando está sendo usado o valor
de uma variável ainda não atribuída pelo usuário.
.sp
Os operadores disponíveis são: "=", "+", "-", "*", "/", "\%", "&",
"|", "^", ">>", "<<", "~", "(" e ")", conforme a mesma notação da
linguagem "C".
As expressões são formadas usando os operadores
na forma infixa, tal como "a = b + 5".
.sp
Os resultados são dados em decimal e hexadecimal.
.sp
Um <^D> encerra o uso de "dc".
.bc /*************************************/
.sp
As opções do comando são:
.in +3
.ip -v
Verboso: (nesta versão sem função).
.ep
.in -3
.ec /*************************************/
.in
.sp
.b
VEJA TAMBÉM
.r
.in 5
.wo "(cmd): "
fdc
.br
.in
.sp
.b EXEMPLO
.in 7
.(l
a = 0x34
b = 234
c = (a - 4) * (b ^ 1)
.)l
.sp
irá imprimir o valor 11280 (e atribuí-lo à variável "c").
.in
.sp
.b ESTADO
.in 5
Efetivo.
|
229932a924f866a8cfced4fb943719bf0ed93ae5 | 36eda864518a588a51b6de937da4c5f36316943c | /Power Lifting.R | 994e975a79cd0b253b6b6c8f4d323c18d899fd20 | [] | no_license | 26margaretwanjiru/Power-Lifting-Tidy-Tuesday | c85bc7c520d413ab2d54f0e1534c965b4869a4f3 | 269e0e37f93f931e315ff168fece2151910bd640 | refs/heads/master | 2020-08-09T06:41:28.843201 | 2019-10-09T21:11:24 | 2019-10-09T21:11:24 | 214,024,133 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,593 | r | Power Lifting.R | # Packages
library(tidyverse)
library(lubridate)
library(gganimate)
# Data
ipf_lifts <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-10-08/ipf_lifts.csv")
# Data Structure
str(ipf_lifts)
summary(ipf_lifts)
#Extracting the year and month columns
ipf_lifts<-ipf_lifts%>%
mutate(date1 = as_datetime(date)) %>%
mutate(Year=format(date,"%Y"),Month=format(date,"%B"))
# Group by sex and year
count_lift <- ipf_lifts%>%
group_by(Year, sex)%>%
count(sex)
# Data Viz
# manual colors
mycols <- c("black","red")
# The gif
lift_gif <- count_lift%>%
filter(Year > 2010)%>%
ggplot(aes(x = " ", y = n, fill = sex)) +
geom_bar(stat = "identity", color = "white") +
geom_text(aes(label = as.factor(n)), position = position_stack(vjust = 0.5), size = 8,
color = "white")+
scale_fill_manual(values = mycols) +
theme_void()+
transition_states(Year, transition_length = 3, state_length = 1)+
labs(title = "International Powerlifting \n Year : {closest_state}",
subtitle = "The increase/ decrease of men and women\n for the last 9 years")+
theme(
plot.title = element_text(size = 18, face = "bold", hjust = 0.5),
plot.subtitle = element_text(size=16, face = "italic", hjust = 0.5),
plot.caption = element_text(size = 12, face = "italic", color = "blue"))+
labs(caption = "Data Source: OpenPowerlifting.org\n Plot by @magwanjiru")
animate(lift_gif, fps = 10, width = 400, height = 600)
anim_save("lift_gif.gif")
|
894e4c303201d8139424d2deb02b424301b277e5 | 47d916b22c5880a6f02cdefc8eb62ce63390564d | /myCBD/ui.R | 3a37c45f9bfaa3b5d9ec2532af1556bee9749642 | [] | no_license | sharygin/CACommunityBurden | 44d0626d49bb43714813d4325c6faefe63bcc7e0 | b94870c60decd83733b959339131c63f00439e16 | refs/heads/master | 2021-06-24T11:07:03.066910 | 2020-02-28T01:10:01 | 2020-02-28T01:10:01 | 123,367,400 | 0 | 0 | null | 2018-03-01T01:55:08 | 2018-03-01T01:55:08 | null | UTF-8 | R | false | false | 13,491 | r | ui.R | # =============================================================================
# "ui.R" file
#
# required file for Shiny Application
#
#  sets up user inputs (drop downs, buttons, etc.) and text in side panels
#  sets up tabs and places all maps, charts, images, titles in main panels
# set all "styles"
#
# Michael Samuel
# 2018
#
# =============================================================================
# STYLES, CONSTANTS AND FUNCTIONS FOR UI --------------------------------------
STATE <- "CALIFORNIA" # geography label constant; reportedly required here (not only in the global file) when deployed on the CDPH Shiny Server -- TODO confirm why
# Function used as a "short-cut" when building the JavaScript "condition"
# strings required by the conditionalPanel() calls below.
#
# vec: vector of tab ID values to match against the Shiny input `input.ID`.
# Returns a single string of "input.ID == <value>" clauses joined with the
# JavaScript OR operator, e.g. fC(c(22, 23)) -> "input.ID == 22 | input.ID == 23".
fC <- function(vec) {
  # paste0() with collapse builds the OR-chain directly, replacing the old
  # rep()-based trailing-separator trick (which errored on empty input and
  # left stray spaces in the generated condition).
  paste0("input.ID == ", vec, collapse = " | ")
}
# Styles for help buttons and boxes (inline CSS strings passed to the
# `style` argument of UI elements below)
# small, right-floated blue button (white text on Bootstrap-primary blue)
myButtonSty <- "height:22px; padding-top:0px; margin-top:-5px; float:right;
                color: #fff; background-color: #337ab7;
                border-color: #2e6da4"
# purple background for the "Tab Help" actionButtons at the top of each sidebar
myHelpButtonSty <- "background-color: #694D75;font-size:14px;"
# clickable box: pointer cursor, thick blue border, no horizontal padding
myBoxSty <- "cursor:pointer; border: 3px solid blue;
             padding-right:0px;padding-left:0px;"
# START OF UI --------------------------------------------------------------------
shinyUI(fluidPage(theme = "bootstrap.css",
# current approach to setting style for main fonts and hyperlink font
# TODO needs cleaning, improvement, and documentation from someone knowledgeable
tags$head(
tags$style(HTML("
@import url('//fonts.googleapis.com/css?family=Open+Sans');
* {font-family: 'Open Sans';line-height: 1.5;}
a {text-decoration: none; color: #0000EE;}
") ) ),
# from prior attempts to set fonts etc - maybe something useful here
# tags$head(tags$style(HTML('
# .skin-blue .main-header .logo:hover {background-color: #3c8dbc; }
# .main-header .logo {
# font-family: Tahoma, Geneva, sans-serif; font-weight: bold; font-size: 20px;}')),
# tags$style(type='text/css', "* {font-family: 'Open Sans', Georgia; }"),
tags$h3(mTitle), # app main title supplied from Global
# surprisingly enough, this removes the tick marks between the years on the year slider
# TODO how does this work?
tags$style(type = "text/css", ".irs-grid-pol.small {height: 0px;}"),
# SIDEBARS -----------------------------------------------------------------------
sidebarPanel(width=3,
# Tab help buttons on each tab ----------------------------
# The numeric IDs passed to fC() match the tabPanel value= codes set in
# mainPanel below (e.g. 22 = interactive map, 33 = rank by cause, ...)
conditionalPanel(condition = fC(c(22,23)), actionButton("mapTab", "Tab Help",style=myHelpButtonSty),br(),br()),
conditionalPanel(condition = fC(c(33)), actionButton("conditionTab", "Tab Help",style=myHelpButtonSty),br(),br()),
conditionalPanel(condition = fC(c(45)), actionButton("conditionTableTab","Tab Help",style=myHelpButtonSty),br(),br()),
conditionalPanel(condition = fC(c(34)), actionButton("conditionSexTab", "Tab Help",style=myHelpButtonSty),br(),br()),
conditionalPanel(condition = fC(c(44)), actionButton("rankGeoTab", "Tab Help",style=myHelpButtonSty),br(),br()),
conditionalPanel(condition = fC(c(55)), actionButton("trendTab", "Tab Help",style=myHelpButtonSty),br(),br()),
conditionalPanel(condition = fC(c(66)), actionButton("sdohTab", "Tab Help",style=myHelpButtonSty),br(),br()),
# NOTE(review): inputId "sdohTab" is reused on the line below (also used for
# the 66 panel above); duplicate Shiny input IDs are invalid -- confirm the
# intended button ID for tab 1.
conditionalPanel(condition = fC(c(1)), actionButton("sdohTab", "Tab Help",style=myHelpButtonSty),br(),br()),
# Input selections on each tab ----------------------------
# myCAUSE
conditionalPanel(condition = fC(c(22,23,44,55,56,66)),
# first parameter of actionButton (inputId) generated by server.R
actionButton(inputId="causeHelp", label="?",style=myButtonSty) ,
selectInput("myCAUSE", HTML("Cause:"), choices=fullList, selected="0")),
# myLHJ
conditionalPanel(condition = fC(c(22,23,33,44,45,55,56)),
selectInput("myLHJ","County/State:",choices=lList,selected=STATE)),
# myGeo
conditionalPanel(condition = fC(c(22,23,66)),
selectInput("myGeo","Geographic Level:",
choices=c("County","Community","Census Tract"))),
# Red warning text shown only when census-tract level is chosen on a map tab
conditionalPanel(condition = paste("(input.myGeo == 'Census Tract') & (",fC(c(22,23)),")"),
helpText(h6(tractWarning,style="color:red"))) ,
# myYear
# Year slider: shown on the map tabs only at County level, and always on the
# rank/table tabs (33, 34, 45, 44)
conditionalPanel(condition =
paste(
"(!(input.myGeo == 'Community' | input.myGeo == 'Census Tract')
&& (", fC(c(22,23)),") )
| (", fC(c(33,34,45,44)),")"
),
sliderInput("myYear","Year:",value=2017,min=2001,max=2017,animate = TRUE,
round=TRUE,sep="",step=1) ), #can use value=c(2017,2017)
# mySex
conditionalPanel(condition = fC(c(22,23,33,44,66)),
radioButtons( "mySex", "Sex:", choices=c("Total","Female","Male"))),
# myLev
conditionalPanel(condition = fC(c(33,34)),
actionButton( "levelHelp", label="?",style=myButtonSty) ,
radioButtons("myLev", "Levels to show:", choices=c("Top Level" = "lev1","Public Health" = "lev2","Detail" = "lev3"))),
# myStateCut
conditionalPanel(condition = fC(c(22,23)),
actionButton("statecutHelp", label="?", style=myButtonSty),br(), #add br(), here to fix spacing, but does not yet....
checkboxInput("myStateCut", "State-based cutpoints", value=TRUE)),
# myN
conditionalPanel(condition = fC(c(33,34)),
numericInput( "myN", "How Many:", value=10,min=1,max=50)),
# myMeasure
conditionalPanel(condition = fC(c(22,23,34,44,55,56,66)),
actionButton( "measureHelp", label="?",style=myButtonSty) ,
radioButtons( "myMeasure", "Measure:", choices=lMeasures,selected="YLL.adj.rate")),
# myMeasureShort
conditionalPanel(condition = fC(c(33)),
selectInput( "myMeasureShort", "Measure Sort Order:", choices=lMeasuresShort)),
# myCutSystem
conditionalPanel(condition = fC(c(22,23)),
actionButton("cutmethodHelp", label="?",style=myButtonSty) ,
radioButtons( "myCutSystem","Cut-point method:", choices=c("quantile","fisher"))), # pretty
# myLabName
conditionalPanel(condition = fC(c(23)),
checkboxInput("myLabName", "Place Names", value=FALSE)),
# myCI
# Confidence-interval checkbox: only on the rank-by-geography tab, and only
# for the three rate measures that have CIs computed
conditionalPanel(condition =
paste(
"(",fC(c(44)),") &&",
"( (input.myMeasure == 'cDeathRate') | (input.myMeasure == 'YLLper') |
(input.myMeasure == 'aRate'))"
),
checkboxInput("myCI", "95% CIs?", value=FALSE)),
# myRefLine
conditionalPanel(condition = fC(c(44)),
checkboxInput("myRefLine", "Reference Line", value=FALSE)),
# myX
# NOTE(review): "Socal" in the user-facing label looks like a typo for
# "Social" -- confirm before changing the displayed string.
conditionalPanel(condition = fC(c(66)),
selectInput( "myX", "Socal Determinant of Health Variable:", choices=sdohVec)),
# Figure Download buttons ---------------------------------------------------
conditionalPanel(condition = "input.ID == 23", downloadButton('mapFigure', 'Download Map')),
conditionalPanel(condition = "input.ID == 33", downloadButton('rankCauseFigure', 'Download Figure')),
# Home page side bar text ---------------------------------------------------
conditionalPanel(condition = fC(c(11)),
HTML('<left><img src="CDPH.gif" height="125" width="150"></left>'), # 85 100
br(),br(),
helpText(h4("Welcome to the Preview Version of the CCB!"),style="color:green",align="left"), br(),
h5(tags$a(href="CA_Health_Views.pdf","SEE CCB DATA IN ACTION, in the new 'Measuring Health Status in California'")), br(),
actionButton("newsUse","News and Updates",style=myHelpButtonSty), br(),
h5(tags$a(href="https://www.surveymonkey.com/r/2N2JSTV","Report 'bugs' HERE!")),
h5(tags$a(href="https://www.surveymonkey.com/r/ZH9LSR8","Share your feedback HERE!")),
helpText(textIntroA,style="color:black"), br(),
helpText(textIntroC,style="color:black"), br(),
# whichData toggles between the real-data note and the fake-data warning
if (whichData == "real") { helpText(textNote.real,style="color:black")},
if (whichData == "fake") { helpText(textNote.fake,style="color:red")},
br(),br(),
icon("envelope-o"),tags$a(href = "mailto:michael.samuel@cdph.ca.gov","Questions? Want to Help?"), br(),
tags$a(href="https://shiny.rstudio.com/","Developed in R-Shiny"), br(),
tags$a(href="https://github.com/mcSamuelDataSci/CACommunityBurden","GitHub Site")
),
# Text on other pages -----------------------------------------
conditionalPanel(condition = fC(c(22,23,33,45,44,55,66)),
helpText(br(),helpText('Note: YLL is "Years of Life Lost"',style="color:green;font-weight: bold;"))
),
conditionalPanel(condition = fC(c(33,45,44,55,66)),
paste('Note: All values <',criticalNumber,'including zeros are excluded '),style="color:green;font-weight: bold;")
,
conditionalPanel(condition = "input.ID != 11",
br(),HTML('<left><img src="CDPH.gif" height="125" width="150"></left>')
),
# Text on all side bars -------------- ----------------------------------------
helpText(br(),h4(VERSION),style="color:green")
# -- END of sidebarPanel-------------------------------------------------------
),
# ------------------------------------------------------------------------------------
# MAIN PANELS -------------------------------------------------------------------------
# useShinyjs() enables shinyjs calls made from server.R
useShinyjs(),
mainPanel(
# Each tabPanel's value= code is what the sidebar conditionalPanels test
# against via input.ID (see fC() above)
tabsetPanel(type = "tab",id="ID",
tabPanel("HOME", br(),align='center',
h4(HTML(above1),align="left"),
fluidRow(
column(width=3,img(id="map1I", src="mapInt.png", width="100%", onmouseout="this.src='mapInt.png'", onmouseover="this.src='mapInt2.png'", style = myBoxSty)),
column(width=3,img(id="map2I", src="mapStat.png", width="100%", onmouseout="this.src='mapStat.png'", onmouseover="this.src='mapStat2.png'", style = myBoxSty)),
column(width=3,img(id="trendI", src="trends.png", width="100%", onmouseout="this.src='trends.png'", onmouseover="this.src='trends2.png'", style = myBoxSty)),
column(width=3,img(id="scatterI", src="SDOH.png", width="100%", onmouseout="this.src='SDOH.png'", onmouseover="this.src='SDOH2.png'", style = myBoxSty))),
br(),
fluidRow(
column(width=4,img(id="rankgeoI", src="rankGeo.png", width="100%", onmouseout="this.src='rankGeo.png'", onmouseover="this.src='rankGeo2.png'", style = myBoxSty)),
column(width=4,img(id="ranktableI", src="table.png", width="100%", onmouseout="this.src='table.png'", onmouseover="this.src='table2.png'", style = myBoxSty)),
column(width=4,img(id="rankcauseI", src="rankCause.png", width="100%", onmouseout="this.src='rankCause.png'", onmouseover="this.src='rankCause2.png'", style = myBoxSty))),value = 11
),
tabPanel("ABOUT",
br(),
includeMarkdown("About.md"), value = 99),
tabPanel("INTERACTIVE MAP",
br(), htmlOutput("map_title") ,
leafletOutput("cbdMapTL", width=700, height=700), value = 22),
tabPanel("STATIC MAP",
plotOutput("cbdMapTS", height=700,width="100%"), value = 23),
tabPanel("RANK BY CAUSE",
br(), plotOutput("rankCause", width="100%",height=700), value = 33),
# tabPanel("RANK BY CAUSE AND SEX",
# plotOutput("rankCauseSex", width="100%",height=700), value = 34),
tabPanel("RANK BY GEOGRAPHY",
plotOutput("rankGeo", width="100%", height=1700), value = 44),
tabPanel("Trend",
br(),
plotOutput("trend", width="100%",height=700), value = 55),
tabPanel("Race Trend",
br(),
plotOutput("trendRace", width="100%",height=700), value = 56),
tabPanel("DATA TABLE",
dataTableOutput("rankCauseT"), value = 45), #DT::
tabPanel("SOCIAL DETERMINANTS",
br(),
plotlyOutput("scatter", height=700), value = 66),
tabPanel("Technical Documentation",
br(),
includeMarkdown("technical.md"), value = 77),
tabPanel("Links to Other Data",
br(),
includeMarkdown("ourLinks.md"), value = 88)
) # END tabSetPanel
) # END mainPanel
) # END fluidPage
) # END ShinyUI
# END =============================================================================================
# NOTES etc. :
# convert Markdown doc to Word if needed forediting
# https://cloudconvert.com/md-to-docx
# tags$style(type = "text/css", ".irs-grid-pol.small {height: 0px;}"), # removes ticks between years
# https://stackoverflow.com/questions/44474099/removing-hiding-minor-ticks-of-a-sliderinput-in-shiny
# "BETTER" drop down list look
# https://stackoverflow.com/questions/40513153/shiny-extra-white-space-in-selectinput-choice-display-label
#library(shinythemes)
# shinyUI(fluidPage(theme = "bootstrap.css",
# #shinythemes::themeSelector(),
# wellPanel
# navBarPanel
# work on customizing help button
# actionButton("causeHelp", "?",style=" height:22px; padding-top:0px; margin-top:-5px;
# float:right; color: #fff; background-color: #337ab7; border-color: #2e6da4")
# selectizeInput("myCAUSE", "Cause:", choices=fullList, selected="A",
# options = list(maxOptions = 10000),width='50%')),# size=30 selectize = F, size=3,
#width:100px;
# https://shiny.rstudio.com/reference/shiny/latest/selectInput.html
# https://shiny.rstudio.com/articles/selectize.html
# https://www.w3schools.com/html/html_form_elements.asp
# https://www.w3schools.com/css/css3_buttons.asp
|
6d3106ffe60d29e4ee7e6d2d8d6fea8852196b08 | 41ad0c923f892ae925f4e5a01b50c32f862a55cb | /report.R | 45e0ef11f0d6826e5fdcf0820fca50512bb4f33b | [] | no_license | humana-fragilitas/harvardx-ph125.9x-movielens | 71acfb8987c718c6c8d78814b5a70e646beb7a0c | 22a442218c47f8d46d4da90bdab6f492e37d9bc7 | refs/heads/master | 2021-03-02T23:35:24.538465 | 2020-03-09T01:11:11 | 2020-03-09T01:11:11 | 245,915,364 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,202 | r | report.R | ################################
# Create edx set, validation set
################################
# Note: this process could take a couple of minutes
# Install-if-missing pattern: require() returns FALSE when a package is
# absent, triggering the install. Packages are attached again below.
if(!require(tidyverse)) {
install.packages("tidyverse", repos = "http://cran.us.r-project.org")
}
if(!require(caret)) {
install.packages("caret", repos = "http://cran.us.r-project.org")
}
if(!require(data.table)) {
install.packages("data.table", repos = "http://cran.us.r-project.org")
}
# Project specific packages
if(!require(ggplot2)) {
install.packages("ggplot2", repos = "http://cran.us.r-project.org")
}
if(!require(kableExtra)) {
install.packages("kableExtra", repos = "http://cran.us.r-project.org")
}
# Libraries required by the project
library(tidyverse)
library(caret)
library(data.table)
library(dplyr)
library(ggplot2)
library(kableExtra)
# MovieLens 10M dataset:
# https://grouplens.org/datasets/movielens/10m/
# http://files.grouplens.org/datasets/movielens/ml-10m.zip
# Download the zipped dataset to a temporary file and parse the "::"-delimited
# ratings and movies tables.
dl <- tempfile()
download.file("http://files.grouplens.org/datasets/movielens/ml-10m.zip", dl)
ratings <- fread(text = gsub("::", "\t", readLines(unzip(dl, "ml-10M100K/ratings.dat"))),
col.names = c("userId", "movieId", "rating", "timestamp"))
movies <- str_split_fixed(readLines(unzip(dl, "ml-10M100K/movies.dat")), "\\::", 3)
colnames(movies) <- c("movieId", "title", "genres")
movies <- as.data.frame(movies) %>% mutate(movieId = as.numeric(levels(movieId))[movieId],
title = as.character(title),
genres = as.character(genres))
movielens <- left_join(ratings, movies, by = "movieId")
# Validation set will be 10% of MovieLens data
# sample.kind="Rounding" reproduces the pre-3.6 sampling algorithm so the
# partition matches the course's reference split (requires R >= 3.6)
set.seed(1, sample.kind="Rounding")
# if using R 3.5 or earlier, use `set.seed(1)` instead
test_index <- createDataPartition(y = movielens$rating, times = 1, p = 0.1, list = FALSE)
edx <- movielens[-test_index,]
temp <- movielens[test_index,]
# Make sure userId and movieId in validation set are also in edx set
validation <- temp %>%
semi_join(edx, by = "movieId") %>%
semi_join(edx, by = "userId")
# Add rows removed from validation set back into edx set
removed <- anti_join(temp, validation)
edx <- rbind(edx, removed)
# Drop intermediate objects to free memory
rm(dl, ratings, movies, test_index, temp, movielens, removed)
# Utility function suitable for converting number formats on axis labels to
# scientific 10^x (plotmath) format, e.g. 2e+05 becomes '2' x 10^5.
# Credit: Brian Diggs (https://groups.google.com/forum/#!topic/ggplot2/a_xhMoQyxZ4)
fancy_scientific <- function(l) {
  # Format each number in scientific notation, e.g. "2e+05"
  sci_text <- format(l, scientific = TRUE)
  # Quote the mantissa so plotmath keeps all of its digits: "'2'e+05"
  quoted <- gsub("^(.*)e", "'\\1'e", sci_text)
  # Replace the exponent marker with plotmath multiplication: "'2'%*%10^+05"
  plotmath_text <- gsub("e", "%*%10^", quoted)
  # Parse into an expression vector usable as ggplot axis labels
  parse(text = plotmath_text)
}
# Root Mean Squared Error between observed values and forecasts.
# Lower values indicate more accurate predictions.
RMSE <- function(observed_values, forecasted_values){
  residuals <- observed_values - forecasted_values
  sqrt(mean(residuals * residuals))
}
################################
# Analysis
################################
# Quick structural and summary overview of the training set
glimpse(edx)
summary(edx)
# Count of unique users and movies in the dataset
# NOTE(review): edx$userId inside summarize works but bypasses data masking;
# n_distinct(userId) would be the idiomatic form.
edx %>% summarize(users = n_distinct(edx$userId), movies = n_distinct(edx$movieId))
# Total number of ratings available in the dataset
length(edx$rating) + length(validation$rating)
# Discern rating into two classes: decimal and non-decimal
ratings_decimal_vs_nondecimal <- ifelse(edx$rating%%1 == 0, "non_decimal", "decimal")
# Build a new dataframe suitable for inspecting decimal and non-decimal ratings ratio
explore_ratings <- data.frame(edx$rating, ratings_decimal_vs_nondecimal)
# Draw histogram
ggplot(explore_ratings, aes(x= edx.rating, fill = ratings_decimal_vs_nondecimal)) +
geom_histogram( binwidth = 0.2) +
scale_x_continuous(breaks=seq(0, 5, by= 0.5)) +
scale_y_continuous(labels = fancy_scientific) +
scale_fill_manual(values = c("decimal"="royalblue", "non_decimal"="navy"),
name="Ratings classes",
breaks=c("decimal", "non_decimal"),
labels=c("Decimal", "Non-decimal")) +
labs(x="Rating", y="Number of ratings",
caption = "Source: MovieLens 10M Dataset") +
ggtitle("Ratings distribution by class: decimal vs. non-decimal") +
theme_minimal()
# Build a new dataframe suitable for inspecting
# the top 20 movie titles by number of ratings
top_titles <- edx %>%
group_by(title) %>%
summarize(count=n()) %>%
top_n(20,count) %>%
arrange(desc(count))
# Draw bar chart: top titles
top_titles %>%
ggplot(aes(x=reorder(title, count), y=count)) +
ggtitle("Top 20 movie titles by \n number of user ratings") +
geom_bar(stat='identity', fill="navy") +
coord_flip(y=c(0, 40000)) +
labs(x="", y="Number of ratings",
caption = "Source: MovieLens 10M Dataset") +
geom_text(aes(label= count), hjust=-0.1, size=3) +
theme_minimal()
# Draw histogram: distribution of ratings by movieId
edx %>%
count(movieId) %>%
ggplot(aes(n)) +
ggtitle("Movies") +
labs(subtitle ="Distribution of ratings by movieId",
x="movieId" ,
y="Number of ratings",
caption ="Source: MovieLens 10M Dataset") +
geom_histogram(bins = 30, fill="navy", color = "white") +
scale_x_log10() +
theme_minimal()
# histogram of number of ratings by userId
edx %>%
count(userId) %>%
ggplot(aes(n)) +
geom_histogram( bins=30, fill="navy",color = "white") +
scale_x_log10() +
ggtitle("Users") +
labs(subtitle ="Number of ratings by userId",
x="userId" ,
y="Number of ratings") +
theme_minimal()
################################
# Results
################################
# Basic prediction via mean rating
# Baseline model: predict the overall mean rating mu for every entry
mu <- mean(edx$rating)
rmse_naive <- RMSE(validation$rating, mu)
rmse_results = tibble(Method = "Basic prediction via mean rating", RMSE = rmse_naive)
rmse_results %>% knitr::kable() %>% kable_styling()
# Movie effects
# Simple model taking into account the movie effects, b_i
mu <- mean(edx$rating)
movie_averages <- edx %>%
group_by(movieId) %>%
summarise(b_i = mean(rating - mu))
# %>% binds tighter than +, so this is mu + (validation %>% ... %>% pull(b_i))
predicted_ratings <- mu + validation %>%
left_join(movie_averages, by='movieId') %>%
pull(b_i)
rmse_model_movie_effects <- RMSE(validation$rating, predicted_ratings)
rmse_results <- bind_rows(rmse_results, tibble(Method="Movie effect model",
RMSE = rmse_model_movie_effects))
rmse_results %>% knitr::kable() %>% kable_styling()
# Movie and user effects
# Movie and user effects model
user_averages <- edx %>%
left_join(movie_averages, by="movieId") %>%
group_by(userId) %>%
summarise(b_u = mean(rating - mu - b_i))
predicted_ratings <- validation %>%
left_join(movie_averages, by='movieId') %>%
left_join(user_averages, by='userId') %>%
mutate(pred = mu + b_i + b_u) %>%
pull(pred)
rmse_model_user_effects <- RMSE(validation$rating, predicted_ratings)
rmse_results <- bind_rows(rmse_results,
tibble(Method="Movie and user effect model",
RMSE = rmse_model_user_effects))
rmse_results %>% knitr::kable() %>% kable_styling()
# Movie and user effects with regularization
# Prediction via movie and user effects model with regularization
# Grid-search the penalty lambda; each iteration refits b_i and b_u with the
# penalized (shrunken) means and scores the validation set.
lambdas <- seq(0, 10, 0.25)
rmses <- sapply(lambdas, function(l){
mu <- mean(edx$rating)
b_i <- edx %>%
group_by(movieId) %>%
summarise(b_i = sum(rating - mu)/(n()+l))
b_u <- edx %>%
left_join(b_i, by="movieId") %>%
group_by(userId) %>%
summarise(b_u = sum(rating - b_i - mu)/(n()+l))
predicted_ratings <- validation %>%
left_join(b_i, by = "movieId") %>%
left_join(b_u, by = "userId") %>%
mutate(predicted = mu + b_i + b_u) %>%
pull(predicted)
RMSE(validation$rating, predicted_ratings)
})
# Minimum RMSE value
rmse_regularization <- min(rmses)
rmse_regularization
# Optimal lambda
lambda <- lambdas[which.min(rmses)]
lambda
# Plot RMSE against lambdas to visualize the optimal lambda
qplot(lambdas, rmses) + theme_minimal()
# Summary of prediction models outcomes
rmse_results <- bind_rows(rmse_results, tibble(
Method="Movie and user effects model with regularization",
RMSE = rmse_regularization))
rmse_results %>% knitr::kable() %>% kable_styling()
|
42be8fb413036efa730d16e42686552dcc48283f | e67620506721117201d8e6ccbe7adc08335c8bd7 | /R/get_page.R | fd4da1da894d215ec4b01928087626ea8e5c285a | [
"MIT"
] | permissive | kwanlin/pendor | 42bb8345ff8e1b1c40cd78e29da5f4a73fc93bb8 | d38332259eec6761f59af615e33869ffad7471ca | refs/heads/master | 2021-05-11T22:04:31.285447 | 2018-02-03T02:30:28 | 2018-02-03T02:30:28 | 117,485,669 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 606 | r | get_page.R | #' Retrieve list of pages from Pendo
#'
#' @param string input character vector
#' @return
#' @seealso
#' @export
#' @examples
#' get_pages_list(some_auth_key)
get_pages_list <- function(auth_key) {
url <- paste0(c("https://app.pendo.io/api/v1/page"), collapse='')
pages_list <- httr::GET(url,
httr::add_headers(
"X-Pendo-Integration-Key" = auth_key,
"Content-Type" = "application/json"
)
) %>%
httr::content(as="text") %>%
fromJSON()
#fromJSON(readline(), flatten = TRUE)
return(pages_list)
}
|
bc52eea0c889c01ef884a652e83592153107f9c2 | 9322baceebe3a9908fe0ea7186567fd7a0868139 | /Writedata.r | d1c745763baa5c532b9818e8d5bed11d6505827b | [] | no_license | ArjenS/GettingAndCleaningData | 3e65d2f9ed475e5c40fcc2ac53e6656b54deada2 | aee00dd36794f265926c796b83e67c3ccc5c2fb9 | refs/heads/master | 2021-01-13T00:53:46.494608 | 2015-11-21T21:16:42 | 2015-11-21T21:16:42 | 46,630,097 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 132 | r | Writedata.r | #This script assumes the presence of a dataset called 'Dataset2'.
write.table(Dataset2,'OutputCourseProject.txt',row.names = FALSE) |
4313cd7ba0988b22313a903c08199f3228285eb2 | 4ae08acc5e05af8d006756b6d8ce96a72ff23d98 | /R/geo_topo.R | 9f9ebf642ca41ce01129e3018170f4facf587b7e | [
"MIT"
] | permissive | ropensci/geojsonio | 7f33854cce01d4f8b7e0c24cda3c34cb14484f09 | 6202af081025015872345d5a9b8c17caaec54d18 | refs/heads/main | 2023-08-31T15:23:52.949534 | 2023-08-21T15:18:50 | 2023-08-21T15:18:50 | 18,337,862 | 138 | 67 | NOASSERTION | 2023-09-05T16:50:05 | 2014-04-01T17:20:23 | R | UTF-8 | R | false | false | 4,083 | r | geo_topo.R | #' GeoJSON to TopoJSON and back
#'
#' @export
#' @param x GeoJSON or TopoJSON as a character string, json, a file path, or
#' url
#' @param object_name (character) name to give to the TopoJSON object created.
#' Default: "foo"
#' @param quantization (numeric) quantization parameter, use this to
#' quantize geometry prior to computing topology. Typical values are powers of
#' ten (`1e4`, `1e5`, ...), default is `0` to not perform quantization.
#' For more information about quantization, see this by Mike Bostock
#' https://stackoverflow.com/questions/18900022/topojson-quantization-vs-simplification/18921214#18921214
#' @param ... for `geo2topo` args passed on to
#' [jsonlite::fromJSON()], and for `topo2geo` args passed on to
#' [sf::st_read()]
#' @return An object of class `json`, of either GeoJSON or TopoJSON
#' @seealso [topojson_write()], [topojson_read()]
#' @examples
#' # geojson to topojson
#' x <- '{"type": "LineString", "coordinates": [ [100.0, 0.0], [101.0, 1.0] ]}'
#' z <- geo2topo(x)
#' jsonlite::prettify(z)
#' \dontrun{
#' library(leaflet)
#' leaflet() %>%
#' addProviderTiles(provider = "Stamen.Terrain") %>%
#' addTopoJSON(z)
#' }
#'
#' # geojson to topojson as a list
#' x <- list(
#' '{"type": "LineString", "coordinates": [ [100, 0], [101, 1] ]}',
#' '{"type": "LineString", "coordinates": [ [110, 0], [110, 1] ]}',
#' '{"type": "LineString", "coordinates": [ [120, 0], [121, 1] ]}'
#' )
#' geo2topo(x)
#'
#' # change the object name created
#' x <- '{"type": "LineString", "coordinates": [ [100.0, 0.0], [101.0, 1.0] ]}'
#' geo2topo(x, object_name = "HelloWorld")
#' geo2topo(x, object_name = "4")
#'
#' x <- list(
#' '{"type": "LineString", "coordinates": [ [100, 0], [101, 1] ]}',
#' '{"type": "LineString", "coordinates": [ [110, 0], [110, 1] ]}',
#' '{"type": "LineString", "coordinates": [ [120, 0], [121, 1] ]}'
#' )
#' geo2topo(x, "HelloWorld")
#' geo2topo(x, c("A", "B", "C"))
#'
#'
#' # topojson to geojson
#' w <- topo2geo(z)
#' jsonlite::prettify(w)
#'
#' ## larger examples
#' file <- system.file("examples", "us_states.topojson", package = "geojsonio")
#' topo2geo(file)
geo2topo <- function(x, object_name = "foo", quantization = 0, ...) {
# S3 generic: dispatches on class(x) to the character, json, or list method
# below; anything else falls through to geo2topo.default
UseMethod("geo2topo")
}
#' @export
geo2topo.default <- function(x, object_name = "foo", quantization = 0, ...) {
# Fallback for unsupported input classes: fail loudly without a call prefix
stop("no 'geo2topo' method for ", class(x), call. = FALSE)
}
#' @export
geo2topo.character <- function(x, object_name = "foo", quantization = 0, ...) {
  # Validate inputs before doing any work
  if (!inherits(object_name, "character")) {
    stop("'object_name' must be of class character")
  }
  if (length(object_name) > 1) {
    if (length(x) != length(object_name)) {
      stop("length of `x` and `object_name` must be equal, unless `object_name` length == 1")
    }
    # Pair each GeoJSON string with its object name; quantization is recycled
    # by Map(). All three arguments are named explicitly in the anonymous
    # function (previously the third was smuggled in through `...`),
    # matching the style of geo2topo.list().
    Map(function(z, w, q) geo_to_topo(unclass(z), w, q, ...), x, object_name, quantization)
  } else {
    # Forward `...` as the other methods do; previously dots were silently
    # dropped on this branch.
    geo_to_topo(x, object_name, quantization, ...)
  }
}
#' @export
geo2topo.json <- function(x, object_name = "foo", quantization = 0, ...) {
# Same conversion as the character method; unclass() strips the "json"
# class so the raw string is interpolated into the V8 call
if (!inherits(object_name, "character")) {
stop("'object_name' must be of class character")
}
geo_to_topo(unclass(x), object_name, quantization, ...)
}
#' @export
geo2topo.list <- function(x, object_name = "foo", quantization = 0, ...) {
# Convert each element; scalar object_name/quantization are recycled by Map()
Map(function(z, w, q) geo_to_topo(unclass(z), w, q, ...), x, object_name, quantization)
}
#' @export
#' @rdname geo2topo
topo2geo <- function(x, ...) {
# S3 generic: dispatches on class(x) (character or json)
UseMethod("topo2geo")
}
#' @export
topo2geo.default <- function(x, ...) {
# Fallback for unsupported input classes
stop("no 'topo2geo' method for ", class(x), call. = FALSE)
}
#' @export
topo2geo.character <- function(x, ...) {
# x: TopoJSON as a character string, file path, or URL; dots go to sf::st_read
topo_to_geo(x, ...)
}
#' @export
topo2geo.json <- function(x, ...) {
# Strip the "json" class and delegate to the shared helper
topo_to_geo(unclass(x), ...)
}
# helpers --------------------------
# Run topojson's topology() inside the embedded V8 context (`topo`, created
# elsewhere in the package) and return the resulting TopoJSON string,
# classed "json".
geo_to_topo <- function(x, object_name, quantization = 0, ...) {
# NOTE(review): object_name is interpolated as a bare JS object key, so names
# containing spaces or special characters would produce invalid JS -- confirm.
topo$eval(
sprintf(
"var output = JSON.stringify(topojson.topology({%s: %s}, %s))",
object_name, x, quantization
)
)
structure(topo$get("output"), class = "json")
}
topo_to_geo <- function(x, ...) {
# Read the TopoJSON into an sf object (tosf is a package-internal reader),
# then serialize back out as GeoJSON
res <- tosf(x, stringsAsFactors = FALSE, ...)
geojson_json(res)
}
|
b645790dc010873de4e16e28d99832777356c14d | c844b3a0ed7e13239dda7c12c606f35e9d05e61d | /counties.R | 2c84c71f17d1b86d66218c08b8cc84301d47a126 | [] | no_license | AdamDS/counties_regions | d766545082ca518b3bac5c9bc3cfe48c3cdf2741 | e37e726bc5f67b46bd5196050a646cbbf0c7e9b1 | refs/heads/master | 2020-12-03T04:03:44.467540 | 2017-06-29T18:53:19 | 2017-06-29T18:53:19 | 95,706,818 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,608 | r | counties.R | #load packages
library( 'ggplot2' )
library( 'reshape' )
library( 'plyr' )
library( 'grid' )
library( 'gridExtra' )
library( 'scales' )
#get pivot table
data = read.table( file = 'pivot.csv' , header = TRUE , sep = "," , stringsAsFactors = FALSE )
#extract the list of counties
counties = data$Geography[ data$Geography != "Missouri" ]
#rename raw data columns to what they will be in chart
data = rename( data , c( "X30DCig16" = "Cigarettes" ,
"X30DAlc16" = "Alcohol" ,
"X30DBinge16" = "Binge*" ,
"X30DMJ16" = "Marijuana" ,
"X30DInh16" = "Inhalants" ,
"X30DScript16" = "Rx misuse" ,
"X30DOTC16" = "OTC misuse" ,
"X30DSynthetic16" = "Synthetic" ,
"X2016.Past.month.electronic.cigarette.use" = "E-cigs" ,
"X2016.Past.month.hookah.use" = "Hookah"
)
)
#remove the regions data columns
data$label = NULL
data$NSDUH.2012.14.reg = NULL
data$Sub.NSDUH.2012.14.Marijuana.Use.in.Past.Month = NULL
data$Sub.NSDUH.2012.14.Illicit.Drug.Use.other.than.Marijuana.in.Past.Mont = NULL
data$Sub.NSDUH.2012.14.Alcohol.Use.in.Past.Month = NULL
data$Sub.NSDUH.2012.14.Binge.Alcohol.Use.in.Past.Month = NULL
data$Sub.NSDUH.2012.14.Cigarette.Use.in.Past.Month = NULL
data$sub.NSDUH.2012.14.Nonmedical.Use.of.Pain.Relievers.Past.Year.Age.18. = NULL
data$label2 = NULL
#order Missouri to beginning
#relevel( data$Geography , "Missouri" )
# " Missouri " is padded with spaces -- presumably so it sorts ahead of the
# county names and appears first in the legend; TODO confirm
state = " Missouri "
data$Geography[ data$Geography == "Missouri" ] = state
#reform all data into factor data (id) and numerical, usage data type (variable) and percentage (value)
goop = melt( data , id = c( "Geography" ) )
#need a labels column above bars based on numerical values
goop$labels = goop$value
#clean up label precision to tenths
goop$labels = gsub( '%' , '' , goop$labels )
goop$labels = as.numeric( goop$labels )
goop$labels = format( round( goop$labels , 1 ) )
goop$labels = as.factor( goop$labels )
goop$labels = paste( goop$labels , "%" , sep = '' )
#remove percent sign & make values as numbers
# value becomes a proportion (0-1) so scales::percent can format the y axis
goop$value = gsub( '%' , '' , goop$value )
goop$value = as.numeric( goop$value ) / 100
#custom chart parameters
chartTitle = ' ' #'Current Substance Use for Grades 6-12, 2016'
labelSize = 3
axisSize = 9
tickSize = 9
titleSize = 11
legendSize = 9
keySize = 0.25
purple = "#5e498B"
yellow = "#F8CB44"
palette = c( yellow , purple )
barWidth = 0.6
ymin = 0
ymax = .40
barSeparation = 0.075
labelSeparation = -barSeparation
barLabelAngle = 90
# left-to-right bar order on the x axis
xOrder = c( "Alcohol" , "E-cigs" , "Rx misuse" , "Marijuana" , "Cigarettes" , "Binge*" , "Hookah" , "OTC misuse" , "Inhalants" , "Synthetic" )
# Loop over all counties: for each one, draw a state-vs-county grouped bar
# chart of substance-use percentages and save it as "<county>.png".
for (county in counties) {
  # Data to plot: always the Missouri rows plus the current county's rows
  dat <- data.frame(goop[goop$Geography == state, ])
  dat <- rbind(dat, goop[goop$Geography == county, ])
  # Category (variable) on x, percentage on y, bars colored by geography
  g <- ggplot(dat, aes(x = variable, y = value, fill = Geography))
  # Bar plot: use values instead of counts (identity); offset the state and
  # county bars with dodge
  g <- g + geom_bar(stat = "identity", position = position_dodge(width = barWidth + barSeparation), width = barWidth)
  # Percentage labels above the bars, rotated vertically;
  # show.legend = FALSE keeps the text layer out of the legend
  # (the original passed guide = FALSE, which geom_text ignores)
  g <- g + geom_text(data = dat, aes(variable, value, label = labels), size = labelSize, hjust = labelSeparation, show.legend = FALSE, color = "black", position = position_dodge(width = barWidth), angle = barLabelAngle)
  # White chart area, transparent surround
  g <- g + theme_bw()
  g <- g + theme(plot.background = element_rect(fill = "transparent", colour = NA))
  # Chart title text and format
  g <- g + ggtitle(chartTitle)
  g <- g + theme(title = element_text(size = titleSize, color = purple, face = "bold"))
  # x-label removed; x tick labels formatted and put in the fixed order
  g <- g + xlab('')
  g <- g + theme(axis.text.x = element_text(size = axisSize, angle = 45, vjust = 1, hjust = 1))
  g <- g + scale_x_discrete(limits = xOrder)
  # y-label removed; y tick labels shown as percentages
  g <- g + ylab("")
  g <- g + scale_y_continuous(limits = c(ymin, ymax), labels = percent, expand = c(0, 0))
  g <- g + theme(axis.text.y = element_text(size = axisSize))
  # Legend shape and placement
  g <- g + theme(legend.key.size = unit(keySize, "cm"))
  g <- g + theme(legend.position = c(.5, 0.85))
  g <- g + theme(legend.background = element_blank())
  g <- g + theme(legend.direction = "horizontal")
  g <- g + scale_fill_manual(values = palette)
  g <- g + theme(legend.text.align = 1)
  g <- g + theme(legend.justification = 0)
  # Legend text formatting
  g <- g + theme(legend.title = element_blank())
  g <- g + theme(legend.text = element_text(size = legendSize))
  # Chart grid & boundaries
  g <- g + theme(panel.grid.minor = element_blank(),
                 panel.grid.major = element_blank())
  g <- g + theme(panel.border = element_blank())
  g <- g + theme(axis.line = element_line(color = "black", size = 0.5))
  # Reduce margins around the chart
  g <- g + theme(plot.margin = unit(c(0.1, 0.12, -0.15, -0.15), "in"))
  # Progress feedback (newline so messages don't run together)
  cat("store plot for", county, "\n")
  # Save chart as .png; pass the plot explicitly -- ggsave()'s default of
  # last_plot() is unreliable inside a loop where nothing is printed
  filename <- paste(toString(county), "png", sep = ".")
  ggsave(filename, plot = g, width = 5, height = 3, dpi = 300, units = "in")
}
|
2f9c2b41bf2f4711856fd23f0c1875df590c1854 | 2b161e485c17614927c847adb74a040d9c0e1129 | /app.R | 87103e0871c9639bc40bc93c96b77e2f1cfde7b8 | [
"Apache-2.0"
] | permissive | aammd/CCISS_ShinyApp | 5bdd1e83b50da266a19e99f9c2d45ec305702fae | 462e25fa032439aec3ff6ad7b85f46dbe24f2bc9 | refs/heads/main | 2023-03-12T11:22:36.863766 | 2021-02-27T04:07:52 | 2021-02-27T04:07:52 | 342,243,789 | 0 | 0 | Apache-2.0 | 2021-02-25T12:52:40 | 2021-02-25T12:52:39 | null | UTF-8 | R | false | false | 1,463 | r | app.R | library(shiny)
library(bslib)
library(thematic)
# ----- non-shiny packages
require(data.table)
require(foreach)
require(tidyverse)
require(DBI)
require(sf)
library(here)
require(RPostgreSQL)
library(raster)
library(matrixStats)
library(Rcpp)
require(tictoc)
require(ggplot2)
require(ggthemes)
require(tmap)
library(leaflet)
Rcpp::sourceCpp("0CCISS_Cfn.cpp")
source("1CCISS_Data.R")
source("2CCISS_EdaOverlap.R")
source("3CCISS_Suit.R")
source("setup_function.R")
source("shiny_functions.R")
thematic::thematic_shiny(font = "auto")
# Turn a numeric vector into a simple 42-bin ggplot histogram.
#
# vec: numeric vector to histogram.
# Returns a ggplot object (render by printing).
ggvec2hist <- function(vec){
  plot_data <- data.frame(x = vec)
  ggplot(plot_data, aes(x = x)) +
    geom_histogram(bins = 42)
}
# Build the app-wide data list once at startup; setup_data_function() comes
# from the source()d helper scripts above (see setup_function.R).
data_list <- setup_data_function()
# Construct the CCISS Shiny application from pre-built data.
#
# test_data: a list whose elements `BGC`, `CCISS_Sites` and `CCISS_Summary`
#   are handed to the three module servers below (presumably built by
#   setup_data_function() — confirm against the source()d helper scripts).
# Returns the shinyApp object; running/printing it launches the app.
cciss_app <- function(test_data){
  # Navbar UI with one tab per module; each tab's content comes from the
  # matching *_ui() module function (presumably defined in the source()d
  # shiny_functions.R — verify).
  ui <- navbarPage(title = "CCISS Shiny app draft",
                   # bslib theme: white background/primary, blue foreground,
                   # Google-hosted "Bree Serif" base font.
                   theme = bslib::bs_theme(
                     bg = "white", fg = "#42a5f5", primary = "white",
                     base_font = font_google("Bree Serif")
                   ),
                   tabPanel("BGC",
                            bgc_vis_ui("bgc_vis")
                   ),
                   tabPanel("Site Map",
                            site_map_ui("sites")
                   ),
                   tabPanel("FailRisk",
                            risk_plot_ui("risk")
                   )
  )
  server <- function(input, output, session) {
    # browser()  (debugging hook left by the author)
    # Wire each module server to its slice of the prepared data; the module
    # IDs must match the ones used in the UI above.
    bgc_vis_server("bgc_vis", BGC_data = test_data$BGC)
    site_map_server("sites", site_data = test_data$CCISS_Sites)
    risk_plot_server("risk", summary_data = test_data$CCISS_Summary)
  }
  shinyApp(ui, server)
}
# Launch the app with the prepared data.
cciss_app(data_list)
|
c0b600c420cbbd2d352f845a303da65dcd5bf1d1 | 7f006b75626a2cf839c5ec1281ec86cf2cde611b | /man/contains_underscores.Rd | 1a1b1b5282deb194aff5d1d3a2088caaf6e0e19b | [] | no_license | kashenfelter/packageAnalyzeR | 9e299dadfbe244cd1a02fba889438560fa694b74 | 67d9d5b63807dce8774316747cfc8880ca7a1753 | refs/heads/master | 2020-03-13T22:30:32.624372 | 2014-04-30T00:51:42 | 2014-04-30T00:51:42 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 223 | rd | contains_underscores.Rd | \name{contains_underscores}
\alias{contains_underscores}
\title{Contains underscores}
\usage{
contains_underscores(funs)
}
\arguments{
\item{funs}{Character vector}
}
\description{
Does the input contain an underscore
}
|
7e07fc81d08c74eae1892249fc8fb90acc6c3cc0 | 175150af7e81f24cd7b15e3e96c9442a1fc4d0fb | /R/data.R | 0bc4a7c3bade5856652ea3f15639c5a1af3caabe | [
"MIT"
] | permissive | TS404/WikidataR | 8c59f843d4d627bc884de7104cf97e6e5edd8607 | c97943349f69633a505029aa64b9f7e791754b3b | refs/heads/master | 2022-12-21T00:20:17.078052 | 2022-05-29T09:25:46 | 2022-05-29T09:25:46 | 249,677,195 | 25 | 5 | NOASSERTION | 2022-12-11T15:17:03 | 2020-03-24T10:24:11 | R | UTF-8 | R | false | false | 664 | r | data.R | #' @name WD.globalvar
#'
#' @title Global variables for Wikidata properties
#'
#' @description A dataset of Wikidata global variables.
#'
#' @format A list of tibbles documenting key property constraints from wikidata
#' \describe{
#'   \item{SID.valid}{valid reference source properties}
#'   \item{PID.datatype}{required data type for each property}
#'   \item{PID.constraint}{expected regex match for each property}
#'   \item{lang.abbrev}{language abbreviations}
#'   \item{lang.abbrev.wiki}{language abbreviations for current wikis}
#'   \item{abbrev.wiki}{Wikimedia abbreviations for current wikis}
#'   ...
#' }
# Declare WD.globalvar as a known global so R CMD check does not flag
# references to the lazy-loaded dataset as undefined variables.
utils::globalVariables(c("WD.globalvar"))
5d43444fb141cfa38f8796323765bd0bacfe0db2 | 24194d2bd6986aeb089e5557158470194923d128 | /man/tangent.Rd | 4b38ce78065fabe223b8a959e5988c21f0a66054 | [] | no_license | shizidushu/fsrs | 35e83a11a77c6110225748b90baf147f030b8ba7 | 32e9ff6e132c3eeee04f2ee9472735f9bdd2fd00 | refs/heads/master | 2021-01-25T14:16:43.047809 | 2018-08-03T03:51:24 | 2018-08-03T03:51:24 | 123,678,350 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 392 | rd | tangent.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/trigonometric.R
\name{tangent}
\alias{tangent}
\title{tangent}
\usage{
tangent(y, x)
}
\arguments{
\item{y}{y coordinate in a circle or opposite of an acute angle in terms of a right triangle}
\item{x}{x coordinate in a circle or adjacent of an acute angle in terms of a right triangle}
}
\description{
tangent
}
|
b4d63dc36a600e162f71d885111a7691796ddabe | dd131db159a5d9c28e28f5410d289ffe7a5e9f8e | /distribution/二項分布.R | 72263d47db5f8e372af461d2336412f36479d20b | [] | no_license | oshino-taihei/analytics_selfstudy | b580161d705a09060560c744606de0100dfcccbf | 6f650f1d05968e40e94e710b494035f392aa1219 | refs/heads/master | 2021-01-21T04:51:01.113843 | 2016-07-20T02:34:04 | 2016-07-20T02:34:04 | 54,970,595 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 861 | r | 二項分布.R | # B(n,p)の二項分布に従う試行をしたときの成功回数
# Simulate one draw from Binomial(n, p): run n Bernoulli(p) trials and count
# the successes. (Equivalent to rbinom(1, n, p), kept explicit for teaching.)
#
# n: number of trials.
# p: success probability (default 0.5).
# Returns a single integer count in 0..n.
bin <- function(n, p = 0.5) {
  # sample() pairs prob[k] with the k-th value of x, so the success
  # probability p must go with the value 1. The original passed
  # prob = c(p, 1 - p), which attached p to the value 0 and therefore
  # simulated B(n, 1 - p).
  sum(sample(0:1, size = n, replace = TRUE, prob = c(1 - p, p)))
}
# Draw 10,000 simulated success counts from B(n, p) and plot their relative
# frequencies as a density-scaled histogram (one bar per count in 0..n).
#
# n: number of trials per draw (default 20).
# p: success probability (default 0.5).
hist_bin <- function(n = 20, p = 0.5) {
  # replicate() is the idiomatic way to repeat a simulation; the original
  # sapply(c(1:10000), function(x) ...) ignored its index argument.
  counts <- replicate(10000, bin(n, p))
  # sep = "" glues the pieces together. The original passed `seq = ""`,
  # which paste() does NOT partially match to `sep`; the "" was silently
  # pasted in as an extra element with the default space separator.
  main <- paste("二項分布 B(", n, ", ", p, ")", sep = "")
  # NOTE: with breaks = 0:n, counts 0 and 1 share the first bin
  # (hist's include.lowest) — behaviour preserved from the original.
  hist(counts, breaks = 0:n, freq = FALSE, main = main, ylim = c(0, 0.30))
}
# Draw one B(20, 0.5) histogram and overlay the theoretical pmf on top of it.
hist_bin(20,0.5)
curve(dbinom(x, 20, 0.5), 0, 20, 21, add=TRUE)
# Render a sequence of animation frames while varying one parameter at a time.
# PNG files are written under out/, which must already exist for png() to open.
# B(n=20, p=[0.1,0.9])
for (i in seq(0.1, 0.9, 0.1)) {
  .file <- paste("out/plot", i, ".png", sep="")
  png(.file)
  hist_bin(20, i)
  dev.off()
}
# B(n=[10,100], p=0.5); the zero-padded index keeps frames in lexicographic order.
for (i in seq(10, 100, 10)) {
  .file <- paste("out/plot", sprintf("%03d", i), ".png", sep="")
  png(.file)
  hist_bin(i, 0.5)
  dev.off()
}
|
b97b0bd84ef911102cb338f1bc7f6d25aa4f71be | 2f94579f26bcbc190a21378221c55d205284e95f | /listings/ex-12.4.R | d5e96df57bbb932fc2bd89a824c551afd458cb5b | [] | no_license | Fifis/ekonometrika-bakalavr | 9fdf130d57446182fcf935e60d9276cc99d89c0e | c2d89a382d3eaf450a8f4f4dc73c77dd7c6eefd7 | refs/heads/master | 2021-01-20T20:42:12.937927 | 2016-07-23T17:14:35 | 2016-07-23T17:14:35 | 63,989,268 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 146 | r | ex-12.4.R | adfTest(data$CAC40, lags = 5, type ="c")
adfTest(data$DAX, lags = 5, type ="c")
# Install egcm only when it is missing: an unconditional install.packages()
# re-downloads the package on every run and can fail non-interactively.
if (!requireNamespace("egcm", quietly = TRUE)) {
  install.packages("egcm")
}
library(egcm)
# Engle-Granger cointegration test between the two index series.
egcm(data$CAC40,data$DAX)
|
03969872ffe5633b4fe5072cbe6282243a431ffc | 5eb30613428827b36c74ee9a46ea679b324a6d64 | /plot4.R | acc50a9de829a87eb5bcd2ddc0499a2caedd3523 | [] | no_license | irxum/ExData_Plotting1 | 69fd0e08ac3072ec3df55d5c341b7fd4f7c87a6a | d156dd81090d59d17bde7b0ecfc2c5d2753e4b91 | refs/heads/master | 2021-01-20T10:28:49.885161 | 2015-06-07T06:20:03 | 2015-06-07T06:20:03 | 37,006,663 | 0 | 0 | null | 2015-06-07T06:14:27 | 2015-06-07T06:14:26 | null | UTF-8 | R | false | false | 1,886 | r | plot4.R | #
#
# plot4.R
#
# Reads the 2007-02-01..2007-02-02 window of the household power data and
# draws a 2x2 panel of time-series plots, saved to plot4.png.
library(lubridate)
# this script assumes the raw data is in the working directory
# NOTE(review): dstop is read but never used below — candidate for removal.
dstop <- read.table("household_power_consumption.txt",nrows=25)
# Small header read: supplies the column names and the file's first timestamp.
dsok_top <- read.table("household_power_consumption.txt",nrows=25,header=TRUE,sep=";")
datetimestart <- dmy_hms(paste(as.character(dsok_top[1,1]),as.character(dsok_top[1,2])))
# need to get only the rows between Feb 1st and Feb 2nd in 2007
datedesired1 <- ymd_hms("2007-02-01 00:00:00")
datedesired2 <- ymd_hms("2007-02-02 23:59:00")
# Assumes one reading per minute, so row offsets equal time differences in
# minutes; the +1 on the skip count accounts for the header line.
dsrowstart <- as.numeric(difftime(datedesired1, datetimestart, units="mins"))+1
dsnrows <- as.numeric(difftime(datedesired2, datedesired1, units="mins"))+1
# Read only the desired two-day window of minute-level readings.
ds_feb07 <- read.table("household_power_consumption.txt", skip = dsrowstart, nrows= dsnrows,
                       sep=";")
names(ds_feb07) <- names(dsok_top)
# Combined date-time column used as the x axis of every panel.
ds_feb07$datetime <- dmy_hms(paste(ds_feb07$Date, ds_feb07$Time, sep=" "))
# NOTE(review): windows.options() exists only on Windows builds of R, so this
# line errors on other platforms.
windows.options(reset=TRUE)
# Plot 4
# start with calling png to set width and height of image
# and legend text scales appropriately
png(file = "plot4.png",width=480,height=480)
# Four panels filled row-wise, with tightened margins.
par(mfrow=c(2,2),mai=c(0.8,0.8,0.2,0.2))
with(ds_feb07, {
  # Top-left: global active power over time.
  plot(x=datetime,y=Global_active_power, type="l",
       xlab = "",
       ylab = "Global Active Power (kilowatts)")
  # Top-right: voltage over time.
  plot(x=datetime,y=Voltage, type="l",
       xlab = "datetime",
       ylab = "Voltage")
  # Bottom-left: the three sub-metering series overlaid, with a legend.
  plot(x=ds_feb07$datetime,y=ds_feb07$Sub_metering_1, type="l",
       xlab = "",
       ylab = "Energy sub metering")
  points(x=ds_feb07$datetime, y=ds_feb07$Sub_metering_2, type="l",col="red")
  points(x=ds_feb07$datetime, y=ds_feb07$Sub_metering_3, type="l",col="blue")
  legend("topright",lty=1,col=c("black","red","blue"),bty="n",
         legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
  # Bottom-right: global reactive power over time.
  plot(x=datetime,y=Global_reactive_power, lty=1,type="o",pch=".",
       xlab = "datetime",
       ylab = "Global_reactive_power")
})
dev.off()
eddb36416da6deebb5698d7dfb17d526de4cc975 | dc4f0aceb5543a8440c173d64acf26c2df996f8f | /plot1.R | 3f712349b9acf5b1af448703ea152161fa7226e0 | [] | no_license | nelson-pedernera/ExData_Plotting1 | 79890911b626ad3deba520a02fa4083394376d85 | 9264cbe27c10ef7ddbeca838c03e9dcee2e1d423 | refs/heads/master | 2021-01-22T12:44:58.667721 | 2016-01-13T00:33:30 | 2016-01-13T00:33:30 | 49,475,597 | 0 | 0 | null | 2016-01-12T04:46:11 | 2016-01-12T04:46:11 | null | UTF-8 | R | false | false | 742 | r | plot1.R | setwd("C:/Users/nelson.pedernera/Desktop/BigData/DataScience/MIV/01/CourseProject/GitHub/ExData_Plotting1/")
#----------------------------------------------------------------------------------------------------------------------------------
# Load the semicolon-delimited raw power-consumption file.
power <- read.csv("household_power_consumption.txt", sep = ";")
# Parse the date column and coerce the reading of interest to numeric.
power$Date <- as.Date(power$Date, "%d/%m/%Y")
power$Global_active_power <- as.numeric(as.character(power$Global_active_power))
# Keep only the two days of interest.
feb_days <- subset(power, Date == "2007-02-01" | Date == "2007-02-02")
# Draw the histogram on screen, then copy the current device to plot1.png.
hist(as.numeric(feb_days$Global_active_power),
     col = "red",
     xlab = "Global Active Power (kilowatts)",
     main = "Global Active Power")
dev.copy(png,'plot1.png')
dev.off()
0d6e43f3ac17cfeb40d69d0f51206b38ca0fabbc | 5113b035b5e5022e71fb6d065f7033a2ba2c1a96 | /R/data.R | 6fe47abf5ecb07bfaa7a13f98f0f777c556c658d | [] | no_license | cran/glmtlp | 2f9a636f47d3f35c0852608b5d8244aa58030956 | ec957cac73e6c7e82731a5ab6f8665392cf1b4bf | refs/heads/master | 2021-12-25T07:43:50.859511 | 2021-12-17T22:00:02 | 2021-12-17T22:00:02 | 120,621,828 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,581 | r | data.R | #' A simulated binomial data set.
#'
#' A data set simulated for illustrating logistic regression models. Generated by
#' \code{gen.binomial.data(n = 200, p = 20, seed = 2021)}.
#'
#' @format A list with three elements: design matrix \code{X}, response \code{y},
#' and the true coefficient vector \code{beta}.
#' \describe{
#' \item{X}{design matrix}
#' \item{y}{response}
#' \item{beta}{the true coefficient vector}
#' }
#'
#' @usage data(bin_data)
#'
#' @examples
#' data("bin_data")
#' cv.fit <- cv.glmtlp(bin_data$X, bin_data$y, family = "binomial", penalty = "l1")
#' plot(cv.fit)
#'
"bin_data"
#' A simulated gaussian data set.
#'
#' A data set simulated for illustrating linear regression models. Generated by
#' \code{gen.gaussian.data(n = 200, p = 20, seed = 2021)}.
#'
#' @format A list with four elements: design matrix \code{X}, response \code{y},
#' the true coefficient vector \code{beta}, and the noise level \code{sigma}.
#' \describe{
#' \item{X}{design matrix}
#' \item{y}{response}
#' \item{beta}{true beta values}
#' \item{sigma}{the noise level}
#' }
#'
#' @usage data(gau_data)
#'
#' @examples
#' data("gau_data")
#' cv.fit <- cv.glmtlp(gau_data$X, gau_data$y, family = "gaussian", penalty = "tlp")
#' plot(cv.fit)
#'
"gau_data"
#' Simulate a binomial data set
#'
#' @description
#' Simulate a data set with binary response following the logistic regression
#' model.
#'
#' @param n Sample size.
#' @param p Number of covariates.
#' @param rho The parameter defining the AR(1) correlation matrix.
#' @param kappa The number of nonzero coefficients.
#' @param beta.type Numeric indicator for choosing the beta type. For
#' \code{beta.type = 1}, the true coefficient vector has \code{kappa} components being 1,
#' roughly equally distributed between 1 to \code{p}. For \code{beta.type = 2},
#' the first \code{kappa} values are 1, and the rest are 0. For \code{beta.type = 3},
#' the first \code{kappa} values are equally-spaced values from 10 to 0.5, and
#' the rest are 0. For \code{beta.type = 4}, the first \code{kappa} values are
#' the first \code{kappa} values in c(-10, -6, -2, 2, 6, 10), and the rest are
#' 0. For \code{beta.type = 5}, the first \code{kappa} values are 1, and the
#' rest decay exponentially to 0 with base 0.5.
#' @param seed The seed for reproducibility. Default is 2021.
#'
#' @return A list containing the simulated data.
#' \item{X}{the covariate matrix, of dimension \code{n} x \code{p}.}
#' \item{y}{the response, of length \code{n}.}
#' \item{beta}{the true coefficients, of length \code{p}.}
#'
#' @examples
#' bin_data <- gen.binomial.data(n = 200, p = 20, seed = 2021)
#' head(bin_data$X)
#' head(bin_data$y)
#' head(bin_data$beta)
#'
#' @importFrom stats rnorm
#' @importFrom stats rbinom
#' @importFrom stats plogis
#' @export gen.binomial.data
#'
gen.binomial.data <- function(n, p, rho = 0, kappa = 5, beta.type = 1, seed = 2021) {
  # Draw the raw N(0, 1) design, then (optionally) induce AR(1) correlation
  # column by column: col_j = sqrt(1 - rho^2) * col_j + rho * col_{j-1}.
  set.seed(seed)
  design <- matrix(rnorm(n * p), n, p)
  if (rho != 0) {
    for (col in 2:p) {
      design[, col] <- sqrt(1 - rho^2) * design[, col] + rho * design[, col - 1]
    }
  }
  # True coefficients, success probabilities via the logistic link, and the
  # Bernoulli responses.
  coefs <- gen.beta(kappa, p, beta.type)
  probs <- plogis(as.numeric(design %*% coefs))
  response <- rbinom(n, 1, probs)
  if (p > 1) {
    colnames(design) <- paste("V", seq(p), sep = "")
  }
  list(X = design, y = response, beta = coefs)
}
#' Simulate a gaussian data set
#'
#' @description
#' Simulate a data set with gaussian response following the linear regression
#' model.
#'
#' @param n Sample size.
#' @param p Number of covariates.
#' @param rho The parameter defining the AR(1) correlation matrix.
#' @param kappa The number of nonzero coefficients.
#' @param beta.type Numeric indicator for choosing the beta type. For
#' \code{beta.type = 1}, the true coefficient vector has \code{kappa} components being 1,
#' roughly equally distributed between 1 to \code{p}. For \code{beta.type = 2},
#' the first \code{kappa} values are 1, and the rest are 0. For \code{beta.type = 3},
#' the first \code{kappa} values are equally-spaced values from 10 to 0.5, and
#' the rest are 0. For \code{beta.type = 4}, the first \code{kappa} values are
#' the first \code{kappa} values in c(-10, -6, -2, 2, 6, 10), and the rest are
#' 0. For \code{beta.type = 5}, the first \code{kappa} values are 1, and the
#' rest decay exponentially to 0 with base 0.5.
#' @param snr Signal-to-noise ratio. Default is 1.
#' @param seed The seed for reproducibility. Default is 2021.
#'
#' @return A list containing the simulated data.
#' \item{X}{the covariate matrix, of dimension \code{n} x \code{p}.}
#' \item{y}{the response, of length \code{n}.}
#' \item{beta}{the true coefficients, of length \code{p}.}
#' \item{sigma}{the standard error of the noise.}
#'
#' @examples
#' gau_data <- gen.gaussian.data(n = 200, p = 20, seed = 2021)
#' head(gau_data$X)
#' head(gau_data$y)
#' head(gau_data$beta)
#' gau_data$sigma
#'
#' @importFrom stats rnorm
#' @export gen.gaussian.data
gen.gaussian.data <- function(n, p, rho = 0, kappa = 5, beta.type = 1, snr = 1, seed = 2021) {
  set.seed(seed)
  # Raw N(0, 1) design, optionally given AR(1) cross-column correlation.
  design <- matrix(rnorm(n * p), n, p)
  if (rho != 0) {
    for (col in 2:p) {
      design[, col] <- sqrt(1 - rho^2) * design[, col] + rho * design[, col - 1]
    }
  }
  coefs <- gen.beta(kappa, p, beta.type)
  # Population variance of x' beta under the AR(1) covariance
  # (|L' beta|^2 = beta' Sigma beta); the noise sd is chosen so that
  # signal variance / sigma^2 equals the requested snr.
  signal_var <- sum((crossprod(cholesky.ar1.root(rho, p), coefs))^2)
  sigma <- sqrt(signal_var / snr)
  response <- as.numeric(design %*% coefs + rnorm(n) * sigma)
  if (p > 1) {
    colnames(design) <- paste("V", seq(p), sep = "")
  }
  list(X = design, y = response, beta = coefs, sigma = sigma)
}
# Build the length-p true coefficient vector for one of five sparsity
# patterns; at most kappa (capped at p) leading/spread entries are nonzero.
gen.beta <- function(kappa, p, beta.type) {
  kappa <- min(kappa, p)
  b <- rep(0, p)
  if (beta.type == 1) {
    # kappa ones spread roughly evenly over the p positions.
    b[round(seq(1, p, length = kappa))] <- 1
    return(b)
  }
  if (beta.type == 2) {
    # kappa leading ones.
    b[1:kappa] <- 1
    return(b)
  }
  if (beta.type == 3) {
    # Leading coefficients decay linearly from 10 down to 0.5.
    b[1:kappa] <- seq(10, 0.5, length = kappa)
    return(b)
  }
  if (beta.type == 4) {
    # Fixed six-value pattern. NOTE(review): written regardless of kappa and
    # assumes p >= 6 — mirrors the original behaviour.
    b[1:6] <- c(-10, -6, -2, 2, 6, 10)
    return(b)
  }
  # Default: kappa leading ones followed by a geometric (base 0.5) decay.
  b[1:kappa] <- 1
  if (kappa + 1 <= p) {
    b[(kappa + 1):p] <- 0.5^(1:(p - kappa))
  }
  b
}
# Closed-form lower-triangular Cholesky root L of the p x p AR(1) correlation
# matrix Sigma with Sigma[i, j] = rho^|i - j|, i.e. tcrossprod(L) == Sigma.
# reference 1: https://blogs.sas.com/content/iml/2018/10/03/ar1-cholesky-root-simulation.html
# reference 2: https://www.ncbi.nlm.nih.gov/pmc/articles/PMC4455603/ (Direct formulation to Cholesky decomposition of a general nonsingular correlation matrix)
cholesky.ar1.root <- function(rho, p) {
  if (rho == 0) {
    # Identity correlation: the root is the identity matrix.
    return(diag(1, p))
  }
  L <- matrix(0, nrow = p, ncol = p)
  L[, 1] <- rho^(0:(p - 1))
  # Each later column repeats the first, scaled by sqrt(1 - rho^2) and
  # shifted down the diagonal.
  scaled <- sqrt(1 - rho^2) * L[, 1]
  # seq_len(p)[-1] is empty when p == 1; the original `2:p` evaluated to
  # c(2, 1) there and indexed out of bounds.
  for (i in seq_len(p)[-1]) {
    L[i:p, i] <- scaled[1:(p - i + 1)]
  }
  L
}
|
faf9f7204473fb9dc32e4761a1b2c0573a324226 | 66ee5b9cbe7f6b3a745cc8174deda69ef6b833b8 | /man/readXCI.Rd | 623e72ef9ae4e2ccbefb5dc404128418e5f3739b | [] | no_license | SRenan/XCIR | 5e4d2299ea57edbff200793e8d91f7814b27c79a | 4e51efe9980056e7fe274224da0173fcfaf2edd7 | refs/heads/master | 2021-10-12T07:06:51.967393 | 2021-10-04T20:36:54 | 2021-10-04T20:36:54 | 69,993,016 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,291 | rd | readXCI.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utils.R
\name{readXCI}
\alias{readXCI}
\title{Read a list of known inactivated genes}
\usage{
readXCI(xciGenes = NULL)
}
\arguments{
\item{xciGenes}{A \code{character} or code{NULL}. By defaults, return a
vector of 177 genes. Other available choices include "cotton" and "intersect".
If a file path is given, the genes will be read from the file.}
}
\value{
A \code{character} vector of gene names.
}
\description{
Read a list of gene symbols of known inactivated genes
to be used as training set in \code{betaBinomXI}.
}
\details{
Both gene lists are extracted from Cotton et al. Genome
Biology (2013). doi:10.1186/gb-2013-14-11-r122.
By default, the function returns a list that was used as training set in
the paper. This training set was generated as the intersection of the
silenced genes identified by both expression (Carrel & Willard, 2005) and
DNA methylation analysis (Cotton et al, 2011).
Setting it to "cotton" will instead return a list of 294 genes that were
classified as inactivated by Cotton et al.
"intersect" is the most stringent list which returns the intersection of
training and predicted set.
}
\examples{
xcig <- readXCI()
xcig <- readXCI("cotton")
}
\seealso{
\code{betaBinomXI}
}
|
502a7c6094985b77e06257d7a8537ee0aff4629e | 82811c38e5f5222327b484edaac06eafe5a7ac59 | /man/cc_crossclustering.Rd | 95da8f8e282e1d06fc02e33081e4d177d9933bd6 | [] | no_license | CorradoLanera/CrossClustering | 36b49c6e7e1391daa4d6d728cbcb6e5d136f0480 | 19c1352b5bc2829eb32ac842b1a8bd9a70ef5893 | refs/heads/master | 2021-03-30T18:16:36.757604 | 2018-07-30T14:40:04 | 2018-07-30T14:40:04 | 114,754,093 | 2 | 0 | null | 2018-07-29T09:40:05 | 2017-12-19T10:54:35 | R | UTF-8 | R | false | true | 5,909 | rd | cc_crossclustering.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cc_crossclustering.R
\name{cc_crossclustering}
\alias{cc_crossclustering}
\alias{print.crossclustering}
\title{A partial clustering algorithm with automatic estimation of
the number of clusters and identification of outliers}
\usage{
cc_crossclustering(dist, k_w_min = 2, k_w_max = attr(dist, "Size") - 2,
k2_max = k_w_max + 1, out = TRUE, method = c("complete", "single"))
\method{print}{crossclustering}(x, ...)
}
\arguments{
\item{dist}{A dissimilarity structure as produced by the function
\code{dist}}
\item{k_w_min}{[int] Minimum number of clusters for the Ward's minimum
variance method. By default is set equal 2}
\item{k_w_max}{[int] Maximum number of clusters for the Ward's minimum
variance method (see details)}
\item{k2_max}{[int] Maximum number of clusters for the
Complete/Single-linkage method. It can not be equal or greater
than the number of elements to cluster (see details)}
\item{out}{[lgl] If \code{TRUE} (default) outliers must be searched (see
details)}
\item{method}{[chr] "complete" (default) or "single". CrossClustering
combines Ward's algorithm with Complete-linkage if method is set
to "complete", otherwise (if method is set to 'single')
Single-linkage will be used.}
\item{x}{an object used to select a method.}
\item{...}{further arguments passed to or from other methods.}
}
\value{
A list of objects describing characteristics of the partitioning
as follows:
\item{Optimal_cluster}{number of clusters}
\item{Cluster_list}{a list of clusters; each element of this
lists contains the indices of the elements belonging to the
cluster}
\item{Silhouette}{the average silhouette width over all the
clusters}
\item{n_total}{total number of input elements}
\item{n_clustered}{number of input elements that have actually
been clustered}
}
\description{
This function performs the CrossClustering algorithm. This method
combines the Ward's minimum variance and Complete-linkage (default,
useful for finding spherical clusters) or Single-linkage (useful for
finding elongated clusters) algorithms, providing automatic estimation of
a suitable number of clusters and identification of outlier elements.
print method for crossclustering class
}
\details{
See cited document for more details.
}
\section{Methods (by generic)}{
\itemize{
\item \code{print}:
}}
\examples{
library(CrossClustering)
#### Example of Cross-Clustering as in reference paper
#### method = "complete"
data(toy)
### toy is transposed as we want to cluster samples (columns of the
### original matrix)
toy_dist <- t(toy) \%>\%
dist(method = "euclidean")
### Run CrossClustering
cc_crossclustering(toy_dist,
k_w_min = 2,
k_w_max = 5,
k2_max = 6,
out = TRUE
)
#### Simulated data as in reference paper
#### method = "complete"
set.seed(10)
sg <- c(500, 250, 700, 300, 100)
# 5 clusters
t <- matrix(0, nrow = 5, ncol = 5)
t[1, ] <- rep(6, 5)
t[2, ] <- c( 0, 5, 12, 13, 15)
t[3, ] <- c(15, 11, 9, 5, 0)
t[4, ] <- c( 6, 12, 15, 10, 5)
t[5, ] <- c(12, 17, 3, 7, 10)
t_mat <- NULL
for (i in seq_len(nrow(t))){
t_mat <- rbind(
t_mat,
matrix(rep(t[i, ], sg[i]), nrow = sg[i], byrow = TRUE)
)
}
data_15 <- matrix(NA, nrow = 2000, ncol = 5)
data_15[1:1850, ] <- matrix(abs(rnorm(sum(sg) * 5, sd = 1.5)),
nrow = sum(sg),
ncol = 5
) + t_mat
set.seed(100) # simulate outliers
data_15[1851:2000, ] <- matrix(
runif(n = 150 * 5, min = 0, max = max(data_15, na.rm = TRUE)),
nrow = 150,
ncol = 5
)
### Run CrossClustering
cc_crossclustering(dist(data_15),
k_w_min = 2,
k_w_max = 19,
k2_max = 20,
out = TRUE
)
#### Correlation-based distance is often used in gene expression time-series
### data analysis. Here there is an example, using the "complete" method.
data(nb_data)
nb_dist <- as.dist(1 - abs(cor(t(nb_data))))
cc_crossclustering(dist = nb_dist, k_w_max = 20, k2_max = 19)
#### method = "single"
### Example on a famous shape data set
### Two moons data
data(twomoons)
moons_dist <- twomoons[, 1:2] \%>\%
dist(method = "euclidean")
cc_moons <- cc_crossclustering(moons_dist,
k_w_max = 9,
k2_max = 10,
method = 'single'
)
moons_col <- cc_get_cluster(cc_moons)
plot(twomoons[, 1:2], col = moons_col,
pch = 19,
xlab = "",
ylab = "",
main = "CrossClustering-Single"
)
### Worms data
data(worms)
worms_dist <- worms[, 1:2] \%>\%
dist(method = "euclidean")
cc_worms <- cc_crossclustering(worms_dist,
k_w_max = 9,
k2_max = 10,
method = 'single'
)
worms_col <- cc_get_cluster(cc_worms)
plot(worms[, 1:2], col = worms_col,
pch = 19,
xlab = "",
ylab = "",
main = "CrossClustering-Single"
)
### CrossClustering-Single is not affected to chain-effect problem
data(chain_effect)
chain_dist <- chain_effect \%>\%
dist(method = "euclidean")
cc_chain <- cc_crossclustering(chain_dist,
k_w_max = 9,
k2_max = 10,
method = 'single'
)
chain_col <- cc_get_cluster(cc_chain)
plot(chain_effect, col = chain_col,
pch = 19,
xlab = "",
ylab = "",
main = "CrossClustering-Single"
)
}
\references{
Tellaroli P, Bazzi M., Donato M., Brazzale A. R., Draghici S. (2016).
Cross-Clustering: A Partial Clustering Algorithm with Automatic
Estimation of the Number of Clusters. PLoS ONE 11(3): e0152333.
doi:10.1371/journal.pone.0152333
Tellaroli P, Bazzi M., Donato M., Brazzale A. R., Draghici S. (2017).
E1829: Cross-Clustering: A Partial Clustering Algorithm with Automatic
Estimation of the Number of Clusters. CMStatistics 2017, London 16-18
December, Book of Abstracts (ISBN 978-9963-2227-4-2)
}
\author{
Paola Tellaroli, <paola [dot] tellaroli [at] unipd [dot] it>;;
Marco Bazzi, <bazzi [at] stat [dot] unipd [dot] it>;
Michele Donato, <mdonato [at] stanford [dot] edu>
}
|
3e8e13ce425ec4876dbf0dc8726dedede2c5be76 | a21c7571d37d95cd1f826924d623a041c24393f3 | /manuscript/pandoc/bin/render_html.R | fa2e4bb522eb0daf3467352a7f9a372489e53298 | [] | no_license | andrewheiss/covid-derogations | ccd24888d316aa2bb4314f6f1d90089fb36ea225 | 30827292032344fe95b1ce205e8c55bad3f920e2 | refs/heads/main | 2023-03-24T23:14:23.118019 | 2021-03-27T02:15:12 | 2021-03-27T02:15:12 | 348,423,163 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,246 | r | render_html.R | #!/usr/bin/env Rscript
args <- R.utils::cmdArgs()

# Validate the required command-line arguments up front; stopifnot()'s named
# form uses each name as the error message when its check fails.
stopifnot("Specify an input file using -input\n(e.g. `-input manuscript.Rmd`)" = is.character(args$input),
          "Specify an output file using -output\n(e.g. `-output manuscript.html`)" = is.character(args$output),
          "Specify a CSL file using -csl\n(e.g. `-csl pandoc/csl/apa.csl`)" = is.character(args$csl))

# Directory holding the pandoc support files (templates/css); defaults to
# "pandoc". Plain if/else replaces the original scalar misuse of ifelse()
# and fixes the "pandoc_suport" spelling.
pandoc_support <- if (is.null(args$pandoc)) "pandoc" else args$pandoc

# Register the CSS file as an HTML dependency so it is copied into the
# generated *_files directory alongside the document.
dep <- htmltools::htmlDependency(
  name = "ath",
  version = "1.0.0",
  paste0(pandoc_support, "/css"),
  stylesheet = "ath-clean.css"
)
extra_dependencies <- list(dep)

# Render the R Markdown input to HTML with the custom template and CSL style.
rmarkdown::render(
  input = args$input,
  output_file = args$output,
  bookdown::html_document2(
    template = paste0(pandoc_support, "/templates/html.html"),
    pandoc_args = c("--metadata", "link-citations=true",
                    "--metadata", "linkReferences=true",
                    paste0("--csl=", args$csl)),
    md_extensions = "+raw_tex+smart-autolink_bare_uris+ascii_identifiers",
    toc = TRUE,
    number_sections = FALSE,
    self_contained = FALSE,
    theme = NULL,
    extra_dependencies = extra_dependencies
  )
)
66555550321108f4dd4ed2dcb779c050ff47d603 | 5d00314e5a831232aee5b22eff35700154cfbb08 | /man/FLXMCfactanal.Rd | 204c0910d5c8bfcdd0a45df8d99d6610bb832db0 | [] | no_license | cran/flexmix | e60085cbe878fea1e2bfca9e21c74eb42dc3e594 | 79b16f19b9d44c48bd743882b618b3504dd44b31 | refs/heads/master | 2023-04-06T10:14:48.855288 | 2023-03-16T19:50:08 | 2023-03-16T19:50:08 | 17,696,081 | 6 | 7 | null | null | null | null | UTF-8 | R | false | false | 2,390 | rd | FLXMCfactanal.Rd | \name{FLXMCfactanal}
\alias{FLXMCfactanal}
\alias{rFLXM,FLXMCfactanal,FLXcomponent-method}
\title{Driver for Mixtures of Factor Analyzers}
\description{
This driver for \code{\link{flexmix}} implements estimation of mixtures of
factor analyzers using ML estimation of factor analysis implemented in
\code{factanal} in each M-step.
}
\usage{
FLXMCfactanal(formula = . ~ ., factors = 1, ...)
}
\arguments{
\item{formula}{A formula which is interpreted relative to the formula
specified in the call to \code{\link{flexmix}} using
\code{\link{update.formula}}. Only the left-hand side (response) of
the formula is used. Default is to use the original
\code{\link{flexmix}} model
formula.}
\item{factors}{Integer specifying the number of factors in each component.}
\item{\dots}{Passed to \code{factanal}}
}
\value{
\code{FLXMCfactanal} returns an object of class \code{FLXM}.
}
\references{
G. McLachlan and D. Peel. \emph{Finite Mixture Models}, 2000.
John Wiley and Sons Inc.
}
\author{Bettina Gruen}
\section{Warning}{
This does not implement the AECM framework presented in McLachlan and
Peel (2000, p.245), but uses the available functionality in R for ML
estimation of factor analyzers. The implementation therefore is only
experimental and has not been well tested.
Please note that in general a good initialization is crucial for the
EM algorithm to converge to a suitable solution for this model class.
}
\seealso{\code{\link{flexmix}}}
\examples{
## Reproduce (partly) Table 8.1. p.255 (McLachlan and Peel, 2000)
if (require("gclus")) {
data("wine", package = "gclus")
wine_data <- as.matrix(wine[,-1])
set.seed(123)
wine_fl_diag <- initFlexmix(wine_data ~ 1, k = 3, nrep = 10,
model = FLXMCmvnorm(diagonal = TRUE))
wine_fl_fact <- lapply(1:4, function(q) flexmix(wine_data ~ 1, model =
FLXMCfactanal(factors = q, nstart = 3),
cluster = posterior(wine_fl_diag)))
sapply(wine_fl_fact, logLik)
## FULL
set.seed(123)
wine_full <- initFlexmix(wine_data ~ 1, k = 3, nrep = 10,
model = FLXMCmvnorm(diagonal = FALSE))
logLik(wine_full)
## TRUE
wine_true <- flexmix(wine_data ~ 1, cluster = wine$Class,
model = FLXMCmvnorm(diagonal = FALSE))
logLik(wine_true)
}
}
\keyword{models}
|
1608e375d00c3c5e7136b2c200cea013d2e61331 | e8ab695b941a3a1d9920990ca1f83c498d9abf7c | /hw1.R | 3645f503c7770d22aebf8b075558ac0d6a2050d9 | [] | no_license | broshni/RProgrammingCoursera | 3895946da8a622f053e842320249d6ad67f6a9f7 | ba1aa5f50cb6d508c9a503008e23915b06181f67 | refs/heads/master | 2022-11-18T17:21:41.162534 | 2020-07-04T00:47:06 | 2020-07-04T00:47:06 | 271,906,498 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,248 | r | hw1.R | ## In the dataset provided for this Quiz, what are the column names of the dataset?
## Q1: column names of the dataset.
quiz_data <- read.csv("hw1_data.csv")
names(quiz_data)
## Q2: number of observations (rows) in the data frame.
nrow(quiz_data)
## Q3: preview the last rows of the data frame.
tail(quiz_data)
## Q4: value of Ozone in the 47th row.
quiz_data$Ozone[47]
## Q5: number of missing values in the Ozone column.
ozone_missing <- is.na(quiz_data$Ozone)
sum(ozone_missing)
## Q6: mean of Ozone, excluding missing values.
mean(quiz_data$Ozone[!ozone_missing])
## Q7: mean of Solar.R where Ozone > 31 and Temp > 90.
hot_high_ozone <- subset(subset(quiz_data, Ozone > 31), Temp > 90)
mean(hot_high_ozone$Solar.R)
## Q8: mean of Temp when Month is 6 (June).
june_rows <- subset(quiz_data, Month == 6)
mean(june_rows$Temp)
## Q9: maximum Ozone value in May (Month == 5).
may_rows <- subset(quiz_data, Month == 5)
sort(may_rows$Ozone, decreasing = TRUE)
de41be47b25a984f8a5f5abf2ce47d422b578467 | 6464efbccd76256c3fb97fa4e50efb5d480b7c8c | /paws/man/devicefarm_get_test_grid_project.Rd | fab3fc6d596b818aaada891ee529c7dc19bf1415 | [
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | johnnytommy/paws | 019b410ad8d4218199eb7349eb1844864bd45119 | a371a5f2207b534cf60735e693c809bd33ce3ccf | refs/heads/master | 2020-09-14T23:09:23.848860 | 2020-04-06T21:49:17 | 2020-04-06T21:49:17 | 223,286,996 | 1 | 0 | NOASSERTION | 2019-11-22T00:29:10 | 2019-11-21T23:56:19 | null | UTF-8 | R | false | true | 634 | rd | devicefarm_get_test_grid_project.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/devicefarm_operations.R
\name{devicefarm_get_test_grid_project}
\alias{devicefarm_get_test_grid_project}
\title{Retrieves information about a Selenium testing project}
\usage{
devicefarm_get_test_grid_project(projectArn)
}
\arguments{
\item{projectArn}{[required] The ARN of the Selenium testing project, from either
CreateTestGridProject or ListTestGridProjects.}
}
\description{
Retrieves information about a Selenium testing project.
}
\section{Request syntax}{
\preformatted{svc$get_test_grid_project(
projectArn = "string"
)
}
}
\keyword{internal}
|
15e72cfb8731157a1d14e9fd253ef58b50872be8 | f9e55b969b7acf64ba5222e4fa665dc0931e35da | /tests/testthat/test-preCheck1.R | b7d581bb9af1c65f111cc34d62928828a1105c3c | [
"MIT"
] | permissive | ntsim/dfe-published-data-qa | 48747772391e64cfc3553dd674fc9d06bd420be2 | 0b558b3fceb4a0ba41ddeffc492bcd4d2314ad23 | refs/heads/master | 2023-07-14T16:52:46.112112 | 2021-07-21T16:10:08 | 2021-07-21T16:10:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,512 | r | test-preCheck1.R | context("preCheck1Functions")
# Tests for the preCheck1 screening functions. Each test runs a known-bad
# fixture directory under tests/testthat/preCheck1/ through
# `testIndividualTest()` (a project-level helper defined in the test setup)
# and expects the overall screening result to be "FAIL".
pathStart <- "preCheck1/"

# invalid_meta_cols: metadata file with invalid column names ----
test_that("invalid_meta_cols", {
  expect_equal(testIndividualTest(pathStart, "invalid_meta_cols"), "FAIL")
})

# meta_to_data_crosscheck: metadata inconsistent with the data file ----
test_that("meta_to_data_crosscheck", {
  expect_equal(testIndividualTest(pathStart, "meta_to_data_crosscheck"), "FAIL")
})

# time_identifier: invalid time identifier values ----
test_that("time_identifier", {
  expect_equal(testIndividualTest(pathStart, "time_identifier"), "FAIL")
})

# geographic_level: invalid geographic level values ----
test_that("geographic_level", {
  expect_equal(testIndividualTest(pathStart, "geographic_level"), "FAIL")
})

# col_name_completed: reserved/auto-completed column name present ----
test_that("col_name_completed", {
  expect_equal(testIndividualTest(pathStart, "col_name_completed"), "FAIL")
})

# duplicate_variable_names: the same variable name appears twice ----
test_that("duplicate_variable_names", {
  expect_equal(testIndividualTest(pathStart, "duplicate_variable_names"), "FAIL")
})
66b02482601114832158ab030323cb4c23c49e25 | e0df9b803c7863832d405f47c4272f8b9f1f1c6a | /R/weather_fips.R | ea25a33b4ad5c42fe40da8958922d8021847bb94 | [] | no_license | jferreri/countyweather | 5a3918055daefa2b88af9efedda4dfcae50b4c1c | 3e04af3c6e05e75da0900e0d393e5fae69ad9f9b | refs/heads/master | 2021-01-17T05:18:22.722937 | 2016-04-21T15:21:53 | 2016-04-21T15:21:53 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,742 | r | weather_fips.R | #' Return average daily weather data for a particular county.
#'
#' \code{weather_fips} returns a data.frame of average daily precipitation,
#' maximum and minimum temperature values for a particular county, date range,
#' and specified "coverage."
#'
#' This function serves as a wrapper to several functions from the
#' \code{rnoaa} package, which provide weather data from all relevant
#' monitors in a county, and then this function filters and averages
#' across monitors based on user-specified coverage specifications.
#'
#' @note Because this function uses the NOAA API to identify the weather
#' monitors within a US county, you will need to get an access token from
#' NOAA to use this function. Visit NOAA's token request page
#' (\url{http://www.ncdc.noaa.gov/cdo-web/token}) to request a token by
#' email, and then use the code
#' \code{options("noaakey" = "<key the NOAA emails you>")} to set up your
#' API access.
#'
#' @param fips A character string giving the five-digit U.S. FIPS county code
#' of the county for which the user wants to pull weather data.
#' @param percent_coverage A numeric value in the range of 0 to 1 that specifies the
#' desired percentage coverage for the weather variable (i.e., what percent
#' of each weather variable must be non-missing to include data from a
#' monitor when calculating daily values averaged across monitors. The
#' default is 0.90 (90% non-missing observations required to include a
#' monitor in averaging).
#' @param date_min A character string giving the earliest date you want
#' in your dataset in "yyyy-mm-dd" format. -
#' \code{date_min}.
#' @param date_max A character string giving the latest date you want
#' in your dataset in "yyyy-mm-dd" format. -
#'
#' @return A dataframe with
#'
#' @examples
#' \dontrun{
#' df <- weather_fips(fips = "06037", percent_coverage = 0.90,
#' min_date = "1999-01-01", max_date = "2012-12-31")
#' }
#'
#' @export
weather_fips <- function(fips, percent_coverage, date_min, date_max){
# get stations for 1 fips
# fips_stations() from weather_fips function.R in countyweather
stations <- fips_stations(fips, date_min, date_max)
# get tidy full dataset for all monitors
# clean_daily() and meteo_pull_monitors() from helpers_ghcnd.R in
# openscilabs/rnoaa
monitors <- meteo_pull_monitors(monitors = stations,
date_min,
date_max,
var = c("tmin", "tmax", "prcp"))
# calculate coverage for each variable (prcp, tmax, tmin)
# meteo_coverage() from meteo_utils.R in rnoaaopenscilabs
coverage_df <- meteo_coverage(monitors, verbose = FALSE)
# filter station dataset based on specified coverage
filtered <- filter_coverage(coverage_df, percent_coverage)
good_monitors <- unique(filtered$id)
# filter weather dataset based on stations w/ specified coverage
filtered_data <- gather(monitors, key, value, -id, -date) %>%
left_join(filtered, by = c("id", "key")) %>%
filter(id %in% good_monitors) %>%
mutate(value = value * covered) %>%
select(-covered) %>%
spread(key = key, value = value)
# average across stations, add a column for number of stations that contributed
# to each daily average
averaged <- ave_weather(filtered_data)
return(averaged)
}
#' Average weather data across multiple stations
#'
#' Collapses a per-station daily weather data frame to one row per date,
#' averaging each weather variable across stations and counting how many
#' stations reported each variable on each day.
#'
#' @param filtered_data data frame with columns \code{id}, \code{date} and
#'   one column per weather variable (as built inside
#'   \code{\link{weather_fips}}).
#' @return data frame with one row per date: the across-station mean of each
#'   variable plus matching \code{*_reporting} monitor counts.
#' @export
ave_weather <- function(filtered_data){
  # Mean of each variable per day across stations (NAs dropped).
  averaged_data <- gather(filtered_data, key, value, -id, -date) %>%
    ddply(c("date", "key"), summarize,
          mean = mean(value, na.rm = TRUE)) %>%
    spread(key = key, value = mean)
  # Number of stations with a non-missing value, per day and variable.
  n_reporting <- gather(filtered_data, key, value, -id, -date) %>%
    ddply(c("date", "key"), summarize,
          n_reporting = sum(!is.na(value))) %>%
    mutate(key = paste(key, "reporting", sep = "_")) %>%
    spread(key = key, value = n_reporting)
  averaged_data <- left_join(averaged_data, n_reporting,
                             by = "date")
  return(averaged_data)
}
#' Filter stations based on "coverage" requirements
#'
#' \code{filter_coverage} filters available weather variables
#' based on a specified required minimum coverage (i.e., percent non-missing
#' daily observations).
#'
#' @param coverage_df a \code{meteo_coverage} data.frame
#' @inheritParams weather_fips
#'
#' @return a long-format \code{data.frame} (\code{id}, \code{key},
#'   \code{covered}) restricted to station/variable pairs that meet the
#'   coverage requirement for \code{prcp}, \code{tmax}, and \code{tmin};
#'   \code{covered} is set to 1 so it can be used multiplicatively.
#'
#' @export
filter_coverage <- function(coverage_df, percent_coverage){
  filtered <- select(coverage_df, -start_date, -end_date, -total_obs) %>%
    gather(key, covered, -id) %>%
    filter(covered >= percent_coverage) %>%
    # replace the coverage fraction with a 1/NA indicator
    mutate(covered = 1) %>%
    # keep only stations with at least one qualifying variable
    group_by(id) %>%
    mutate(good_monitor = sum(!is.na(covered)) > 0) %>%
    ungroup() %>%
    filter(good_monitor) %>%
    select(-good_monitor)
  return(filtered)
}
#' Plot of stations for a particular FIPS
#'
#' Maps the weather monitors that meet the coverage requirement for a
#' county, colouring each by its share of missing precipitation values.
#'
#' @inheritParams weather_fips
#' @return a \code{ggmap} plot object.
#'
#' @examples
#' \dontrun{
#' ex <- stationmap_fips("08031", 0.90, "2000-01-01", "2010-12-31")
#' }
#'
#' @export
stationmap_fips <- function(fips, percent_coverage, date_min, date_max){
  stations <- fips_stations(fips, date_min, date_max)
  monitors <- meteo_pull_monitors(monitors = stations,
                                  date_min = date_min,
                                  date_max = date_max,
                                  var = c("tmin", "tmax", "prcp"))
  coverage_df <- meteo_coverage(monitors, verbose = FALSE)
  filtered <- filter_coverage(coverage_df, percent_coverage)
  good_monitors <- unique(filtered$id)
  # NOTE(review): `station_df` is not defined in this function or anywhere
  # visible in this file -- presumably a station metadata table with
  # longitude/latitude/id columns. As written this line errors unless
  # `station_df` exists in the calling environment; confirm the intended
  # object and pass it explicitly.
  df <- mapping(station_df)
  station_latlong <- filter(df, df$id %in% good_monitors)
  monitors <- filter(monitors, monitors$id %in% good_monitors)
  # Percent missing per station and variable, spread to one column each.
  perc_missing <- gather(monitors, key, value, -id, -date) %>%
    ddply(c("id", "key"), summarize,
          percent_missing = sum(is.na(value)) / length(value)) %>%
    mutate(key = paste(key, "percent_missing", sep = "_")) %>%
    spread(key = key, value = percent_missing)
  final_df <- left_join(station_latlong, perc_missing, by = "id")
  # Base map centred on the first station's coordinates.
  map <- ggmap::get_map(location = c(lon = final_df$lon[1],
                                     lat = final_df$lat[1]),
                        zoom = 9, maptype = "toner")
  map <- ggmap::ggmap(map) +
    geom_point(data = final_df, aes(x = lon, y = lat, color = prcp_percent_missing),
               size = 3)
  # prcp_percent_missing for example - prob want to be able to specify what
  # weather variable you want here
  return(map)
}
#' Convert an NCDC station table to a minimal lon/lat/id data frame.
#'
#' Keeps only the coordinate and id columns, renames them to `lon`, `lat`
#' and `id`, and strips the "GHCND:" prefix from the station ids.
#'
#' @param ncdcdf station data frame with `longitude`, `latitude`, `id`.
#' @return data frame with columns `lon`, `lat`, `id`.
mapping <- function(ncdcdf){
  coords <- select(ncdcdf, longitude, latitude, id)
  names(coords) <- c("lon", "lat", "id")
  coords$id <- gsub("GHCND:", "", coords$id)
  coords
}
|
a029b5747c57370bdf485a4b4feb467606bd2d80 | 360df3c6d013b7a9423b65d1fac0172bbbcf73ca | /FDA_Pesticide_Glossary/5-cyano-_pyrimidine.R | c942fec3f1eb836ac7d410a1b9a399d058aee6bb | [
"MIT"
] | permissive | andrewdefries/andrewdefries.github.io | 026aad7bd35d29d60d9746039dd7a516ad6c215f | d84f2c21f06c40b7ec49512a4fb13b4246f92209 | refs/heads/master | 2016-09-06T01:44:48.290950 | 2015-05-01T17:19:42 | 2015-05-01T17:19:42 | 17,783,203 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 270 | r | 5-cyano-_pyrimidine.R | library("knitr")
# rgl is attached so the .Rmd can be knitted -- presumably for 3D plots in
# the report; confirm against the Rmd's chunks.
library("rgl")
# Alternative render pipelines, kept for reference:
#knit("5-cyano-_pyrimidine.Rmd")
#markdownToHTML('5-cyano-_pyrimidine.md', '5-cyano-_pyrimidine.html', options=c("use_xhml"))
#system("pandoc -s 5-cyano-_pyrimidine.html -o 5-cyano-_pyrimidine.pdf")
# One-step knit + render to HTML (knitr is attached just above this block).
knit2html('5-cyano-_pyrimidine.Rmd')
|
3290b3b1eb682cf67b9b4c6a982d8fd41decc7fd | ca0a9f8dfa1487adaf9ba2f8d21d0d06dddd79dc | /man/plot-methods.Rd | 1fcc32ac9616ce9f50d2188fc864025a5c895c3c | [] | no_license | cran/packS4 | c964c29f4e9e2c1f18bdcbbd91e3257f5ca15296 | 2c3a7206237f28672987275ef5696d66bb5ded34 | refs/heads/master | 2021-03-12T23:57:06.953797 | 2015-05-27T00:00:00 | 2015-05-27T00:00:00 | 17,698,171 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 608 | rd | plot-methods.Rd | \name{plot-methods}
\docType{methods}
%\alias{plot-methods}
\alias{plot,ClassU-method}
\alias{plot,ClassV-method}
\alias{plot,ClassW-method}
\title{plot-ClassV ; plot-ClassW}
\description{
\code{plot} for two objects.
}
\section{Methods}{
\describe{
\item{x = "ClassV"}{just plot a point}
\item{x = "ClassW"}{plot a point and add a title}
}
}
\author{Christophe Genolini <genolini@u-paris10.fr>}
\references{
Book: "Petit Manuel de Programmation Orientee Objet sous R"
}
\examples{
www <- classW(w1="A",u1=2,u2=-4)
plot(www)
}
\keyword{documentation}
|
413b688c505d91c0fd99ae63088445ff386a69d3 | a937a3eec5f65fcfeab29b9f68ba21a0e6ae0f84 | /R/functions/functions-twitter-fix.R | 86643fb7b504bdcecbf47f6203b0ef0920efc9d6 | [] | no_license | tonyelhabr/twitterbot-ratio | 3e01d484bb0c280600785a59b2604a1a4067fbce | 5964cf4812b2c21aba9fa7939e61e390581e8b18 | refs/heads/master | 2020-04-05T19:26:54.343679 | 2018-11-24T19:41:19 | 2018-11-24T19:41:19 | 157,134,969 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,878 | r | functions-twitter-fix.R |
# Join freshly scraped/posted rows (`y`) back onto the ratio log (`x`),
# preferring y's values for the post-related columns where they exist.
# `how` selects the dplyr join flavour ("left", "inner", ...); `by`
# defaults to the base-order columns shared by both frames. `...` is
# accepted for purrr::partial() compatibility and is otherwise unused.
.rebind_ratio_log_scrape <-
  function(x,
           y,
           ...,
           how = "left",
           by = setdiff(.COLS_RATIO_BASE_ORDER, setdiff(names(x), names(y))),
           verbose = FALSE) {
    # Note: This verbose is essentially a replacement for the regular message
    # output by `dplyr::*join()` when `by` isn't specified.
    if (verbose) {
      msg <-
        paste0("Joining by `", paste(by, collapse = "`, `", sep = ""), "`.")
      message(msg)
    }
    # e.g. how = "left" -> dplyr::left_join
    f_join <- sprintf("%s_join", how)
    # Warnings suppressed: y may lack some of the expected ratio columns.
    suppressWarnings(y <- y %>% .select_ratio_cols_at())
    # Row number preserves x's original ordering through the join.
    x <- x %>% mutate(rn = row_number())
    res <-
      purrr::invoke(
        f_join,
        x = x,
        y = y,
        by = by,
        suffix = c("", "_y")
      )
    # Prefer the joined (_y) values where present, then restore row order
    # and the canonical column order.
    res %>%
      mutate(
        considered = coalesce(considered_y, considered),
        posted = coalesce(posted_y, posted),
        status_id_post = coalesce(status_id_post_y, status_id_post),
        text_post = coalesce(text_post_y, text_post),
        timestamp_post = coalesce(timestamp_post_y, timestamp_post)
      ) %>%
      arrange(rn) %>%
      select(-rn) %>%
      .select_ratio_cols_at()
  }
# Pre-configured variants of .rebind_ratio_log_scrape():
# "loosely" left-joins on the full base-column order; "strictly" inner-joins
# on the automatically derived shared key columns.
.rebind_ratio_log_scrape_loosely <-
  purrr::partial(.rebind_ratio_log_scrape, how = "left", by = .COLS_RATIO_BASE_ORDER)
.rebind_ratio_log_scrape_strictly <-
  purrr::partial(.rebind_ratio_log_scrape, how = "inner")
# Rebuild the scrape-based ratio log: identify quote-tweets this account has
# already posted (statuses starting with "Congratulations"), mark them as
# considered/posted, merge that information back into the persisted log, and
# re-export it. Returns the rebuilt log invisibly.
# NOTE(review): `verbose` is accepted but currently unused in the body.
.refresh_ratio_log_scrape <-
  function(tweets_self = NULL, ratio_log_scrape = NULL, ..., verbose = TRUE) {
    if(is.null(tweets_self)) {
      tweets_self <- .get_tweets_self()
    }
    # favs_self <- tweets_self %>% filter(!favorited_by %>% is.na())
    # rt_self <- tweets_self %>% filter(is_retweet)
    # Our ratio posts all begin with "Congratulations".
    post_self <- tweets_self %>% filter(str_detect(text, "^Congratulations"))
    # other_self <-
    #   bind_rows(
    #     favs_self,
    #     rt_self,
    #     post_self
    #   ) %>%
    #   select_if(~!is.list(.)) %>%
    #   anti_join(tweets_self %>% select_if(~!is.list(.)))
    if(is.null(ratio_log_scrape)) {
      ratio_log_scrape <- import_ratio_log_scrape()
    }
    # Re-shape our own posts so the *_post columns describe the post itself
    # and the id/user columns describe the quoted (ratio'd) tweet.
    posted_self <-
      post_self %>%
      rename(
        text_post = text,
        status_id_post = status_id,
        timestamp_post = created_at
      ) %>%
      select(-user_id, -user, -favorite_count, -retweet_count) %>%
      rename(
        user_id = quoted_user_id,
        user = quoted_screen_name,
        status_id = quoted_status_id #,
        # Note: Could also join these, but it might cause "mismatches".
        # (Haven't tested this though.)
        # created_at = quoted_created_at,
        # favorite_count = quoted_favorite_count,
        # retweet_count = quoted_retweet_count,
        # text = quoted_text
      ) %>%
      mutate(considered = 1L, posted = 1L) %>%
      .select_ratio_cols_at()
    ratio_log_scrape_distinct <-
      ratio_log_scrape %>%
      distinct(text, .keep_all = TRUE)
    # Strict (inner) join: every posted tweet should match a logged row.
    ratio_log_rebind <-
      .rebind_ratio_log_scrape_strictly(
        x = ratio_log_scrape_distinct,
        y = posted_self
      )
    .compare_n_row_eq(
      data1 = ratio_log_rebind,
      data2 = posted_self
    )
    # Loose (left) join back onto the full log; row count must be preserved.
    ratio_log_rebind_export <-
      .rebind_ratio_log_scrape_loosely(
        x = ratio_log_scrape,
        y = ratio_log_rebind
      )
    .compare_n_row_eq(
      data1 = ratio_log_rebind_export,
      data2 = ratio_log_scrape
    )
    # NOTE(review): the export path is captured but never used or returned.
    path_ratio_log_export <- export_ratio_log_scrape(ratio_log_rebind_export)
    invisible(ratio_log_rebind_export)
  }
# Public wrapper around `.refresh_ratio_log_scrape()` that optionally backs
# up the existing log file first.
#
# Args:
#   ...: passed through to `.refresh_ratio_log_scrape()`.
#   path: path of the log file to back up (skipped when NULL).
#   backup: whether to create a backup before refreshing.
#   clean: whether to prune old backups (passed to `.create_backup()`).
#   n_keep: number of backups to retain (passed to `.clean_backup()` via
#     `.create_backup()`).
#   verbose: forwarded to `.refresh_ratio_log_scrape()`.
refresh_ratio_log_scrape <-
  function(...,
           path = config$path_ratio_log_scrape,
           backup = TRUE,
           clean = TRUE,
           n_keep = 10,
           verbose = TRUE) {
    # `&&` (scalar, short-circuiting) is the correct operator in an `if`.
    if (backup && !is.null(path)) {
      # BUG FIX: `n_keep` was previously hard-coded to 10 here, silently
      # ignoring the caller's argument; it is now forwarded.
      .create_backup(path = path, clean = clean, n_keep = n_keep)
    }
    # BUG FIX: `verbose` was accepted but never forwarded downstream.
    .refresh_ratio_log_scrape(..., verbose = verbose)
  }
|
498ca7083ffcd17b91f0dc22e904b97826ff8dd2 | 599fbbb368dbfd0b781a855e8a08a86e833ed10f | /ui.R | d127ef86ec5675b315fff4f11fd57e333ec6cc03 | [] | no_license | joshi-v/DevDatProd | 0adeb41059762a469863812949fbefdad3b52a54 | 5556c82cc26c0d88c5a74b7a6c659409d44867b6 | refs/heads/master | 2021-01-20T07:18:03.664132 | 2017-05-05T20:44:25 | 2017-05-05T20:44:25 | 89,988,577 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 944 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
# Define UI for application that draws a histogram
# UI definition: plots a user-supplied latitude/longitude on a world map.
shinyUI(fluidPage(
  # Application title
  titlePanel("Plot location on world map"),
  # Sidebar with numeric inputs for the coordinates; the defaults
  # (27.1750 N, 78.0422 E) point at Agra, India.
  sidebarLayout(position="left",
    sidebarPanel(
      numericInput('latitude', 'Latitude', value = 27.1750, min = -90, max = 90),
      br(),
      numericInput('longitude', 'Longitude', value = 78.0422, min = -180, max = 180),
      br(),
      # The server should only redraw when this button is clicked.
      actionButton ("plotButton", "plot"),
      p ("Click to update")
    ),
    # Main panel: distance text (server output "TMdist") and a leaflet map
    # ("plot1"). NOTE(review): leafletOutput() requires the leaflet package,
    # which is not attached here -- presumably loaded in server.R/global.R;
    # confirm.
    mainPanel (
      textOutput("TMdist"),
      br(),
      leafletOutput("plot1")
    )
  )
))
029cf0de903fc0173d8a2cdf5822772ae2088c22 | d426aa790bdbeac9a62fc3ad5cbb58d3384bf593 | /R/pslScore.R | 8a5a09dd4100d9dc68a27257dd5c8a2bd0dd3615 | [] | no_license | drmjc/blat | 0977e4ae82ed2751c2f8922b6d79b6ed4607e792 | 84400169501b6f7e4e1e6ec9d88b27a53410f776 | refs/heads/master | 2020-05-31T05:03:53.663159 | 2013-08-29T00:17:06 | 2013-08-29T00:17:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,099 | r | pslScore.R | ## Code ported from BLAT source code to determine the pslScore for a psl alignment
##
## http://genome.ucsc.edu/FAQ/FAQblat#blat4
##
## Mark Cowley, 12 April 2006
##
#
# colnames and their indices of a psl result table:
#
# [,1] [,2] [,3] [,4] [,5] [,6] [,7]
# "match" "mis-match" "rep-match" "N's" "Q gapcount" "Q gapbases" "T gapcount"
# [,8] [,9] [,10] [,11] [,12] [,13] [,14] [,15]
# "T gapbases" "strand" "Q name" "Q size" "Q start" "Q end" "T name" "T size"
# [,16] [,17] [,18] [,19] [,20] [,21]
# "T start" "T end" "blockcount" "blockSizes" "qStarts" "tStarts"
#
#' calculate the score from a psl alignment
#'
#' This calculates the score for a PSL alignment, based on C code from
#' Jim Kent (reproduced in a comment below the internal `.pslScore`).
#' This has been optimised, breaking the problem into smaller chunks:
#' scoring rows in batches of 1000 empirically ran orders of magnitude
#' faster than a single 1:nrow(psl) loop (~90 s vs ~30 min on a
#' 37,837-row table); the cause was never diagnosed -- replacing
#' bitShiftR with floor(x/2) did not help, and the timings were stable
#' with full or empty RAM.
#'
#' @param psl a PSL object (data.frame with the standard 21 psl columns)
#' @param isMrna logical: protein scores are 3x size of mrna/nucleotide scores.
#'
#' @return numeric vector of psl alignment scores, one per row of `psl`
#'
#' @author Mark Cowley, 12 April 2006
#' @export
#' @importFrom mjcbase split_into_batches
#' @examples
#' f <- file.path(system.file(package="blat"), "examples", "test.psl")
#' psl <- import.psl(f, score=FALSE)
#' psl$score <- pslScore(psl, FALSE)
#' head(psl)
#'
#' # or the simpler approach
#' f <- file.path(system.file(package="blat"), "examples", "test.psl")
#' psl <- import.psl(f, score=TRUE)
#' head(psl)
#'
pslScore <- function(psl, isMrna=!pslIsProtein(psl[1,])) {
  BATCH_SIZE <- 1000
  # if( nrow(psl) > 1 )
  # 	init.progress.meter2(nrow(psl))
  if( nrow(psl) > BATCH_SIZE ) {
    # Batch the rows for speed (see header comment), delegating the
    # per-batch work to the internal .pslScore().
    scores <- rep(0, nrow(psl))
    for(idx in split_into_batches(1:nrow(psl), batch.size=BATCH_SIZE)) {
      scores[idx] <- .pslScore(psl[idx,], isMrna)
    }
    return( scores )
  }
  else {
    return( .pslScore(psl, isMrna) )
  }
}
#' Score a batch of psl alignments (internal helper for \code{pslScore}).
#'
#' For a single-row \code{psl} the BLAT score is computed directly; for
#' multiple rows each row is scored in turn via \code{pslScore}.
#'
#' @param psl one or more rows of a PSL table.
#' @param isMrna logical; FALSE triples the score components (protein space).
#' @return numeric vector of alignment scores, one per row.
.pslScore <- function(psl, isMrna) {
  if( nrow(psl) > 1 ) {
    scores <- rep(0, nrow(psl))
    for(i in seq_len(nrow(psl))) {
      scores[i] <- pslScore(psl[i,], isMrna)
    }
    return( scores )
  }
  else {
    sizeMul <- ifelse(isMrna, 1, 3)
    # The C source computes `repMatch >> 1`; for the non-negative repMatch
    # counts this is exactly integer division by 2, which removes the need
    # for the bitops dependency (bitops::bitShiftR) entirely.
    return( sizeMul * (psl$"match" + psl$"rep-match" %/% 2) -
            sizeMul * psl$"mis-match" - psl$"Q gapcount" - psl$"T gapcount" )
  }
}
## C-code from Jim Kent.
## int pslScore(const struct psl *psl) {
## /* Return score for psl. */
## int sizeMul = pslIsProtein(psl) ? 3 : 1;
##
## # x >> 1 right shifts x by 1 bit (ie 0111 -> 0011; 7 -> 3)
## return sizeMul * (psl->match + ( psl->repMatch>>1)) -
## sizeMul * psl->misMatch - psl->qNumInsert - psl->tNumInsert;
## }
#' calculate the % identity of a psl alignment
#'
#' Many years later, this version looks more robust than
#' pslPercentIdentity2 and pslPercentIdentity3.
#' Rows are processed in batches of 1000 for speed (see comment in
#' pslScore), with the per-row arithmetic delegated to pslCalcMilliBad.
#'
#' @inheritParams pslScore
#' @return numeric vector of percent identities (0-100, one decimal
#'   place), one element per row of `psl`
#' @author Mark Cowley, 12 April 2006
#' @export
#'
#' @examples
#' f <- file.path(system.file(package="blat"), "examples", "test.psl")
#' psl <- import.psl(f, score=TRUE)[1:5,]
#' pslPercentIdentity(psl, FALSE)
pslPercentIdentity <- function(psl, isMrna=!pslIsProtein(psl)) {
  BATCH_SIZE <- 1000
  if( nrow(psl) > BATCH_SIZE ) {
    #
    # see comment in pslScore re why this takes so damn long looping
    # from 1:nrow(psl) when if you split into chunks of 1000 rows it
    # runs many orders faster!?
    #
    scores <- rep(0, nrow(psl))
    # # init.progress.meter2( nrow(psl) )
    for(idx in split_into_batches(1:nrow(psl), batch.size=BATCH_SIZE)) {
      scores[idx] <- pslPercentIdentity(psl[idx,], isMrna)
    }
    return( scores )
  }
  else if( nrow(psl) <= BATCH_SIZE && nrow(psl) > 1 ) {
    # small multi-row table: recurse one row at a time
    scores <- rep(0, nrow(psl))
    for(i in 1:nrow(psl)) {
      scores[i] <- pslPercentIdentity(psl[i,], isMrna)
      # # update.progress.meter2()
    }
    return( scores )
  }
  else {
    # single row: 100 minus badness-in-parts-per-thousand / 10
    return( round(100.0 - pslCalcMilliBad(psl, isMrna) * 0.1, 1) )
  }
}
#' calculate the % identity of a psl alignment V2
#'
#' Batched variant of \code{pslPercentIdentity}. BUG FIX: this function
#' previously had no branch for \code{nrow(psl) <= 1000} and silently
#' returned \code{NULL} for any small input; small tables are now scored
#' row by row as well.
#'
#' @inheritParams pslScore
#' @return numeric vector of percent identities (0-100), one per row.
#' @author Mark Cowley, 12 April 2006
#' @importFrom mjcbase split_into_batches
pslPercentIdentity2 <- function(psl, isMrna=!pslIsProtein(psl)) {
  BATCH_SIZE <- 1000
  scores <- rep(0, nrow(psl))
  if( nrow(psl) > BATCH_SIZE ) {
    #
    # see comment in pslScore re why this takes so damn long looping
    # from 1:nrow(psl) when if you split into chunks of 1000 rows it
    # runs many orders faster!?
    #
    # # init.progress.meter2( nrow(psl) )
    for(idx in split_into_batches(1:nrow(psl), batch.size=BATCH_SIZE)) {
      for(i in seq_along(idx)) {
        scores[idx[i]] <- round(100.0 - pslCalcMilliBad(psl[idx[i],], isMrna) * 0.1, 1)
        # # update.progress.meter2()
      }
    }
  }
  else {
    # previously this case fell through and returned NULL
    for(i in seq_len(nrow(psl))) {
      scores[i] <- round(100.0 - pslCalcMilliBad(psl[i,], isMrna) * 0.1, 1)
    }
  }
  return( scores )
}
#' calculate the % identity of a psl alignment V3
#'
#' Plain unbatched row-by-row loop; should be slower than V2 on large
#' tables (see the benchmark notes in pslScore).
#'
#' @inheritParams pslScore
#' @return numeric vector of percent identities (0-100), one per row.
#' @author Mark Cowley, 12 April 2006
#' @importFrom mjcbase split_into_batches
pslPercentIdentity3 <- function(psl, isMrna=!pslIsProtein(psl)) {
  # NOTE(review): split_into_batches is imported above but not actually
  # used by this variant; the tag is retained for NAMESPACE safety.
  scores <- rep(0, nrow(psl))
  # # init.progress.meter2( nrow(psl) )
  for(i in 1:nrow(psl)) {
    scores[i] <- round(100.0 - pslCalcMilliBad(psl[i,], isMrna) * 0.1, 1)
    # # update.progress.meter2()
  }
  return( scores )
}
#' Badness of a single psl alignment, in parts per thousand (internal).
#'
#' Direct port of Jim Kent's C routine (reproduced in the comment below
#' this function); used as
#' percent identity = 100 - pslCalcMilliBad(psl, isMrna) * 0.1.
#'
#' @param psl a single-row PSL data.frame.
#' @param isMrna logical; FALSE means the alignment is in protein space.
#' @return numeric "milliBad" value; 0 for degenerate (zero-length or
#'   all-zero) alignments.
#' @author Mark Cowley, 12 April 2006
#'
#' @examples
#' ## Note the order within psl differs if import.psl(score=TRUE)
#' f <- file.path(system.file(package="blat"), "examples", "test.psl")
#' psl <- import.psl(f, score=FALSE)[1:5,]
#' pslCalcMilliBad(psl[3,])
pslCalcMilliBad <- function(psl, isMrna=!pslIsProtein(psl)) {
  if (nrow(psl) > 1) {
    stop( "psl should have only 1 row\n" )
  }
  # Protein alignments count 3 bases per aligned unit.
  scale <- if (isMrna) 1 else 3
  q_span <- scale * (psl$"Q end" - psl$"Q start")
  t_span <- psl$"T end" - psl$"T start"
  # Degenerate alignment: nothing aligned on one side.
  if (min(q_span, t_span) <= 0) {
    return(0)
  }
  # Length-mismatch penalty; for mRNA only a longer query is penalised.
  size_dif <- q_span - t_span
  size_dif <- if (isMrna) max(size_dif, 0) else abs(size_dif)
  # Gap penalty: query gaps always, target gaps only in protein space.
  gap_penalty <- psl$"Q gapcount" + (if (isMrna) 0 else psl$"T gapcount")
  total <- scale * (psl$"match" + psl$"rep-match" + psl$"mis-match")
  if (total == 0) {
    return(0)
  }
  (1000 * (psl$"mis-match" * scale + gap_penalty + round(3 * log(1 + size_dif)))) / total
}
## int pslCalcMilliBad(struct psl *psl, boolean isMrna) {
## /* Calculate badness in parts per thousand. */
## int sizeMul = pslIsProtein(psl) ? 3 : 1;
## int qAliSize, tAliSize, aliSize;
## int milliBad = 0;
## int sizeDif;
## int insertFactor;
## int total;
##
## qAliSize = sizeMul * (psl->qEnd - psl->qStart);
## tAliSize = psl->tEnd - psl->tStart;
## aliSize = min(qAliSize, tAliSize);
## if (aliSize <= 0)
## return 0;
## sizeDif = qAliSize - tAliSize;
## if (sizeDif < 0)
## {
## if (isMrna)
## sizeDif = 0;
## else
## sizeDif = -sizeDif;
## }
## insertFactor = psl->qNumInsert;
## if (!isMrna)
## insertFactor += psl->tNumInsert;
##
## total = (sizeMul * (psl->match + psl->repMatch + psl->misMatch));
## if (total != 0)
## milliBad = (1000 * (psl->misMatch*sizeMul + insertFactor +
## round(3*log(1+sizeDif)))) / total;
## return milliBad;
## }
#' is the psl alignment from a protein?
#'
#' An alignment is in protein space when its last block's target end lines
#' up with tStart + 3*blockSize (codons are 3 bases wide).
#'
#' NOTE(review): the original C code tests `strand[1]` (the SECOND
#' character of a two-character strand such as "+-", used for translated
#' alignments), whereas this port compares the whole strand string to
#' '+'/'-', so rows with two-character strands always return FALSE here
#' -- confirm this is intended.
#' NOTE(review): on the '-' strand the C code uses tSize ("T size") where
#' this port uses psl$"T end"; the original author's comment states this
#' was a deliberate bug fix, so it is left as-is.
#'
#' @inheritParams pslScore
#'
#' @return logical, one element per row of `psl`
#'
#' @author Mark Cowley, 12 April 2006
#' @export
#' @importFrom mjcbase uncsv
#'
#' @examples
#' f <- file.path(system.file(package="blat"), "examples", "test.psl")
#' psl <- import.psl(f, score=FALSE)[1:5,]
#' pslIsProtein(psl)
#'
#' psl <- import.psl(f, score=TRUE)[1:5,]
#' pslIsProtein(psl)
#'
pslIsProtein <- function(psl) {
  if( nrow(psl) > 1 ) {
    # recurse row-by-row for multi-row tables
    scores <- rep(F, nrow(psl))
    for(i in 1:nrow(psl))
      scores[i] <- pslIsProtein(psl[i,])
    return( scores )
  }
  else {
    ## is psl a protein psl (are it's blockSizes and scores in protein space)
    # R is 1-indexed, so blockcount IS the index of the last block
    # (the C source uses blockCount - 1).
    lastBlock <- psl$"blockcount"
    # tStarts/blockSizes are comma-separated strings; uncsv splits them
    tstarts <- as.numeric( uncsv(psl$"tStarts") )
    blockSizes <- as.numeric( uncsv(psl$"blockSizes") )
    return( ((psl$"strand" == '+' ) && (psl$"T end" == tstarts[lastBlock] + 3 * blockSizes[lastBlock]))
        ||
        ((psl$"strand" == '-') && (psl$"T start" == (psl$"T end"-(tstarts[lastBlock] + 3*blockSizes[lastBlock]))))
    )
  }
}
## boolean pslIsProtein(const struct psl *psl) {
## /* is psl a protein psl (are it's blockSizes and scores in protein space)
## */
## int lastBlock = psl->blockCount - 1;
##
## return ( ((psl->strand[1] == '+' ) && (psl->tEnd == psl->tStarts[lastBlock] + 3*psl->blockSizes[lastBlock]))
## ||
## ((psl->strand[1] == '-') && (psl->tStart == (psl->tSize-(psl->tStarts[lastBlock] + 3*psl->blockSizes[lastBlock]))))
## );
## }
|
fe2453285295e03a8bee436374af85a7feae6a4f | b2a62cdfeb43bab828e1243ccb825ca46022b81c | /run_analysis.R | f5827b370203cf60838cca0a61e8ac2e6b8c14e9 | [] | no_license | millette17/Coursera-Getting-and-Cleaning-Data | 0e5b15d019ad01ab1e65356afd7f13ae2ef45333 | f47f2a3f4d11ab3d14b0ed1c5351bb39f3d5297d | refs/heads/master | 2021-01-09T06:34:16.870525 | 2017-02-05T20:56:44 | 2017-02-05T20:56:44 | 81,011,589 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,592 | r | run_analysis.R | # Coursera Getting and Cleaning Data - Course Project
# 1. Downloading and unzipping data:
# Download the UCI HAR zip archive into ./data (created if absent).
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip"
download.file(fileUrl,destfile="./data/Dataset.zip")
# Unzip data
unzip(zipfile="./data/Dataset.zip",exdir="./data")
# 2. Merge training and test sets
# Reading training tables (measurements, activity ids, subject ids):
x_train <- read.table("./data/UCI HAR Dataset/train/X_train.txt")
y_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt")
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt")
# Reading testing tables:
x_test <- read.table("./data/UCI HAR Dataset/test/X_test.txt")
y_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt")
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt")
# Reading feature vector (column 2 holds the 561 feature names):
features <- read.table('./data/UCI HAR Dataset/features.txt')
# Reading activity labels (id -> activity name lookup):
activityLabels = read.table('./data/UCI HAR Dataset/activity_labels.txt')
# 3. Use descriptive activity names
# Assigning column names:
colnames(x_train) <- features[,2]
colnames(y_train) <-"activityId"
colnames(subject_train) <- "subjectId"
colnames(x_test) <- features[,2]
colnames(y_test) <- "activityId"
colnames(subject_test) <- "subjectId"
colnames(activityLabels) <- c('activityId','activityType')
# Merging data into one set: columns within each split, then stack rows.
mrg_train <- cbind(y_train, subject_train, x_train)
mrg_test <- cbind(y_test, subject_test, x_test)
setAllInOne <- rbind(mrg_train, mrg_test)
# Extract mean, std deviation
# Read column names:
colNames <- colnames(setAllInOne)
# Logical vector keeping the two id columns plus mean/std columns.
# NOTE(review): "mean.." and "std.." are regexes, so the dots match any
# character; this also keeps e.g. meanFreq() columns -- confirm intended.
mean_std <- (grepl("activityId" , colNames) |
grepl("subjectId" , colNames) |
grepl("mean.." , colNames) |
grepl("std.." , colNames)
)
# Make necessary subset:
setForMeanStd <- setAllInOne[ , mean_std == TRUE]
# 4. Descriptive variable names
# Attach the human-readable activity names to the mean/std subset:
setWithActivityNames <- merge(setForMeanStd, activityLabels,
                              by='activityId',
                              all.x=TRUE)
# 5. Create a second, independent tidy data set with the average of each
# variable for each activity and each subject:
TidyData <- aggregate(. ~subjectId + activityId, setWithActivityNames, mean)
TidyData <- TidyData[order(TidyData$subjectId, TidyData$activityId),]
# Write the tidy data set to a text file.
# BUG FIX: previously wrote the undefined object `secTidySet`, which made
# the script error at its final step; also spell out `row.names` in full
# rather than relying on partial argument matching.
write.table(TidyData, "TidyData.txt", row.names=FALSE)
|
8e0f46324064bbb043dbb5ae122d22d06ad48cb2 | b8e9dd065f9cd0290d3f6a488726c549b852fde1 | /code/fig_4_CA-balance.R | 1e277aeee30e0c817aa6ece46270a34928fa577f | [
"MIT"
] | permissive | graebnerc/disintegrating-europe | af97ee13d91e4db706a86daa5be322a9159e051c | cc46a46e8be2dc8401913e50266b4ecd05f299bd | refs/heads/master | 2020-05-29T09:30:34.121594 | 2019-11-11T21:59:38 | 2019-11-11T21:59:38 | 189,065,776 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,272 | r | fig_4_CA-balance.R | rm(list=ls())
# Attach packages. NOTE: `require()` is used below deliberately, as an
# install-on-first-use guard for the GitHub-only icaeDesign theme package.
library(countrycode)
library(data.table)
library(eurostat)
library(tidyverse)
library(grid)
library(gridExtra)
library(ggrepel)
library(ggpubr)
library(here)
library(reldist)
library(latex2exp)
if (!require("icaeDesign")){
  devtools::install_github("graebnerc/icaeDesign")
}
# Defines the `countries` core/periphery classification used below.
source(here("code/setup_country_classification.R"))
# Shared font sizes for the figure.
plots_title_size <- 12
plots_axis_title_size <- 11
plots_axis_ticks_size <- 10
macro_data <- fread(here("data/macro_data_fig1-2_4.csv"))
# Observation window for the figure.
start_year <- 1995
end_year <- 2018
# Plot function----------------------------------------------------------------
# make_weighted_plot: line + ribbon time-series plot of a group-level mean
# with a +/- 1 sd band, one colour per country group, and a vertical marker
# at 2007 (onset of the financial crisis).
#   data_used:     data frame with columns year, is.core, <var_mean>, <var_sd>
#   variable_name: plot title
#   y_lab:         y-axis label (values are formatted as percent)
#   var_mean/var_sd: column NAMES (strings) of the mean and sd series
#   time_span:     length-2 numeric, x-axis limits
#   breaks_x:      x-axis tick positions
#   label_x:       x-axis label (used here as a panel tag, e.g. "(a)")
make_weighted_plot <- function(data_used, variable_name, y_lab, var_mean,
                               var_sd, time_span, breaks_x, label_x){
  fig <- ggplot(data = data_used,
                aes(x = year,
                    y = get(var_mean),
                    group=is.core,
                    colour=is.core)) +
    geom_path() +
    # +/- one sd band around the group mean
    geom_ribbon(data=data_used,
                aes(ymin=get(var_mean)-get(var_sd),
                    ymax=get(var_mean)+get(var_sd),
                    linetype=NA, fill=is.core),
                alpha=0.25) +
    scale_color_icae(palette = "main", reverse = F, guide=FALSE) +
    scale_fill_icae(palette = "main", reverse = F) +
    scale_y_continuous(name = y_lab,
                       labels = scales::percent_format(scale = 1,
                                                       accuracy = 1)
                       ) +
    scale_x_continuous(breaks=breaks_x,
                       expand = expand_scale(mult = c(0, 0),
                                             add = c(0, 2))
                       ) +
    xlab(label_x) +
    labs(
      title = paste(variable_name),
      x = "Years",
      y = y_lab,
      color = "Core"
    ) +
    # vertical marker at the crisis year
    geom_vline(xintercept=2007,
               color="#798ba0"
               ) +
    coord_cartesian(
      xlim = time_span,
      expand = FALSE
    ) +
    theme_icae()
  return(fig)
}
#' Test uniqueness of data table
#'
#' Tests whether a data.table has unique rows with respect to a set of
#' index columns.
#'
#' @param data_table A data frame or data table of which uniqueness should
#'  be tested.
#' @param index_vars Vector of strings, which specify the columns of
#'  data_table according to which uniqueness should be tested
#'  (e.g. country and year).
#' @param print_pos logical; if TRUE, print a confirmation message when no
#'  duplicates are found.
#' @return TRUE if data_table is unique, FALSE and a warning if it is not.
test_uniqueness <- function(data_table, index_vars, print_pos=TRUE){
  data_table <- data.table::as.data.table(data_table)
  if (nrow(data_table)!=data.table::uniqueN(data_table, by = index_vars)){
    warning(paste0("Rows in the data.table: ", nrow(data_table),
                   ", rows in the unique data.table:",
                   data.table::uniqueN(data_table, by = index_vars)))
    return(FALSE)
  } else {
    if (print_pos){
      # sys.call()[[2]] recovers the unevaluated expression the caller
      # passed as `data_table`, so the message names the object tested.
      print(paste0("No duplicates in ", as.list(sys.call()[[2]])))
    }
    return(TRUE)
  }
}
# Data preparation-------------------------------------------------------------
fig_4_data_weighted <- macro_data %>%
dplyr::select(dplyr::one_of(
"year", "iso3c", "population_ameco", "current_account_GDP_ameco")
) %>%
dplyr::rename(population=population_ameco) %>%
dplyr::mutate(is.core=ifelse(iso3c %in% countries[["core"]], "Core countries",
ifelse(iso3c %in% countries[["peri"]],
"Periphery countries", NA))
) %>%
dplyr::mutate(is.core=ifelse(iso3c=="FRA", "France", is.core)
) %>%
dplyr::filter(!is.na(is.core)
) %>%
dplyr::mutate(is.core=as.factor(is.core)
) %>%
dplyr::filter(year>=start_year & year <= end_year) %>%
dplyr::group_by(year, is.core) %>%
dplyr::mutate(population_group=sum(population)) %>%
dplyr::ungroup() %>%
dplyr::mutate(pop_rel_group=population / population_group) %>%
dplyr::group_by(year, is.core) %>%
dplyr::mutate(test_pop=sum(pop_rel_group)) %>%
dplyr::ungroup() %>%
dplyr::group_by(year, is.core) %>%
dplyr::summarise(
current_account_mean=weighted.mean(current_account_GDP_ameco,
pop_rel_group),
current_account_sd=sd(current_account_GDP_ameco*pop_rel_group)
) %>%
dplyr::ungroup()
head(fig_4_data_weighted)
# Figure 4 plot creation-------------------------------------------------------
x_axis_breaks <- c(1995, 2000, 2005, 2007, 2010, 2014, 2018)
# Named vector: display title -> variable stem used for column lookup.
fig_4_titles <- c(
  "Current account balance to GDP (population-weighted)" = "current_account"
)
fig_4_CA <- make_weighted_plot(
  fig_4_data_weighted,
  names(fig_4_titles),
  "Current account balance to GDP",
  paste0(fig_4_titles, "_mean"),
  paste0(fig_4_titles, "_sd"),
  c(start_year, end_year),
  x_axis_breaks, paste0("(", letters[1],")")) +
  ylab("Current account balance to GDP") +
  theme(axis.title.x = element_blank(),
        axis.title.y = element_text(color="black", size=plots_axis_title_size),
        plot.title = element_text(color="black", size=plots_title_size),
        axis.text = element_text(color="black", size=plots_axis_ticks_size))
# Save in both vector (pdf) and raster (png) formats; one loop instead of
# two duplicated ggsave calls.
for (fig_4_ext in c("pdf", "png")) {
  ggsave(plot = fig_4_CA,
         filename = paste0("output/fig_4_current-account.", fig_4_ext),
         width = 6, height = 4)
}
# Numbers for the text in first paragraph of 2.3-------------------------------
# Helper: rounded weighted-mean current account for one group and year.
# (Explicit dplyr:: matches the rest of the file and avoids picking up
# stats::filter when dplyr is not attached.)
get_ca_mean <- function(yr, group, digits) {
  round(dplyr::filter(fig_4_data_weighted,
                      year == yr,
                      is.core == group)[["current_account_mean"]], digits)
}
cat("while the population-weighted average of the current account in the core ",
    "countries rose from about ",
    get_ca_mean(2000, "Core countries", 2),
    "% in 2000 to more than ",
    get_ca_mean(2008, "Core countries", 1),
    "% of GDP in 2008, the weighted average of current account deficits in the ",
    "periphery more than doubled from ",
    get_ca_mean(2000, "Periphery countries", 2),
    "% at the start of the Euro project to ",
    get_ca_mean(2008, "Periphery countries", 2),
    "% before the financial crisis.", sep = "")
|
2b02129b7dc2cb240b742dcf62827d8dd4689125 | 06d9afe4e9666407ff607b142649d4c6e944d674 | /man/ez-package.Rd | 93e1f4ff304382752ab52039e1b89d25991e3ecf | [] | no_license | cran/ez | fe4ae993c2ed1042d6f84c64e368970c502a5bff | 1d7a35d30f31b1671e7f6548b15864ddfe61c5ef | refs/heads/master | 2021-07-10T23:03:03.489960 | 2016-11-02T18:17:31 | 2016-11-02T18:17:31 | 17,695,925 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,337 | rd | ez-package.Rd | \name{ez-package}
\alias{ez}
\docType{package}
\title{Easy analysis and visualization of factorial experiments}
\description{
This package facilitates easy analysis of factorial experiments, including purely within-Ss designs (a.k.a. \dQuote{repeated measures}), purely between-Ss designs, and mixed within-and-between-Ss designs. The functions in this package aim to provide simple, intuitive and consistent specification of data analysis and visualization. Visualization functions also include design visualization for pre-analysis data auditing, and correlation matrix visualization. Finally, this package includes functions for non-parametric analysis, including permutation tests and bootstrap resampling. The bootstrap function obtains predictions either by cell means or by more advanced/powerful mixed effects models, yielding predictions and confidence intervals that may be easily visualized at any level of the experiment's design.
}
\details{
\tabular{ll}{
Package: \tab ez\cr
Type: \tab Package\cr
Version: \tab 4.4-0\cr
Date: \tab 2016-11-01\cr
License: \tab GPL-3\cr
LazyLoad: \tab yes\cr
}
This package contains several useful functions:
\itemize{
\item{\code{\link{ezANOVA}}}{ Provides simple interface to ANOVA, including assumption checks.}
\item{\code{\link{ezBoot}}}{ Computes bootstrap resampled cell means or lmer predictions}
\item{\code{\link{ezCor}}}{ Function to plot a correlation matrix with scatterplots, linear fits, and univariate density plots}
\item{\code{\link{ezDesign}}}{ Function to plot a visual representation of the balance of data given a specified experimental design. Useful for diagnosing missing data issues.}
\item{\code{\link{ezMixed}}}{ Provides assessment of fixed effects in a mixed effects modelling context.}
\item{\code{\link{ezPerm}}}{ Provides simple interface to the Permutation test.}
\item{\code{\link{ezPlot}}}{ Uses the \code{ggplot2} graphing package to generate plots for any given user-requested effect, by default producing error bars that facilitate visual post-hoc multiple comparisons.}
\item{\code{\link{ezPlot2}}}{ When supplied the results from a call to \code{\link{ezPredict}} or \code{\link{ezBoot}}, plots predictions with confidence intervals.}
\item{\code{\link{ezPrecis}}}{ Provides a summary of a given data frame.}
\item{\code{\link{ezPredict}}}{ Computes predicted values from the fixed effects of a mixed effects model.}
\item{\code{\link{ezResample}}}{ Resamples data, useful when bootstrapping.}
\item{\code{\link{ezStats}}}{ Provides between-Ss descriptive statistics for any given user-requested effect.}\cr\cr\cr
This package also contains two data sets:
\item{\code{\link{ANT}}}{ Simulated data from the Attention Network Test}
\item{\code{\link{ANT2}}}{ Messy version of the ANT data set}
}
}
\author{
Michael A. Lawrence \email{mike.lwrnc@gmail.com}\cr
Visit the \code{ez} development site at \url{http://github.com/mike-lawrence/ez}\cr
for the bug/issue tracker and the link to the mailing list.
}
\seealso{
\code{\link{ANT}}, \code{\link{ANT2}}, \code{\link{ezANOVA}}, \code{\link{ezBoot}}, \code{\link{ezCor}}, \code{\link{ezDesign}}, \code{\link{ezMixed}}, \code{\link{ezPerm}}, \code{\link{ezPlot}}, \code{\link{ezPlot2}}, \code{\link{ezPrecis}}, \code{\link{ezPredict}}, \code{\link{ezResample}}, \code{\link{ezStats}}
}
\keyword{ package }
|
8baf8d773a9f223c5a6ef7442372e5541364360f | 1d6659cebb291872203692da20bcea946c31d0fb | /scripts/ui.R | 60294981b6246a829f3567dbb5d1b54262351f91 | [] | no_license | Talonee/Kickstarter | 704abeac9e3525872164936611c9353967832521 | 556662aef996bcd584d7a9dcb64957d20bc50a1b | refs/heads/master | 2020-06-14T22:33:47.671347 | 2019-12-27T07:03:34 | 2019-12-27T07:03:34 | 195,145,507 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,681 | r | ui.R | library(shiny)
library(plotly)
library(dplyr)
library(shinythemes)
# Read the Kickstarter dump and derive the date/year fields used by the UI.
data <- read.csv("data/ks-data.csv", stringsAsFactors = FALSE)
# "launched" is a "YYYY-MM-DD HH:MM:SS" timestamp; keep only the date part.
# sub() is vectorized and replaces the slower strsplit/sapply round trip.
data$date <- sub(" .*$", "", data$launched)
cleaned <- data %>%
  mutate(date = as.Date(date, format = "%Y-%m-%d"))
cleaned$year <- as.numeric(format(cleaned$date, "%Y"))
# Year 1970 marks epoch-default (unparseable) launch dates; drop those rows.
latest_data <- cleaned %>% filter(year != 1970)
# Build the "main category" select box shared by several tabs.
choose_maincateg <- function(input_name) {
  category_choices <- unique(latest_data$main_category)
  selectInput(
    inputId = input_name,
    label = "Choose the Main Category",
    choices = category_choices
  )
}
# Define a UI using a `navbarPage()` layout with the required
# tabs and content:
# Top-level Shiny UI: a navbar with one tab per analysis question.
ui <- navbarPage(
  theme = shinytheme("cosmo"),
  "Kickstarter Success",
  # Tab 1: project motivation, data source, and research questions.
  tabPanel(
    "Overview",
    tags$h2("What Makes a Successful Kickstarter?"),
    tags$em("Ruthvik, Cynthia, & Talon"),
    p(),
    p(),
    tags$p("Crowdfunding: a relatively new concept, but progressively more
           popular, and powerful. GoFundMe is used for everything from
           plastic surgery to funeral arrangements, Patreon pays the bills of
           numerous
           online creators, and Facebook charity fundraisers are omnipresent.
           Perhaps most influential of all is Kickstarter, which -- with the
           success of
           electronics like the Pebble, and video games like Pillars of
           Eternity --
           is an increasingly legitimate avenue for small businesses to reach
           large audiences."),
    p(),
    tags$p("But: what", tags$em("makes"), "a Kickstarter successful?
           No doubt, this is an important question for startups &
           other small businesses whose dreams ride on an algorithm
           and the generosity of an anonymous audience. Is it possible
           to ensure -- or at least increase -- the likelihood of
           success? Let's find out."),
    p(),
    tags$h3("Our Data"),
    tags$p(
      "We are using a very large dataset -- more than 300,000 rows -- of
      Kickstarter data, published on ", tags$a(
        # Was href = "google.com": link label said Kaggle but pointed at
        # Google; fixed to point at Kaggle.
        href = "https://www.kaggle.com",
        "Kaggle"
      ),
      "by", tags$a(href = "https://www.kaggle.com/kemical", "Mikael Mouille"),
      "with assistance from Anton Savchenko. While the data is technically
      through 2018, it was collected early in the year, so the last
      complete year of data is 2017. Using this dataset, we hope to
      answer the following."
    ),
    tags$h3("Questions:"),
    tags$li("What types -- categories -- of Kickstarter are most popular in
            each year? Does a category's popularity by number of projects
            affect its funding?"),
    tags$li("Does the country from which a Kickstarter is launched
            significantly affect its project success rate
            (as measured by funding)?"),
    tags$li("Are the number of backers or the amount pledged affected by the
            time duration of the project?")
  ),
  # Tab 2: popularity and funding success by project category.
  tabPanel(
    "Category",
    titlePanel(
      "Kickstarter by Category"
    ),
    sidebarLayout(
      sidebarPanel(
        # interactable goes here
        selectInput(
          inputId = "first_year",
          label = "First year:",
          choices = c("2012", "2013", "2014", "2015", "2016", "2017")
        ),
        selectInput(
          inputId = "second_year",
          label = "Second year:",
          choices = c("2012", "2013", "2014", "2015", "2016", "2017"),
          selected = "2017"
        ),
        radioButtons("med_or_mean",
          "% Funded:",
          c("Mean", "Median"),
          selected = "Median"
        )
      ),
      mainPanel(
        # viz goes here
        plotlyOutput("popularity"),
        p(),
        p(),
        plotlyOutput("success"),
        p(),
        p(),
        p("Kickstarter projects are organized into 15 main categories --
          things like film, games, and food. We are interested in the
          popularity of these categories, and their influence
          on funding status. We wonder: do projects which see high
          amounts of success influence the popularity of projects of that
          category on the platform? Do the most popular types of projects
          also see the most success, or the most reliable success?"),
        p(),
        p(
          "To this end, we have designed two bar graphs. Both separate their
          data by year; we want to know if certain categories on the
          Kickstarter
          platform have increased in popularity over time. The first shows
          the total",
          tags$em("count"), "of projects in that category in that year;
          the second shows the mean
          or median funding % of projects in that category in that year."
        ),
        p(),
        p(
          "Our graphs show some interesting results! As anyone familiar with
          Kickstarter
          knows, some projects are funded tens of thousands of times -- and
          these are
          typically the most well-known and publicized. They also",
          tags$em("significantly"), "skew the mean, or average,
          of our calculations; for example, in 2017, Music-type projects
          were funded
          an", tags$em("average"), "of 779%, but only 27% by median. It is
          very common to see extremely high averages
          like this; however, medians cap out at around 100%."
        ),
        p(),
        p(
          "Interestingly, more popular (by count) categories often have lower
          median funding percentages. This makes sense if we think about it:
          if there are more projects, it's likely that funding for these
          projects is more competitive; however, this does suggest that
          creators are not thinking about market saturation when they
          announce their products. Some of the", tags$em("least"),
          "popular categories -- Theater, Comics, and Dance -- have the
          highest median funding percentages, hovering between 50% and
          100%; by comparison, Technology-type projects
          achieve under 5% median funding in the majority of years --
          and achieved a high of 30% in 2012,
          when Technology was much less popular on Kickstarter."
        ),
        p(),
        p(
          "There are many reasons more popular categories like Film and
          Technology see less success. Market saturation is a concern, but
          also, these types of projects are usually much more expensive
          than the typical Comic-type project
          (", textOutput("technology", inline = TRUE), "; ",
          textOutput("comics", inline = TRUE), "). Creators would be prudent to
          note this before launching an expensive,
          high-risk product in a saturated category."
        )
      )
    )
  ),
  # Tab 3: sums, means, and funding ratios by launch country.
  tabPanel(
    "Country",
    titlePanel(
      "Kickstarter by Country"
    ),
    sidebarLayout(
      sidebarPanel(
        style = "position:fixed;width:30%;",
        checkboxInput(
          "usa",
          label = strong("Include the United States of America*"),
          value = TRUE
        ),
        radioButtons(
          "sum",
          label = strong("Sum:"),
          choices = list(
            "Number of backers" = "backers",
            "Amount pledged (in USD)" = "pledged",
            "Goal amount (in USD)" = "goal",
            "Total projects" = "tote"
          ),
          selected = "backers"
        ),
        radioButtons(
          "mean",
          label = strong("Mean:"),
          choices = list(
            "Number of backers" = "backers",
            "Amount pledged (in USD)" = "pledged",
            "Goal amount (in USD)" = "goal"
          ),
          selected = "backers"
        ),
        tags$em("*In order to make a more compelling argument,
                the United States of America has the option to be
                disqualified as its data values are so large that
                other countries often become obscure and difficult to
                identify, which makes the statistics less useful.")
      ),
      mainPanel(
        h2("Kickstarter Sum Statistic between Countries"),
        plotlyOutput("sumplot"),
        p(strong("Include USA: "), "The US dominates all other countries by
          an astronomical margin in terms of total backers, amount pledged,
          and goal amount needed for the project."),
        p(strong("Non USA: "), "Other first countries such as UK, Canada and
          Australia also hold the highest spots across all source for
          total funding."),
        p(strong("Hypothesis: "), "First world countries has the ability to
          invest a lot more in funding, which translates to higher
          success rates for projects."),
        p(),
        p(),
        h2("Kickstarter Mean Statistic between Countries"),
        plotlyOutput("meanplot"),
        p(strong("Hypothesis: "), "Countries with higher average support
          sources means their projects are generally more desired,
          which means there are higher success rates for projects."),
        p(),
        p(),
        h2("Kickstarter Funding Ratio"),
        plotlyOutput("percent"),
        p("Hong Kong, Austria, and Singapore are the top contenders for having
          the highest funding relatives to their projected goals"),
        p(strong("Hypothesis: "), "Countries with higher funding ratio means
          there are more projects with more than enough resources needed to
          accomplish their goals, which translates to higher success rates."),
        h2("To Answer the Question?"),
        h4("Does the country from which a Kickstarter is launched significantly
           affect its project success rate (as measured by funding)?"),
        plotlyOutput("highest"),
        p(
          "The data presented are interesting to say the least, different
          countries excel in different areas, they all seem to have an equal
          potential source of what, theoretically, would produce the highest
          rates of successful projects."
        ),
        p(h3("Conclusion: ")),
        p(
          "The answer varies. Data for mean statistic proves no real arguments
          as it fluctuates greatly across countries. In the case of Singapore
          and Hong Kong, countries with higher fulfillment rate in their funding
          ratio have high success rates. In the case of the US and UK, countries
          with ",
          tags$em("more"), " resources (higher number of backers, amount
          pledged relative to goal, number of overall planned projects, etc.)
          have high success rates. In the case of Japan, countries with fewer
          projects to begin with have higher chance to boost their overall
          success rate. No single country dominates across all data enough to
          determine a definite answer. So to reiterate, the country origin do
          not determine a project's success rate, there are many other factors
          to consider that affect the success rate of a project: government
          funding, advertisement, country economy, backers, audience interest,
          etc."
        )
      )
    )
  ),
  # ruthvik's viz
  # Tab 4: 3D exploration of backers vs. pledged amount vs. project duration.
  tabPanel(
    "3D Plot",
    titlePanel("Determining the Pledged Amount"),
    sidebarLayout(
      sidebarPanel(
        choose_maincateg("main_categ2"),
        strong("Overall Interpretation"),
        p("Looking at the relations between the three variables for different
          categories confirms us that there is no relation between the Number
          of Backers or the Pledged Amount and the Time taken for project
          completion. So, the backers are really interested in the outcome of
          the project rather than the early completion of them. Hence the
          students need not be thinking only about quick projects which gives
          them better scope to apply their knowledge and expertise."),
        p("But it is interesting to note that in every category, Number
          of Backers and the Pledged Amount (USD) are positively correlated. So,
          attracting more backers can provide students with more funds. This might
          also mean that having low number of backers brings only little funds.
          But that is not the case with every project as there are projects with
          low number of backers but high investments. So we need to explore more
          variables using better statistical analysis techniques to get more
          insight into the relation between Number of Backers and Pledged Amount
          (USD)")
      ),
      mainPanel(
        strong("Description"),
        p("This plot simultaneously explores the relation between the three
          variables: Number of Backers, Pledged Amount (USD) and Time Taken for
          the project completion. The primary goal is to determine if Time
          affects the other two variables in any way."),
        plotlyOutput("threed"),
        strong("Summary"),
        textOutput("summary")
      )
    )
  ),
  # Tab 5: filterable summary table of individual projects.
  tabPanel(
    "Data Insights",
    sidebarLayout(
      mainPanel(
        style = "overflow-y:scroll; max-height: 1000px",
        h4("Here is a convenient summary table that allows you to
           filter individual projects. It helps the user to look at
           specific important things like projects with zero backers
           or projects with lowest pledged amount"),
        h4(strong("Kickstarter Projects (2009-2018)")),
        tableOutput("table")
      ),
      sidebarPanel(
        choose_maincateg("main_categ1"),
        sliderInput("year",
          label = "Years of Interest",
          value = range(latest_data$year),
          min = range(latest_data$year)[1],
          max = range(latest_data$year)[2]
        ),
        sliderInput("backers",
          label = "Number of Backers",
          value = range(latest_data$backers),
          min = range(latest_data$backers)[1],
          max = range(latest_data$backers)[2]
        ),
        sliderInput("pledged",
          label = "Amount Pledged",
          value = range(latest_data$usd_pledged_real),
          min = range(latest_data$usd_pledged_real)[1],
          max = range(latest_data$usd_pledged_real)[2]
        ),
        sliderInput("goal",
          label = "Goal Amount",
          value = range(latest_data$usd_goal_real),
          min = range(latest_data$usd_goal_real)[1],
          max = range(latest_data$usd_goal_real)[2]
        )
      )
    )
  )
)
|
3951879bf2221ade8b19c8582e974ffb07a614ed | fb6d7b0c143c21402400b1c7d79926ab93ec7b46 | /presentation/regressionFunctions.R | 7bd36bd5f4bd00a55c40d2816ac036b550e8f330 | [] | no_license | jonasnick/regression | ecd659591312212565ebf1a2402bfeb06d756264 | cd782af2ba527eabc30a6badbe069c6b7c67dc5b | refs/heads/master | 2018-12-29T19:19:07.211923 | 2013-02-18T15:37:35 | 2013-02-18T15:37:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,145 | r | regressionFunctions.R | library(vcd)
library(lattice)
library(ggplot2)
# Logistic (sigmoid) function: maps any real z into the open interval (0, 1).
g <- function(z) {
  1 / (1 + exp(-z))
}
# Cross-entropy loss term for a positive example: -log(x).
log1 <- function(x) {
  -log(x)
}
# Cross-entropy loss term for a negative example: -log(1 - x).
# NOTE(review): this definition masks base::log2 (the binary logarithm) for
# the rest of the session; any later call intending base-2 logs will silently
# use this instead. Consider renaming if callers allow.
log2 <- function(x) {
  return(-log(1 - x))
}
# Quadratic used to generate example data: 0.5 + x + 2 * x^2.
polynomial <- function(x) {
  0.5 + x + 2 * x^2
}
# Ordinary least-squares cost: J(theta) = (1 / (2m)) * sum((X %*% theta - y)^2)
#
# X:     design matrix (m observations x p features)
# y:     response vector of length m
# theta: coefficient vector of length p
# Returns the scalar halved mean squared error, as used in gradient descent.
# (Leftover commented-out debug fixtures removed.)
J <- function(X, y, theta) {
  m <- length(y)
  residuals <- (X %*% theta) - y
  sum(residuals^2) / (2 * m)
}
# Simulate a reproducible (X, Y) dataset for regression demos.
#
# n:          number of observations
# xmin, xmax: range of the uniformly drawn predictor X
# seed:       RNG seed (default 101 preserves the original behavior; note
#             this resets the global RNG state as a side effect)
# Returns a data.frame with columns X and Y, where
# Y ~ Normal(50 + 0.1*X + 30*X^(-0.2), sd = 1).
generateDataset <- function(n, xmin, xmax, seed = 101) {
  set.seed(seed)
  # runif already returns a plain numeric vector; the original unlist() was
  # a no-op and has been dropped.
  X <- runif(n, min = xmin, max = xmax)
  Y <- rnorm(n, 50 + 0.1 * X + 30 * X^(-0.2), 1)
  data.frame(X = X, Y = Y)
}
# Binarize a scalar probability at the conventional 0.5 cutoff.
# Returns NA for missing input, 1 for x >= 0.5, otherwise 0.
threshold <- function(x) {
  if (is.na(x)) {
    return(NA)
  }
  if (x >= 0.5) 1 else 0
}
# Element-wise coalesce: take values from `x`, falling back to `y` wherever
# `x` is NA (like dplyr::coalesce(x, y)).
# If the vectors differ in length, reports the problem and returns NA,
# preserving the original contract.
# The vectorized body also fixes the original 1:length(x) loop, which
# iterated over c(1, 0) for zero-length input.
takeFrom <- function (x, y) {
  if (length(x) != length(y)) {
    print("Dimensions unequal")
    return(NA)
  }
  out <- x
  missing_idx <- is.na(x)
  out[missing_idx] <- y[missing_idx]
  out
}
# Exploratory scratchpad for Titanic survival models (logistic regression).
# Deliberately wrapped in a never-called function so sourcing this file does
# not execute it; the statements are meant to be run interactively.
# NOTE(review): the body would not run cleanly top-to-bottom as written —
# see the inline notes below.
dontDo <- function() {
  titanicTrainData <- read.csv("titanicTrain.csv")
  titanicTestData <- read.csv("titanicTest.csv")
  head(titanicTestData)
  # Treat class, port of embarkation, and sex as categorical predictors.
  titanicTrainData$pclass <- as.factor(titanicTrainData$pclass)
  titanicTrainData$embarked <- as.factor(titanicTrainData$embarked)
  titanicTrainData$sex <- as.factor(titanicTrainData$sex)
  titanicTestData$pclass <- as.factor(titanicTestData$pclass)
  titanicTestData$sex <- as.factor(titanicTestData$sex)
  titanicTestData$embarked <- as.factor(titanicTestData$embarked)
  # Keep an NA-free copy of the training data for accuracy evaluation.
  titanicTrainTest <- titanicTrainData
  titanicTrainData$survived <- as.factor(titanicTrainData$survived)
  titanicTrainTest <- na.omit(titanicTrainTest)
  # Fraction of correct predictions of the current `model` on the NA-free
  # training split. Relies on `model`, `titanicTrainTest`, and `threshold`
  # from the enclosing/global scope.
  testModel <- function() {
    titanicTrainPrediction = sapply((predict(model, type="response", newdata=titanicTrainTest)),threshold)
    titanicTrainTest$survived <- as.numeric(titanicTrainTest$survived)
    difference = titanicTrainTest$survived - titanicTrainPrediction
    correctFraction = (1-sum(abs(difference))/length(difference))
    return(correctFraction)
  }
  # Drop the free-text name column before exploration.
  titanicTrainData <- titanicTrainData[,!names(titanicTrainData) %in% c("name")]
  par(cex=1.6)
  plot(survived~age,data=titanicTrainData)
  colnames(titanicTrainData)
  # Mosaic plots (vcd) of survival against candidate predictors.
  mosaic(survived~sex, data=titanicTrainData,shade = TRUE,
         labeling_args=list(gp_labels=gpar(fontsize=16),
                            gp_varnames=gpar(fontsize=18)),
         legend_args=list(fontsize=16),
         margins=unit(4, "lines"),
         legend_width=unit(7, "lines"))
  mosaic(titanicTrainData$survived~titanicTrainData$sex)
  table(titanicTrainData$survived,titanicTrainData$sex)
  plot(titanicTrainData$survived~titanicTrainData$pclass)
  mosaic(titanicTrainData$survived~titanicTrainData$pclass, shade = TRUE,
         labeling_args=list(gp_labels=gpar(fontsize=16),
                            gp_varnames=gpar(fontsize=18)),
         legend_args=list(fontsize=16),
         margins=unit(4, "lines"),
         legend_width=unit(7, "lines"))
  table(titanicTrainData$survived,titanicTrainData$pclass)
  plot(titanicTrainData$survived~titanicTrainData$sibsp)
  mosaic(titanicTrainData$survived~titanicTrainData$sibsp, shade=TRUE)
  plot(titanicTrainData$survived~titanicTrainData$fare)
  plot(titanicTrainData$survived~titanicTrainData$embarked)
  mosaic(titanicTrainData$survived~titanicTrainData$embarked, shade=TRUE)
  plot(titanicTrainData$fare~titanicTrainData$embarked)
  plot(titanicTrainData$fare~titanicTrainData$pclass)
  plot(density(na.omit(titanicTrainData$age)))
  # Successive logistic-regression candidates, each scored via testModel().
  model <- glm(survived~age + sex + I(pclass==1) + I(pclass==2),data=titanicTrainData, family=binomial("logit"))
  summary(model)
  testModel()
  drop1(model, test="Chisq")
  model <- glm(survived~I(age^3) + I(age) + sex + I(pclass==1) + I(pclass==2),data=titanicTrainData, family=binomial("logit"))
  summary(model)
  testModel()
  model <- glm(survived~I(log(age)) + sex + I(pclass==1) + I(pclass==2) ,data=titanicTrainData, family=binomial("logit"))
  summary(model)
  testModel()
  # Crude NA handling: replace all remaining missing values with 0.
  titanicTrainData[is.na(titanicTrainData)] <- 0
  model <- glm(survived~I(log(age)) + sex + fare + pclass + sibsp,data=titanicTrainData, family=binomial("logit"))
  testModel()
  model <- glm(survived~I(log(age)) + sex + fare * pclass + sibsp,data=titanicTrainData, family=binomial("logit"))
  model <- glm(survived~I(log(age)) + sex + pclass + sibsp,data=titanicTrainData, family=binomial("logit"))
  model <- glm(survived~I(log(age)) + sex + sex:pclass + sibsp,data=titanicTrainData, family=binomial("logit"))
  testModel()
  print("Testdaten: 0.751% korrekte Vorhersagen")
  summary(model)
  # improvement through conditioning on a variable
  # NOTE(review): package name looks misspelled — the mixed-models package
  # is 'lme4' (and it is not needed for the glm() calls below anyway).
  library(lmer4)
  #titanicTestData[is.na(titanicTestData)] <- 0
  #bad choice
  #titanicTestData <- na.omit(titanicTestData)
  model <- glm(survived ~ age + sex + I(pclass==1)
               + I(pclass==2),data=titanicTrainData,
               family=binomial("logit"))
  # Fallback model without `age`, used where age is missing in the test set.
  modelDropAge <- glm(survived ~sex + I(pclass==1)
                      + I(pclass==2),data=titanicTrainData,
                      family=binomial("logit"))
  testModel()
  model <- glm(survived ~ I(log(age)) + pclass:sex + sibsp,data=titanicTrainData,
               family=binomial("logit"))
  modelDropAge <- glm(survived~ sex + pclass:sex + sibsp,data=titanicTrainData, family=binomial("logit"))
  titanicTestData$dropAgeSurvived = sapply(predict(modelDropAge, type="response", newdata=titanicTestData,na.action=na.exclude),threshold)
  testModel()
  summary(model)
  # NOTE(review): no `family` column is created anywhere above — this
  # presumably expected a sibsp/parch-derived indicator; confirm its origin.
  titanicTrainData$family <- sapply(titanicTrainData$family, function(x){if(x==0) {return(FALSE)} else {return(TRUE)}})
  titanicTrainData$family <- as.factor(titanicTrainData$family)
  plot(survived ~ family, data = titanicTrainData)
  mosaic(survived~ sex + pclass, data=titanicTrainData,)
  # Final predictions: the full model where possible, the age-free fallback
  # (via takeFrom) where the full model yields NA.
  titanicTestPrediction = sapply(predict(model, type="response", newdata=titanicTestData,na.action=na.pass),threshold)
  titanicTestPrediction <- takeFrom(titanicTestPrediction, titanicTestData$dropAgeSurvived)
  write.csv(titanicTestPrediction, "~/Dropbox/UNISem5/MaschinellesLernen/regression/titanicTestPrediction.csv", row.names=FALSE)
  summary(model)
  anova(model, test="Chisq")
}
|
d15a209c277da22710842b70b418505996acad63 | 5f33cd50aeadf12308856f26b59aad71c30b64f0 | /exploreData.R | 8ca07ab7de14271129dcd7d4d3f36616e6930143 | [] | no_license | petershahlevlnow/DS450_DetroitBlight | f5f96f00085cc3d0236c67236ec6a5d29f872a34 | d52f8f7b1c992637ba9119ab34f9deeb0d35d368 | refs/heads/master | 2021-08-23T14:06:47.993240 | 2017-12-05T02:56:01 | 2017-12-05T02:56:01 | 110,785,678 | 0 | 0 | null | 2017-11-15T23:04:12 | 2017-11-15T05:00:21 | R | UTF-8 | R | false | false | 3,819 | r | exploreData.R | # explore data
# 1. plot frequencies of incidents on a google map
source("mutationsData.R")
# get google map
library(ggplot2)
library(ggmap)
library(reshape2)
library(gridExtra)
# citation:
# D. Kahle and H. Wickham. ggmap: Spatial Visualization with ggplot2. The R Journal, 5(1), 144-161. URL
# http://journal.r-project.org/archive/2013-1/kahle-wickham.pdf
# Center the Google map on the median coordinates of all incidents.
# (`detAll` is assumed to come from mutationsData.R sourced above — confirm.)
centers <- lapply(detAll[, c("lat", "long")], median)
detMap <- get_googlemap(center = c(lon = centers$long, lat = centers$lat),
                        size = c(640, 640),
                        scale = 1,
                        zoom = 11,
                        maptype = "roadmap")
# melt frequencies for plotting
melt.detAll <- melt(detAll[, c("lat", "long", "nCrime", "n311", "nDemo", "nBlight")], id = c("lat", "long"))
# plot frequencies on map
detMap.freq <- ggmap(detMap) + geom_point(data = sample_n(melt.detAll %>% group_by(variable), 1000),
                                          aes(x = long, y = lat, color = variable, size = value), alpha = 0.3)
detMap.freq
# plot contour density on map
# NOTE(review): this contour version of detMap.den.crime is immediately
# overwritten by the raster version below and is never displayed — dead
# assignment kept only as an exploratory alternative.
detMap.den.crime <- ggmap(detMap) + stat_density2d(data = melt.detAll %>% filter(variable == "nCrime"),
                                                   aes(x = long, y = lat), bins = 30)
# plot raster
# set color ramp
colfunc <- colorRampPalette(c("white", "lightblue", "green", "yellow", "red"))
# One density raster per incident type (5000-point samples keep this fast).
detMap.den.crime <- ggmap(detMap) + stat_density2d(data = sample_n(melt.detAll %>% filter(variable == "nCrime"), 5000),
                                                   aes(x = long, y = lat, fill = ..density..),
                                                   geom = "tile", contour = FALSE, alpha = 0.3) +
  scale_fill_gradientn(colours=colfunc(400)) + ggtitle("Criminal Incidents 2016")
detMap.den.311 <- ggmap(detMap) + stat_density2d(data = sample_n(melt.detAll %>% filter(variable == "n311"), 5000),
                                                 aes(x = long, y = lat, fill = ..density..),
                                                 geom = "tile", contour = FALSE, alpha = 0.3) +
  scale_fill_gradientn(colours=colfunc(400)) + ggtitle("311 Incidents 2016")
detMap.den.demo <- ggmap(detMap) + stat_density2d(data = sample_n(melt.detAll %>% filter(variable == "nDemo"), 5000),
                                                  aes(x = long, y = lat, fill = ..density..),
                                                  geom = "tile", contour = FALSE, alpha = 0.3) +
  scale_fill_gradientn(colours=colfunc(400)) + ggtitle("Demolitions 2016")
detMap.den.blight <- ggmap(detMap) + stat_density2d(data = sample_n(melt.detAll %>% filter(variable == "nBlight"), 5000),
                                                    aes(x = long, y = lat, fill = ..density..),
                                                    geom = "tile", contour = FALSE, alpha = 0.3) +
  scale_fill_gradientn(colours=colfunc(400)) + ggtitle("Blight Violations YTD 2017")
# detMap.den.crime
# detMap.den.311
# detMap.den.demo
# detMap.den.blight
# 2x2 panel of the four density rasters.
grid.arrange(detMap.den.crime, detMap.den.311, detMap.den.demo, detMap.den.blight, nrow = 2, ncol = 2)
# correlation plot
# is there a correlation between frequency of crime, 311, demos, and blight?
# There doesn't seem to be much correlation.
library(corrplot)
cors <- cor(detAll %>% select(nCrime, n311, nDemo, nBlight), method = 'pearson')
# cor.mtest.2 (significance matrix) is defined in cormtest.R.
source('cormtest.R', echo=TRUE)
corm <- cor.mtest.2(cors)
# add cor.mtest for p.value matrix.... look in help
corrplot(cors, p.mat = corm[[1]], insig = "blank", method = "color",
         addCoef.col="grey",
         order = "AOE", tl.cex = 0.8,
         cl.cex = 1/par("cex"), addCoefasPercent = FALSE)
|
6ffe0b7e2c39067e79497a56d9d0f2d800555e58 | c2ccb0dd4b55d054784f94d0ee65423e6e2be91e | /man/handle_error_response.Rd | 3220916eb58a73bc14f8e58bc0c4e6f7f4269f26 | [
"MIT"
] | permissive | skvrnami/uwebasr2 | 630e389cebc2e62bca0cc7f8687d2f4e2c1ea78d | fdfed5c89ba33b35016b917cfbf6bd39db259bcb | refs/heads/main | 2023-08-15T00:18:57.557604 | 2021-09-22T11:33:49 | 2021-09-22T11:33:49 | 403,677,917 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 379 | rd | handle_error_response.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/funs.R
\name{handle_error_response}
\alias{handle_error_response}
\title{Handle response other than 200}
\usage{
handle_error_response(response)
}
\arguments{
\item{response}{Response from the server}
}
\value{
No return value, called for side effects
}
\description{
Handle response other than 200
}
|
e11de23ea0d37216c8e0812c7300529a9a7d0a13 | fd29d3a4c4edc0ebfe76013d61ca18e7cd7d3fde | /R/packagename.R | dc9fe9eba79a1ccd5ceae00395242ba95b6c1284 | [] | no_license | rui0027/lab03group5 | 48e977f20e9d1fb3d497c3cc34628c91f811ab12 | 6f534b669b52999b882128898854f2b5193702c2 | refs/heads/main | 2023-08-28T02:41:17.763887 | 2021-10-06T15:48:30 | 2021-10-06T15:48:30 | 405,479,082 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,145 | r | packagename.R | #'lab03:a package to implement Dijkstra algorithm and Euclidean algorithm.
#'
#'The package provides two functions: dijkstra and euclidean.
#'
#'@section The dijkstra function: This function uses Dijkstra's algorithm to
#'find the shortest distance between two nodes in a graph. Starting from the
#'init_node, the algorithm creates two sets, one for unvisited nodes and one
#'for visited nodes. It assigns a tentative distance to every node: zero for
#'init_node and infinity for all other nodes. For the current node, it
#'calculates the distance to each neighbor and compares it with the known
#'distance, keeping the shorter one. Once the shortest distance to a node is
#'settled, that node is moved into the visited set and a new current node is
#'chosen. This step is repeated until all nodes are visited. The euclidean
#'function: This function is a loop implementing the Euclidean algorithm to
#'find the greatest common divisor (GCD) of two nonzero numbers x and y. If
#'the larger number divided by the smaller number leaves a nonzero remainder,
#'the larger number is replaced by the remainder. This step is repeated until
#'the remainder is zero; the remaining divisor is then the GCD of the
#'original two numbers.
#'
NULL
|
9210addfd0ca1fb9c139df1d09384325f7daf756 | fb7655e2bcfc5ee8c228eed0684e7516eee432f8 | /03_analysis/paper_figures/Science_Brief_Figure.R | 1f13fbced5e829068490c528202bc3d9f88b75c0 | [] | no_license | galsk223/tribalclimate | 738e7ea2e4c74b142d84f3e00f4eb7575e8f89dd | bced46be1953ae06b54a1b7a9bda48523b98fff8 | refs/heads/main | 2023-07-24T07:12:27.477008 | 2021-08-30T20:16:10 | 2021-08-30T20:16:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,275 | r | Science_Brief_Figure.R |
library(tidyverse)
library(cowplot)
# CI's for Present-Day ----------------------------------------------------
# Coefficient covariance matrices exported from the fitted models, one file
# per outcome (heat days, wildfire hazard potential, precipitation, oil/gas);
# used below to build standard errors at T2.
heat_cov <- read_csv("heat_var.csv")
whp_cov <- read_csv("whp_var.csv")
precip_cov <- read_csv("precip_var.csv")
og_cov <- read_csv("og_var.csv")
# Standard error at T2 from a 2x2 coefficient covariance matrix:
# sqrt(Var(b1) + Var(b2) + 2 * Cov(b1, b2)), i.e. the SE of the SUM of the
# two coefficients.
se_t2 <- function(cov_matrix) {
  total_var <- cov_matrix[1, 1] + cov_matrix[2, 2] + 2 * cov_matrix[1, 2]
  sqrt(total_var)
}
# Confirm function works as it should - test on heat covariance matrix
# NOTE(review): the hard-coded check below uses MINUS 2*cov (the SE of a
# DIFFERENCE of coefficients), while se_t2() uses PLUS 2*cov (the SE of a
# sum) — the two lines only agree when the covariance term is zero. Confirm
# which sign is intended for the T2 estimate.
sqrt(heat_cov[1,1] + heat_cov[2,2] - 2*heat_cov[1,2]) # hard code
se_t2(heat_cov) # function code
# SE's at T2
heat_se <- se_t2(heat_cov)
whp_se <- se_t2(whp_cov)
precip_se <- se_t2(precip_cov)
og_se <- se_t2(og_cov)
# CI's at T2: point estimate +/- 1.96 * SE; the point estimates (6.96,
# 698.26, .17, 1.77) are hard-coded from the fitted-model output.
6.96+heat_se*1.96 # heat upper
6.96-heat_se*1.96 # heat lower
698.26+precip_se*1.96 # precip upper
698.26-precip_se*1.96 # precip lower
.17+og_se*1.96 # og upper
.17-og_se*1.96 # og lower
1.77+whp_se*1.96 # whp upper
1.77-whp_se*1.96 # whp lower
# Heat Days ---------------------------------------------------------------
# Days per year above 100F, historical vs. present day; proportion_low/upp
# are the 95% CI bounds (values hard-coded from the model output above).
heat_days <- tibble(
  heat = c(1.15, 6.96),
  proportion_low = c(.89, 4.48),
  proportion_upp = c(1.42, 9.45),
  time = c("Historical", "Present Day")
)
# Two-point palette (historical tan, present-day blue), reused by the
# precipitation and oil/gas plots below. (The original first assigned a
# "#994F00"-based palette and immediately overwrote it; the dead assignment
# has been removed.)
my_pal <- c("#cc9966", "#006CD1")
heat_plot <- ggplot(heat_days, aes(time, heat, group = 1)) + # label = heat
  geom_line(color = "black", linetype = "dashed") +
  geom_point(stat = "identity", size = 4, color = my_pal) + # alpha = .7
  geom_errorbar(aes(ymin = proportion_low, ymax = proportion_upp),
                width = .05, color = my_pal) +
  theme_bw() +
  ylab("Days per year in excess of 100 degrees F") + xlab("") +
  scale_y_continuous(breaks=seq(0, 10, 2), limits=c(0, 10)) +
  #geom_text(nudge_x = 0.2) +
  theme(axis.line = element_line(colour = "black"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        panel.border = element_blank(),
        panel.background = element_blank(),
        axis.title = element_text(size = 10),
        # axis.text.x = element_text(size = 10),
        axis.text.y = element_text(margin = margin(t = 0, r = 0, b = 0, l = 13)))
# Precipitation -----------------------------------------------------------
# Mean annual precipitation (millimeters): point estimates with 95% CI
# bounds, historical vs. present-day tribal lands.
precip <- tibble(
  precip = c(905.50, 698.26),
  proportion_low = c(835.61, 636.30),
  proportion_upp = c(975.40, 760.22),
  time = c("Historical", "Present Day")
)

precip_plot <- ggplot(precip, aes(x = time, y = precip, group = 1)) +
  geom_line(linetype = "dashed", color = "black") +
  geom_point(stat = "identity", color = my_pal, size = 4) +
  geom_errorbar(
    aes(ymin = proportion_low, ymax = proportion_upp),
    color = my_pal, width = .05
  ) +
  theme_bw() +
  labs(x = "", y = "Mean annual precipitation (millimeters)") +
  scale_y_continuous(limits = c(500, 1000), breaks = seq(500, 1000, 100)) +
  # Remove the theme_bw() panel box and gridlines, leaving plain axis
  # lines, and pad the y-axis tick labels on the left.
  theme(
    panel.background = element_blank(),
    panel.border = element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.line = element_line(colour = "black"),
    axis.title = element_text(size = 10),
    axis.text.y = element_text(margin = margin(t = 0, r = 0, b = 0, l = 13))
  )
# Mineral Potential -------------------------------------------------------
# Proportion of land with subsurface oil and gas: point estimates with
# 95% CI bounds, historical vs. present-day tribal lands.
og <- tibble(
  og = c(.21, .17),
  proportion_low = c(.17, .13),
  proportion_upp = c(.25, .21),
  time = c("Historical", "Present Day")
)

og_plot <- ggplot(og, aes(x = time, y = og, group = 1)) +
  geom_line(linetype = "dashed", color = "black") +
  geom_point(stat = "identity", color = my_pal, size = 4) +
  geom_errorbar(
    aes(ymin = proportion_low, ymax = proportion_upp),
    color = my_pal, width = .05
  ) +
  theme_bw() +
  labs(x = "", y = "Portion land with subsurface oil and gas") +
  scale_y_continuous(limits = c(0, .3), breaks = seq(0, 1, .1)) +
  # Remove the theme_bw() panel box and gridlines, leaving plain axis
  # lines, and pad the y-axis tick labels on the left.
  theme(
    panel.background = element_blank(),
    panel.border = element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.line = element_line(colour = "black"),
    axis.title = element_text(size = 10),
    axis.text.y = element_text(margin = margin(t = 0, r = 0, b = 0, l = 13))
  )
# Wildfire -----------------------------------------------------------
# Mean wildfire hazard potential: point estimates with 95% CI bounds,
# historical vs. present-day tribal lands.
wildfire <- tibble(
  wildfire = c(1.36, 1.77),
  proportion_low = c(1.30, 1.63),
  proportion_upp = c(1.42, 1.91),
  time = c("Historical", "Present Day")
)

wildfire_plot <- ggplot(wildfire, aes(x = time, y = wildfire, group = 1)) +
  geom_line(linetype = "dashed", color = "black") +
  geom_point(stat = "identity", color = my_pal, size = 4) +
  geom_errorbar(
    aes(ymin = proportion_low, ymax = proportion_upp),
    color = my_pal, width = .05
  ) +
  theme_bw() +
  labs(x = "", y = "Mean Wildfire Hazard Potential") +
  scale_y_continuous(limits = c(1, 3), breaks = seq(1, 3, .5)) +
  # Remove the theme_bw() panel box and gridlines, leaving plain axis
  # lines, and pad the y-axis tick labels on the left.
  theme(
    panel.background = element_blank(),
    panel.border = element_blank(),
    panel.grid.major = element_blank(),
    panel.grid.minor = element_blank(),
    axis.line = element_line(colour = "black"),
    axis.title = element_text(size = 10),
    axis.text.y = element_text(margin = margin(t = 0, r = 0, b = 0, l = 13))
  )
# Combine all plots -------------------------------------------------------
# Arrange the four panels in a single row with cowplot::plot_grid().
all_plots <- plot_grid(heat_plot + theme(legend.position = "none"),
                       wildfire_plot + theme(legend.position = "none"),
                       precip_plot + theme(legend.position = "none"),
                       og_plot + theme(legend.position = "none"),
                       #labels = c("A","B","C","D"),
                       nrow = 1,
                       label_size = 10)

# BUG FIX: all_plots was previously overwritten by a call to
# ggpubr::ggarrange(), but ggpubr is never loaded, so the script errored
# here and the plot_grid() result above was dead code. The ggarrange()
# call has been removed.
# BUG FIX: ggsave() previously used plot = last_plot(), which returns the
# most recently created *single* ggplot rather than the combined grid;
# the combined object is now saved explicitly.
# NOTE(review): the output path is machine-specific -- parameterize
# before sharing this script.
ggsave("/Users/kathrynmcconnell/Dropbox (Yale_FES)/tribal_lands_scratch/summary_figure.eps",
       plot = all_plots,
       width = 10.5,
       height = 3.5,
       dpi = 300)
|
97521707ab1c83d1fc2207bf15ce344cec88dbbb | 49d3de427798436e84e461d20040cc1a76d18876 | /EqSimWHM/man/eqsr_plot.Rd | ada3223a9000af515d80e922b6b67370e8fe65e4 | [] | no_license | ices-eg/wk_WKREBUILD | 34ce8d2d5a5d0d9165501523b71944b40a0189bc | 6884f5410b009766f5ddde5262d2d9b24fe01636 | refs/heads/master | 2021-06-19T09:19:58.455078 | 2021-04-14T12:56:07 | 2021-04-14T12:56:07 | 200,839,265 | 0 | 0 | null | 2021-03-03T07:36:31 | 2019-08-06T11:38:17 | R | UTF-8 | R | false | true | 1,322 | rd | eqsr_plot.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/eqsr_plot.R
\name{eqsr_plot}
\alias{eqsr_plot}
\title{Plot Simulated Predictive Distribution of Recruitment}
\usage{
eqsr_plot(fit, n = 20000, x.mult = 1.1, y.mult = 1.4,
ggPlot = FALSE, Scale = 1)
}
\arguments{
\item{fit}{a fitted stock-recruitment model returned from \code{eqsr_fit}}
\item{n}{Number of random recruitment draws to plot}
\item{x.mult}{max value for the x axis (ssb) as a multiplier of maximum
observed ssb}
\item{y.mult}{max value for the y axis (rec) as a multiplier of maximum
observed rec}
\item{ggPlot}{Flag, if FALSE (default) plot using base graphics, if TRUE
do a ggplot}
\item{Scale}{Numeric value for scaling variables in the plot.}
}
\value{
NULL produces a plot
}
\description{
Plot Simulated Predictive Distribution of Recruitment
}
\examples{
\dontrun{
data(icesStocks)
FIT <- eqsr_fit(icesStocks$saiNS,
nsamp = 1000,
models = c("Ricker", "Segreg"))
eqsr_plot(FIT, n = 20000)
# Scale argument only available for ggPlot = TRUE
eqsr_plot(FIT, n = 20000, ggPlot = TRUE, Scale = 1000)
}
}
\seealso{
\code{\link{eqsr_fit}} Fits several stock recruitment models to a data set
and calculates the proportion contribution of each model based on a bootstrap
model averaging procedure.
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.