blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8dd1a4dc90c2228a3336b88bee956fa9b10b5b55 | 1675bba9502914df14305dd269efc1839d9701a0 | /distribution.R | a249d34022fa1640aa13bde0083e20c7c2cecca2 | [] | no_license | jmpasmoi/pubop | 38e336fb748c909937d9c19481ae4319c267013b | d065cb61e1d6e7351485a3b571b915e173e931e0 | refs/heads/master | 2023-03-16T19:42:18.854242 | 2020-01-19T15:02:22 | 2020-01-19T15:02:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 639 | r | distribution.R |
# Distribution plots for the AFBRS transaction/survey data.
library(tidyverse)
# `choice` arrives numerically coded in the CSV; cast it to character so
# ggplot treats it as a discrete fill variable below.
df <- read.csv("data/afbrs_transdf.csv") %>%
  mutate(choice=as.character(choice))
#Data Distribution
# Counts per region, stacked by choice, one panel per sector; bars flipped
# horizontal and axis labels rotated for readability.
ggplot(df, aes(region)) +
  geom_bar(aes(fill = choice), width = 0.5) +
  theme(axis.text.x = element_text(angle = 65, vjust = 0.6)) +
  labs(x = "Region", y = "Count") + facet_grid(. ~ sector) +
  coord_flip() + ggpubr::rotate_x_text()
# Same layout, stacked by sector and faceted by expectation.
ggplot(df, aes(region)) +
  geom_bar(aes(fill = sector), width = 0.5) +
  theme(axis.text.x = element_text(angle = 65, vjust = 0.6)) +
  labs(x = "Region", y = "Count") +
  facet_grid(. ~ expectation) + coord_flip() + ggpubr::rotate_x_text()
|
d1aedffc809f502fe65f3489d8680cc13e373cf0 | 74fe29da37e54fb5e49a1ae7d4cf5051428202eb | /R/hl_competitiveness.R | ac15afea522f3f0f9fd9b0fe89962d88a7464048 | [] | no_license | CRAFTY-ABM/craftyr | 7fd8e63f85f4ddc13fbb0a79b67710a7b5a818f2 | 5630d1f0e4a1b1c34e3d10740640d414346f1af4 | refs/heads/master | 2022-08-11T13:20:13.579266 | 2018-06-16T06:55:19 | 2018-06-16T06:55:19 | 266,212,786 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,628 | r | hl_competitiveness.R | #' Calculate and plot competitiveness for every AFT on every cell
#' For every cell and every (managed) AFT, compute the competitiveness implied
#' by the start-tick demand -- assuming no supply exists yet, so the residual
#' demand equals the per-cell share of total demand -- and plot the result as
#' one map per AFT.
#'
#' @param simp SIMulation Parameters list; reads \code{simp$dirs},
#'        \code{simp$sim}, \code{simp$mdata$aftNames} and
#'        \code{simp$submodels$comp$sfuncs} (per-service competition functions)
#' @param dirtocapitals directory containing the capitals CSV file
#' @param capitalfilename file name of the capitals CSV file
#' @param dirtodemand directory containing the demand CSV file
#' @param demandfilename file name of the demand CSV file
#' @param dirtoproduction base directory of per-AFT production CSV files
#' @param productionfilenamepattern file name pattern; every \code{<AFT>} is
#'        substituted by the AFT name
#' @param returnplot if true the ggplot object is returned
#' @return plot
#'
#' @author Sascha Holzhauer
#' @export
hl_plotCompetitiveness <- function(simp,
		dirtocapitals = paste(simp$dirs$alloc, "/worlds/", simp$sim$world, "/regionalisations/",
				simp$sim$regionalisation, "/", simp$sim$scenario, sep=""),
		capitalfilename = paste(simp$sim$regionalisation, "_", simp$sim$regions, "_Capitals.csv", sep=""),
		dirtodemand = paste(simp$dirs$data, "/worlds/", simp$sim$world, "/regionalisations/",
				simp$sim$regionalisation, "/", simp$sim$scenario, sep=""),
		demandfilename = paste(simp$sim$regionalisation, "_", simp$sim$scenario, "_", simp$sim$regions,
				"_demand.csv", sep=""),
		dirtoproduction = paste(simp$dirs$data, "/production/", sep=""),
		productionfilenamepattern = "<AFT>/AftProduction_<AFT>.csv",
		returnplot = FALSE) {
	capitals <- read.csv(paste(dirtocapitals, capitalfilename, sep="/"))
	demand <- read.csv(paste(dirtodemand, demandfilename, sep="/"))
	# keep only the start tick's demand and drop the last column
	# (presumably the Year/tick column -- TODO confirm against the CSV layout)
	demand <- demand[demand$Year == simp$sim$starttick,-length(demand)]
	celldata <- data.frame()
	# skip the first AFT name (presumably the unmanaged AFT -- verify)
	for (aft in simp$mdata$aftNames[-1]) {
		production = read.csv(paste(dirtoproduction, gsub("<AFT>", aft, productionfilenamepattern, fixed=TRUE),
						sep=""), row.names = 1)
		#capitals = capitals[1:5,]
		# one row of output per cell: coordinates plus competitiveness
		compet <- t(apply(capitals, MARGIN=1, function(x) {
			# drop the first two columns (cell coordinates) to get capital levels
			caps <- x[ -c(1,2)]
			caps <- caps - 1
			# align capital order with the production table's capital columns
			caps <- caps[names(production[-length(production)])]
			# per-service potential production: prod(capital ^ sensitivity),
			# scaled by the table's last column
			product <- apply(production[-length(production)], MARGIN=1, function(x,y) {
				prod(y^x)
			}, caps)
			product <- product * production[,length(production)]
			# equal share of total demand per cell
			cellDemand <- demand/nrow(capitals)
			cellResidual <- cellDemand # no supply in first tick
			# apply the configured competition function of each service to its
			# residual demand
			comp <- mapply(function(x, name) {
				simp$submodels$comp$sfuncs[[name]](x)
			}, cellResidual, names(cellResidual))
			competitiveness = sum(comp * product)
			# x[1], x[2] carry the cell coordinates through to the plot data
			c(x[1], x[2], Competitiveness = competitiveness)
		}))
		celldata <- rbind(celldata, cbind(as.data.frame(compet), AFT = aft))
	}
	p1 <- visualise_cells_printPlots(simp, list(celldata), idcolumn = "AFT", valuecolumn = "Competitiveness",
			title = "Competitiveness", ncol = 3, returnplot = returnplot)
	if (returnplot) return(p1)
} |
2a122b384dd6a88ed2bfb19b10b07c9a7be50ff7 | b813091ffddfc5ecaf86b293f796e2d5cb5137f6 | /chapter2.R | 72a19a63c51f0405cf4e56938ec98af03ac47085 | [] | no_license | jtcies/statistical-rethinking | bca346fb54fb8e79f5dee57d67ecd2877826809c | 6d45edc73ea04ce8b27056f178332d67563d49dd | refs/heads/master | 2020-06-21T11:18:18.165620 | 2019-12-26T17:12:12 | 2019-12-26T17:12:12 | 197,434,046 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 837 | r | chapter2.R | library(tidyverse)
library(rethinking)
# grid
prior_grid <- seq(0, 1, length.out = 100)
prior <- ifelse(prior_grid < 0.5, 0, 1)
likelihood <- dbinom(6, size = 9, prob = prior_grid)
posterior <- prior * likelihood
sp <- posterior / sum(posterior)
qplot(prior_grid, sp, geom = "line")
## 2.6
globe_qa <- quap(
alist(
W ~ dbinom(W + L, p),
p <- dunif(0, 1)
), data = list(W = 6, L = 3)
)
precis(globe_qa)
#2.8
n_samples <- 1000
p <- rep( NA, n_samples )
p[1] <- 0.5
W <- 6
L <- 3
for ( i in 2:n_samples ) {
p_new <- rnorm( 1, p[ i - 1 ], 0.1 )
if ( p_new < 0 ) p_new <- abs( p_new )
if ( p_new > 1 ) p_new <- 2 - p_new
q0 <- dbinom( W, W + L, p[2 - 1] )
q1 <- dbinom( W, W + L, p_new )
p[2] <- ifelse( runif(1) < q1 / q0, p_new, p[2 - 1] )
}
## Homework week 1
### Number 3
|
ac64f741881776ab3a20c619b73da73ac5dbd101 | 4eb5cda5f02f054d64745ce91923dd1fa4ea9095 | /Vuln_Index/eck4.vulnSummaryScatterPlots.R | 358b308573f47df1091586c11bdea31a84c9e1ab | [] | no_license | mczapanskiy-usgs/WERC-SC | e2e7cb68616ef0492144c0ef97629abb280103ae | caec92b844c9af737dcfc8d6dbb736de79c3e71c | refs/heads/master | 2021-12-02T16:42:16.529960 | 2021-12-01T16:58:20 | 2021-12-01T16:58:20 | 36,815,737 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,451 | r | eck4.vulnSummaryScatterPlots.R | ## this script is used to graph the final vulnerability scores from the three indices
## for final presentation
## load data and toolboxes
# rank <- read.csv("VulnIndexFinalSensitivityScores.csv") ## matrix of final PV, CPV, and DPV 1-10 rankings
PVscores <- read.csv("PCV&PDVscores.csv") ## matrix of cumulative PV, CPV, and DPV before 1-10 ranking
PVscores <- PVscores[complete.cases(PVscores), ] # remove blank observations at bottom of matrix
scores <- read.csv("VulnScores.csv") ## matrix of final PV, CV, and DV
scores <- scores[complete.cases(scores), ] # remove blank observations at bottom of matrix
library(ggplot2)
## graph CPV vs DPV with species names as points
PVscores$AlphaCode <- factor(PVscores$AlphaCode, levels = PVscores$AlphaCode[order(PVscores$Order)]) # will keep original order of species
x1 <- ggplot(PVscores,
aes(ColBest,
DispBest,
label=as.character(AlphaCode))) +
geom_text(aes(color=Taxonomy),
size=5,
face="bold") +
scale_x_log10(limits = c(5, 1600)) +
scale_y_log10(limits = c(9, 1400), breaks = c(10, 100, 1000)) +
theme_bw(base_size = 14) +
ylab("Population Displacement Vulnerability") +
xlab("Population Collision Vulnerability") +
theme(legend.position = 'bottom') +
guides(color = guide_legend(nrow = 1,title = NULL))
# theme(legend.text = element_text(size=14),
# axis.title.y = element_text(size=rel(1.5)),
# axis.title.x=element_text(size=rel(1.5)))
# x4 <- x3 + theme(legend.position=c("bottom")) # put the legend in the graph, in the top right corner
# ## for raw CV and DV scores
# scores$AlphaCode <- factor(scores$AlphaCode, levels = scores$AlphaCode[order(scores$Order)]) # will keep original order of species
# p1 <- ggplot(scores, aes(ColBest, DispBest, label=as.character(AlphaCode))) + geom_text(aes(color=Groups, size=PopBest), face="bold") + theme_bw()
# p2 <- p1 + ylab("Displacement Vulnerability") + xlab("Collision Vulnerability") # + ylim(0,10) + xlim(0,10)
# p3 <- p2 + theme(legend.text=element_text(size=14), axis.title.y=element_text(size=rel(1.5)), axis.title.x=element_text(size=rel(1.5)))
# p4 <- p3 + theme(legend.justification=c(1,1), legend.position=c(1,1)) # put the legend in the graph, in the top right corner
# p4
# graph size = 1000x800
## for PCV and PDV scores
# y <- ggplot(PVscores, aes(ColBest, DispBest, label=as.character(AlphaCode))) + geom_text(aes(color=Groups, size=PopBest), face="bold") + theme_bw()
# y <- y + ylab("Displacement Vulnerability") + xlab("Collision Vulnerability") # + ylim(0,10) + xlim(0,10)
# y + theme(legend.text=element_text(size=14), axis.title.y=element_text(size=rel(1.5)), axis.title.x=element_text(size=rel(1.5)))
# y <- ggplot(PVscores, aes(ColBest, DispBest, label=as.character(AlphaCode))) + geom_text(aes(color=Groups), size=4, face="bold") + theme_bw()
# y <- y + ylab("Displacement Vulnerability") + xlab("Collision Vulnerability") # + ylim(0,10) + xlim(0,10)
# y + theme(legend.text=element_text(size=14), axis.title.y=element_text(size=rel(1.5)), axis.title.x=element_text(size=rel(1.5)))
## pie chart of collision sensitivities
## maintain order
# scores$AlphaCode <- factor(scores$AlphaCode, levels = scores$AlphaCode[order(scores$Order)]) # will keep original order of species
# CS <- ggplot(scores, aes(x=AlphaCode, fill = ColBest, color="red"))
# CS <- CS + geom_bar(width=1)+coord_polar()
|
6d15bc550555986d52a4d6ec0613c68920294685 | 064d55da94fed4ca7fb1e9b8966ef20db51a942b | /logLikelihood.R | 30b04689cf526395b473752a6d4372435def9e64 | [] | no_license | Jsockin/Rust_1997_R | f94715f9f3ecb8b71a8f479868fc479adf38112a | ec8f326a7e8f70b3c8fc325452a55ccdf0ac650b | refs/heads/master | 2021-08-23T11:33:41.758074 | 2017-12-04T18:34:05 | 2017-12-04T18:34:05 | 113,079,419 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 535 | r | logLikelihood.R | #
#
# Log-likelihood of observed binary choices given a conditional choice policy.
#
# xSamples: TT x NN matrix of states (0-based grid positions)
# aSamples: TT x NN matrix of binary choices (0/1)
# policyA:  policy matrix; row g+1 corresponds to state g,
#           column 1 = P(a = 0), column 2 = P(a = 1)
#
# Returns the log-likelihood summed over every (t, n) observation.
# Vectorized over the whole panel instead of the original scalar double loop
# (identical result, no per-element indexing in R).
logLikelihood <- function(xSamples, aSamples, policyA) {
  gridPos <- as.vector(xSamples) + 1L   # states are 0-based, rows are 1-based
  a <- as.vector(aSamples)
  sum(a * log(policyA[gridPos, 2]) + (1 - a) * log(policyA[gridPos, 1]))
}
12d0fc7b0dbec6b3839aadcc6d4b2331a335ee30 | effe14a2cd10c729731f08b501fdb9ff0b065791 | /cran/paws.security.identity/man/cognitoidentityprovider_admin_confirm_sign_up.Rd | fd799e3c9bcc19dedc95c85af5733d08ccd11708 | [
"Apache-2.0"
] | permissive | peoplecure/paws | 8fccc08d40093bb25e2fdf66dd5e38820f6d335a | 89f044704ef832a85a71249ce008f01821b1cf88 | refs/heads/master | 2020-06-02T16:00:40.294628 | 2019-06-08T23:00:39 | 2019-06-08T23:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 876 | rd | cognitoidentityprovider_admin_confirm_sign_up.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cognitoidentityprovider_operations.R
\name{cognitoidentityprovider_admin_confirm_sign_up}
\alias{cognitoidentityprovider_admin_confirm_sign_up}
\title{Confirms user registration as an admin without using a confirmation code}
\usage{
cognitoidentityprovider_admin_confirm_sign_up(UserPoolId, Username)
}
\arguments{
\item{UserPoolId}{[required] The user pool ID for which you want to confirm user registration.}
\item{Username}{[required] The user name for which you want to confirm user registration.}
}
\description{
Confirms user registration as an admin without using a confirmation
code. Works on any user.
}
\details{
Requires developer credentials.
}
\section{Request syntax}{
\preformatted{svc$admin_confirm_sign_up(
UserPoolId = "string",
Username = "string"
)
}
}
\keyword{internal}
|
2d55bc4b95fc49a9d25d914087a16a185ba007c5 | 602c144363277f2efc062d78bce139dd7fb75480 | /tests/testthat/try-parse_ergm_log.R | 211ec1407c213a1bbaa5aea5676b6735a5a252f9 | [] | no_license | mbojan/mbtools | 637c6cfba11a63d7a052867d0afa211af00060ad | ed7680f0f9ae68ea12f6bca403f1b686f20e9687 | refs/heads/master | 2022-07-14T03:25:05.525650 | 2022-06-25T18:43:09 | 2022-06-25T18:43:09 | 29,505,211 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,149 | r | try-parse_ergm_log.R | library(tidyverse)
library(tidytext)
op <- options(
tibble.print_min = 30
)
dane <- tibble(
text = readLines("~/Desktop/dhgwdeg0.1gwesp1-10k-cd.out"),
) %>%
rowid_to_column("line")
readcoef <- function(d, ...) {
con <- textConnection(d)
read.table(con, colClasses = "character", as.is=TRUE, header=TRUE)
}
d <- dane %>%
mutate(
has_iteration = grepl("Iteration [0-9]+ of at most [0-9]+", text),
has_starting = grepl("Starting unconstrained MCMC", text),
iteration = cumsum(has_iteration),
x = cumprod( 1 - 2*(has_iteration | has_starting) )
) %>%
filter(x == -1 & !has_iteration) %>%
group_by(iteration) %>%
mutate(
block = cumsum(i = seq(1, n()) %% 2 != 0)
) %>%
group_by(block) %>%
mutate(
wblock = seq(1, n())
) %>%
group_by(iteration, wblock) %>%
summarise(
z = paste(text, collapse = " ")
) %>%
group_by(iteration) %>%
summarise(
z = map(paste(z, collapse="\n"), readcoef)
) %>%
unnest() %>%
gather(coef, value, -iteration) %>%
mutate(value = as.numeric(value))
ggplot(d, aes(x=iteration, y=value)) +
geom_point() +
facet_wrap(~ coef, scales = "free")
|
84a4b779876ff91803fe3505a23a0f18b48770ec | 1a2917c0c2d5e75904a5089090b5d0b3dba1f11e | /RDA.arrow.R | 12d5f876788327ba2bd2488149b7f6710e59cc96 | [] | no_license | Shapy-Ray/Gutmicrobiome_PretermBirth | 718343ff6f1be4cb08fc37a6b1c9141dd31c9de7 | ddf1680ae805072e37ae0c258e970f4d696bcbe6 | refs/heads/main | 2023-02-15T09:31:10.067206 | 2021-01-15T10:22:48 | 2021-01-15T10:22:48 | 329,878,185 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,186 | r | RDA.arrow.R | library(ggplot2)
library(vegan)
library(dplyr)
library(scales)
library(grid)
library(reshape2)
library(phyloseq)
map <- read.table("Preterm.intestin.txt",sep = "\t",header = T)
map = map[complete.cases(map),]
map$group <- substr(map$SampleID,1,6)
otu <- import_biom(BIOMfilename = "table.json.even3000.biom")
map <- sample_data(map)
rownames(map) <- map$SampleID
moth_merge <- merge_phyloseq(otu, map)
moth_merge
colnames(tax_table(moth_merge)) <- c("Kingdom", "Phylum", "Class","Order", "Family", "Genus","Species")
erie <- moth_merge
bray_not_na <- phyloseq::distance(physeq = erie, method = "bray")
cap_ord <- ordinate(
physeq = erie,
method = "CAP",
distance = bray_not_na,
formula = ~ gestationalage.delivery + apgar.1min + apgar.5min + apgar.10min + BMI.delivery + age )
cap_plot <- plot_ordination(physeq = erie, ordination = cap_ord, color = "group", axes = c(1,2)) +
scale_color_manual(values = c("#a65628", "magenta"))
arrowmat <- vegan::scores(cap_ord, display = "bp")
arrowdf <- data.frame(labels = rownames(arrowmat), arrowmat)
arrow_map <- aes(xend = CAP1, yend = CAP2, x = 0, y = 0, shape = NULL, color = NULL, label = labels)
label_map <- aes(x = 1.3 * CAP1, y = 1.3 * CAP2, shape = NULL, color = NULL, label = labels)
arrowhead = arrow(length = unit(0.02, "npc"))
cap_plot +
geom_segment(mapping = arrow_map, size = .5, data = arrowdf, color = "gray", arrow = arrowhead) +
geom_text(mapping = label_map, size = 4,data = arrowdf, show.legend = FALSE) +
theme_bw()
ggsave("P.H.caparrow.pdf",width = 4,height = 3)
### subgroups
# Same CAP/biplot analysis restricted to the preterm subgroups, with extra
# clinical covariates. Reuses `arrowhead` defined in the analysis above.
map <- read.table("Preterm.intestin.pretermsubgroup.txt",sep = "\t",header = T)
map = map[complete.cases(map),]
#map$group <- substr(map$SampleID,1,6)
otu <- import_biom(BIOMfilename = "preterm.subgroups.even3000.biom")
map <- sample_data(map)
rownames(map) <- map$SampleID
moth_merge <- merge_phyloseq(otu, map)
moth_merge
colnames(tax_table(moth_merge)) <- c("Kingdom", "Phylum", "Class","Order", "Family", "Genus","Species")
erie <- moth_merge
bray_not_na <- phyloseq::distance(physeq = erie, method = "bray")
# CAP ordination constrained on the extended covariate set.
erie_CAP <- ordinate(
  physeq = erie,
  method = "CAP",
  distance = bray_not_na,
  formula = ~ gestationalage.delivery + apgar.1min + apgar.5min +
    apgar.10min + BMI.delivery + age + neoweight + remissionstage.day +CRP
)
RDAplot <- plot_ordination(physeq = erie,ordination = erie_CAP,color = "group") +
  scale_color_manual(values = c("#E96446", "#302F3D", "#87CEFA"))
# NOTE(review): the trailing comma passes an empty extra argument to
# scores(); consider removing it.
arrowmat <- scores(erie_CAP, display = "bp",)
arrowdf <- data.frame(labels = rownames(arrowmat), arrowmat)
arrow_map <- aes(xend = CAP1*1, yend = CAP2*1, x = 0, y = 0,
                 shape = NULL, color = NULL, label = labels)
# Labels placed slightly beyond the arrow tips (factor 1.2).
label_map <- aes(x = 1.2 * CAP1,y = 1.2 * CAP2, shape = NULL, color = NULL, label = labels)
RDAplot +
  geom_segment(
    mapping = arrow_map,
    size = .5,
    data = arrowdf,
    color = "gray",
    arrow = arrowhead
  ) +
  geom_text(
    mapping = label_map,
    size = 4,
    data = arrowdf,
    show.legend = FALSE
  ) +
  theme_bw()
ggsave("P.sub.caparrow.pdf",width = 5.3,height = 3)
|
ee30fee5689acd48971f1f6b22894a2aa3018b38 | ae92bfc03d51d222741dc83e3ea77f28f2ee6e83 | /tests/testthat/test-double_ml_pliv_multi_z_parameter_passing.R | 770e57776bc58622fd570ac4d61c31a91a83ce3c | [] | no_license | anhnguyendepocen/doubleml-for-r | 947d7f19f4f038cc11090340f36e864a612932b3 | f45de1ff00254c8ccf5c54e0272c0f6a93db597f | refs/heads/master | 2023-04-23T06:39:00.420678 | 2021-04-15T12:33:46 | 2021-04-15T12:33:46 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,284 | r | test-double_ml_pliv_multi_z_parameter_passing.R | context("Unit tests for PLIV")
library("mlr3learners")
lgr::get_logger("mlr3")$set_threshold("warn")
skip_on_cran()
test_cases = expand.grid(
learner = c("regr.lm", "regr.glmnet"),
dml_procedure = c("dml1", "dml2"),
score = "partialling out",
i_setting = 1:(length(data_pliv)),
stringsAsFactors = FALSE)
test_cases["test_name"] = apply(test_cases, 1, paste, collapse = "_")
patrick::with_parameters_test_that("Unit tests for PLIV:",
.cases = test_cases, {
learner_pars = get_default_mlmethod_pliv(learner)
n_rep_boot = 498
n_folds = 5
n_rep = 2
# TODO: Comparison case (functional)
set.seed(i_setting)
params_OOP = rep(list(rep(list(learner_pars$params), 1)), 1)
Xnames = names(data_pliv[[i_setting]])[names(data_pliv[[i_setting]]) %in% c("y", "d", "z", "z2") == FALSE]
data_ml = double_ml_data_from_data_frame(data_pliv[[i_setting]],
y_col = "y",
d_cols = "d", x_cols = Xnames, z_cols = c("z", "z2"))
double_mlpliv_multiz_obj = DoubleMLPLIV$new(data_ml,
n_folds = n_folds,
ml_g = learner_pars$mlmethod$mlmethod_g,
ml_m = learner_pars$mlmethod$mlmethod_m,
ml_r = learner_pars$mlmethod$mlmethod_r,
dml_procedure = dml_procedure,
score = score,
n_rep = n_rep)
double_mlpliv_multiz_obj$set_ml_nuisance_params(
learner = "ml_g",
treat_var = "d",
params = learner_pars$params$params_g)
double_mlpliv_multiz_obj$set_ml_nuisance_params(
learner = "ml_m_z",
treat_var = "d",
params = learner_pars$params$params_m)
double_mlpliv_multiz_obj$set_ml_nuisance_params(
learner = "ml_m_z2",
treat_var = "d",
params = learner_pars$params$params_m)
double_mlpliv_multiz_obj$set_ml_nuisance_params(
learner = "ml_r",
treat_var = "d",
params = learner_pars$params$params_r)
double_mlpliv_multiz_obj$fit()
theta_multiz_obj = double_mlpliv_multiz_obj$coef
se_multiz_obj = double_mlpliv_multiz_obj$se
# Exact passing
export_params_exact_g = rep(list(rep(list(learner_pars$params$params_g), n_folds)), n_rep)
export_params_exact_m = rep(list(rep(list(learner_pars$params$params_m), n_folds)), n_rep)
export_params_exact_r = rep(list(rep(list(learner_pars$params$params_r), n_folds)), n_rep)
set.seed(i_setting)
params_OOP = rep(list(rep(list(learner_pars$params), 1)), 1)
Xnames = names(data_pliv[[i_setting]])[names(data_pliv[[i_setting]]) %in% c("y", "d", "z", "z2") == FALSE]
data_ml = double_ml_data_from_data_frame(data_pliv[[i_setting]],
y_col = "y",
d_cols = "d", x_cols = Xnames, z_cols = c("z", "z2"))
double_mlpliv_mutliz_exact_obj = DoubleMLPLIV$new(data_ml,
n_folds = 5,
ml_g = learner_pars$mlmethod$mlmethod_g,
ml_m = learner_pars$mlmethod$mlmethod_m,
ml_r = learner_pars$mlmethod$mlmethod_r,
dml_procedure = dml_procedure,
score = score,
n_rep = n_rep)
double_mlpliv_mutliz_exact_obj$set_ml_nuisance_params(
learner = "ml_g",
treat_var = "d",
params = export_params_exact_g,
set_fold_specific = TRUE)
double_mlpliv_mutliz_exact_obj$set_ml_nuisance_params(
learner = "ml_m_z",
treat_var = "d",
params = export_params_exact_m,
set_fold_specific = TRUE)
double_mlpliv_mutliz_exact_obj$set_ml_nuisance_params(
learner = "ml_m_z2",
treat_var = "d",
params = export_params_exact_m,
set_fold_specific = TRUE)
double_mlpliv_mutliz_exact_obj$set_ml_nuisance_params(
learner = "ml_r",
treat_var = "d",
params = export_params_exact_r,
set_fold_specific = TRUE)
double_mlpliv_mutliz_exact_obj$fit()
theta_mutliz_exact_obj = double_mlpliv_mutliz_exact_obj$coef
se_mutliz_exact_obj = double_mlpliv_mutliz_exact_obj$se
# bootstrap
# double_mlpliv_obj$bootstrap(method = 'normal', n_rep = n_rep_boot)
# boot_theta_obj = double_mlpliv_obj$boot_coef
# at the moment the object result comes without a name
expect_equal(theta_multiz_obj, theta_mutliz_exact_obj, tolerance = 1e-8)
expect_equal(se_multiz_obj, se_mutliz_exact_obj, tolerance = 1e-8)
# expect_equal(as.vector(pliv_hat$boot_theta), as.vector(boot_theta_obj), tolerance = 1e-8)
}
)
|
323e99986a2019e98172660093d3c8830935add5 | 5cb215dd1d269b4471b91efea988d842bf55de40 | /man/synCreateColumn.Rd | 2c5c4c859fb1a346b0e874e97dbba8e3ce950d48 | [
"Apache-2.0"
] | permissive | Sage-Bionetworks/synapser | 0d308dba0a4a993a1e8f609c25c75b072de78cdc | c9ed6ca9fb5247d56167ff8812ddc780de013127 | refs/heads/master | 2023-06-24T23:10:43.914336 | 2023-06-14T22:33:35 | 2023-06-14T22:33:35 | 34,292,371 | 31 | 16 | Apache-2.0 | 2023-09-10T04:16:43 | 2015-04-20T23:33:04 | R | UTF-8 | R | false | false | 565 | rd | synCreateColumn.Rd | \name{synCreateColumn}
\alias{synCreateColumn}
\docType{methods}
\title{
synCreateColumn
}
\description{
This is redundant with synStore(Column(...)) and will be removed.
}
\usage{
synCreateColumn(name, columnType, maximumSize=NULL, defaultValue=NULL, enumValues=NULL)
}
\arguments{
\item{name}{Column name}
\item{columnType}{Column type}
\item{maximumSize}{maximum length of values (only used when columnType is STRING)}
\item{defaultValue}{default values (otherwise defaults to NULL)}
\item{enumValues}{permitted values}
}
\value{
An object of type Column.
}
|
f771a119093bbc022b454c9b59f7d6584b523fe0 | 1e56ccfb32b8bb3bd231932dfcc2c7fb5fc215bf | /man/isEmpty.Rd | 7042d49cf0770a0d3dd6688e4a09391ba7a78b6f | [] | no_license | michaelhallquist/MplusAutomation | 4602f6a713b8f9aa9a5963991ace0d56d109a893 | ec0c959e0b752fe41d9cb62accd39d5c19d8502a | refs/heads/master | 2023-08-06T10:29:57.004763 | 2023-07-15T13:42:41 | 2023-07-15T13:42:41 | 8,270,378 | 80 | 49 | null | 2023-06-07T13:05:49 | 2013-02-18T14:51:31 | R | UTF-8 | R | false | true | 671 | rd | isEmpty.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/utilityFunctions.R
\name{isEmpty}
\alias{isEmpty}
\title{Check whether a useable function argument was provided}
\usage{
isEmpty(arg)
}
\arguments{
\item{arg}{A function argument}
}
\value{
Logical vector of length 1.
}
\description{
This is a simple utility to check whether a function argument is missing,
\code{NULL}, or has only \code{NA}s.
}
\examples{
\dontrun{
f1 <- function(x) {
if (!isEmpty(x)) return(mean(x, na.rm = TRUE))
return(NULL)
}
f1() #> NULL
f1(x = NA) #> NULL
f1(x = NULL) #> NULL
f1(x = c(NA, 1:2)) #> 1.5
}
}
\keyword{internal}
|
4b972b16a00f4b0593ff6a043d173041390a3736 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/clickstream/examples/frequencies.Rd.R | 0e4d122648b6a0192c510d983365382b88cf6f89 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 592 | r | frequencies.Rd.R | library(clickstream)
### Name: frequencies
### Title: Generates a Data Frame of State Frequencies for All Clickstreams
### in a List of Clickstreams
### Aliases: frequencies
### ** Examples
clickstreams <- c("User1,h,c,c,p,c,h,c,p,p,c,p,p,o",
"User2,i,c,i,c,c,c,d",
"User3,h,i,c,i,c,p,c,c,p,c,c,i,d",
"User4,c,c,p,c,d",
"User5,h,c,c,p,p,c,p,p,p,i,p,o",
"User6,i,h,c,c,p,p,c,p,c,d")
csf <- tempfile()
writeLines(clickstreams, csf)
cls <- readClickstreams(csf, header = TRUE)
frequencyDF <- frequencies(cls)
|
0d65314430bddbfa6cab425bb5749a70199f26b0 | 4744964c0e4b7813847711c53ae604712e156bb2 | /Parte2/12-GGplot2.R | fdcd40ed6158c39d93c202f5f28755f2c3312553 | [] | no_license | giusepper11/RFundamentos | 4871684c9c1493991e6ddb4ca3dc8c5f936496ef | 3d54842efd4b551d29c38037e37dc5ca2d4e370a | refs/heads/master | 2020-04-13T09:44:28.323149 | 2019-01-30T00:34:19 | 2019-01-30T00:34:19 | 163,119,568 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,801 | r | 12-GGplot2.R | ## ggplot2
# a complete graphics system, an alternative to
# R's base graphics system
# It offers more modification options, ready-made legends,
# and more consistent formatting
install.packages('ggplot2')
library(ggplot2)
# Plotting a basic chart with qplot()
data(tips, package = 'reshape2') # loads the tips dataset from the reshape2 package
qplot(total_bill, tip, data=tips, geom ='point') # point geometry
# Layer 1
camada1 <- geom_point(
  mapping = aes(x=total_bill, y=tip, color = sex), # map the variables inside the plot
  data = tips,
  size =3
)
ggplot() + camada1
?aes # aesthetic mapping inside the plot
# Building a regression model
modelo_base <- lm(tip~total_bill, data=tips)
modelo_fit <- data.frame(
  total_bill = tips$total_bill,
  predict(modelo_base, interval='confidence')
)
head(modelo_fit)
# Layer 2: the fitted regression line
camada2 = geom_line(
  mapping = aes(x=total_bill, y =fit),
  data = modelo_fit,
  color = 'darkred'
)
ggplot()+camada1+camada2
# Layer 3: confidence band around the fit
camada3 = geom_ribbon(
  data=modelo_fit,
  mapping = aes(x=total_bill, ymin =lwr, ymax=upr),
  alpha=0.3
)
ggplot() + camada1 + camada2 + camada3
# Optimised final version
ggplot(tips, aes(x= total_bill, y=tip)) +
  geom_point(aes(color=sex)) +
  geom_smooth(method = 'lm')
# Storing the plot in an object
myplot = ggplot(tips, aes(x= total_bill, y=tip)) +
  geom_point(aes(color=sex)) +
  geom_smooth(method = 'lm') # generates the plot's statistical content
class(myplot)
print(myplot)
# Scatterplot with a regression line
# Data
data = data.frame(cond = rep(c('Obs1','Obs2'),
                             each=10), var1 = 1:100 + rnorm(100, sd=9),
                  var2 = 1:100 + rnorm(100, sd=16))
# Plot
ggplot(data,aes(x = var1, y = var2))+
  geom_point(shape=1) +
  geom_smooth(method = lm, color= 'red', se=FALSE) # generates the plot's statistical content
?lm
# Bar plots
# Data
data = data.frame(
  grupo = c('A','B','C','D'),
  valor = c(33,62,56,67),
  num_obs = c(100,500, 459, 342)
)
# Deriving the rectangle boundaries
data$rigth = cumsum(data$num_obs) + 30 * c(0:(nrow(data)-1))
data$left = data$rigth - data$num_obs
# plot
ggplot(data, aes(ymin=0))+
  geom_rect(aes(xmin=left, xmax=rigth,
                ymax = valor, colour=grupo, fill = grupo)) +
  xlab('Numero de obs') + ylab('Valor')
# Using the mtcars dataset
head(mtcars)
ggplot(data=mtcars, aes(x = disp, y=mpg)) + geom_point()
# Map point colour to a categorical variable
ggplot(data=mtcars, aes(x = disp, y=mpg,
                        colour = as.factor(am))) + geom_point()
# Map point colour to a continuous variable
ggplot(data=mtcars, aes(x = disp, y=mpg,
                        colour = -cyl)) + geom_point() # the minus on the colour variable reverses the order
# Map point size to a variable of interest;
# the legend is inserted into the plot automatically
ggplot(data=mtcars, aes(x = disp, y=mpg,
                        colour = -cyl, size=wt)) + geom_point()
# Geoms define which geometric shape is used to display the data in the plot
ggplot(mtcars, aes(x=as.factor(cyl), y=mpg)) + geom_boxplot()
# histograms
ggplot(mtcars, aes(x=mpg), binwidth = 30) + geom_histogram()
# Bar chart
ggplot(mtcars, aes(x = as.factor(cyl))) + geom_bar()
# Customising the plot
colors()
ggplot(mtcars, aes(x=as.factor(cyl), y=mpg, colour=as.factor(cyl))) + geom_boxplot()
ggplot(mtcars, aes(x=as.factor(cyl), y=mpg, fill=as.factor(cyl))) + geom_boxplot()
ggplot(mtcars, aes(x=as.factor(cyl), y=mpg)) +
  geom_boxplot(color='blue', fill='seagreen4')
# Changing the axes
ggplot(mtcars, aes(x=mpg)) +
  geom_histogram()+
  xlab('Milhas por gallon') + ylab('Frequencia')
# Changing the plot limits
ggplot(mtcars, aes(x=mpg)) +
  geom_histogram() +
  xlab('Milhas por gallon') + ylab('Frequencia') +
  xlim(c(0,40)) + ylim(c(0,8))
# Legends
ggplot(mtcars, aes(x = as.factor(cyl), fill = as.factor(cyl))) +
  geom_bar() +
  labs(fill = "cyl")
# Moving the legend position
ggplot(mtcars, aes(x = as.factor(cyl), fill = as.factor(cyl))) +
  geom_bar() +
  labs(fill = "cyl") +
  theme(legend.position="top")
# No legend
ggplot(mtcars, aes(x = as.factor(cyl), fill = as.factor(cyl))) +
  geom_bar() +
  guides(fill=FALSE)
# Facets: split the plot according to some variable
ggplot(mtcars, aes(x=mpg, y=disp, color=as.factor(cyl))) +
  geom_point() +
  facet_grid(am~.) # horizontal
ggplot(mtcars, aes(x=mpg, y=disp, color=as.factor(cyl))) +
  geom_point() +
  facet_grid(.~am) # vertical
# different plots arranged together (unlike facets, which split one plot)
install.packages('gridExtra')
library(gridExtra)
library(ggplot2)
# Dataset diamonds
data("diamonds")
# histogram as plot1
plot1 = qplot(price, data=diamonds, binwidth =1000)
# scatterplot as plot2
plot2 = qplot(carat, price, data = diamonds, colour = cut)
# Combine the 2 plots in the same display area
grid.arrange(plot1, plot2, ncol=1)
# Density plots
ggplot(data = diamonds, aes(x=price, group=cut, fill=cut)) + geom_density(adjust = 1.5)
ggplot(data = diamonds, aes(x=price, group=cut, fill=cut)) + geom_density(adjust = 1.5, alpha=0.2)
ggplot(data = diamonds, aes(x=price, group=cut, fill=cut)) + geom_density(adjust = 1.5, position = 'fill')
# Facets with Reshape
library(reshape2)
install.packages('plotly',dependencies=TRUE, INSTALL_opts = c('--no-lock'))
install.packages('sf',dependencies=TRUE, INSTALL_opts = c('--no-lock'))
library(plotly)
# ggplotly() converts the most recently printed ggplot into an interactive
# plotly widget.
sp <- ggplot(tips, aes(x=total_bill, y=tip/total_bill)) + geom_point(shape=1)
sp + facet_grid(sex ~ .)
ggplotly()
sp + facet_grid(. ~ sex)
ggplotly()
sp + facet_wrap( ~ day, ncol = 2)
ggplotly()
ggplot(mpg, aes(displ, hwy)) + geom_point() + facet_wrap(~manufacturer)
ggplotly()
|
ca30f6aa3cdb8006a176104763810fb413e05ea4 | 34e2217b2255e5bb192c2c724dbe78ca4c1b3c64 | /man/on_shutdown.Rd | 42fbd47269a643fbd33991dc689dbd2d37183bfa | [] | no_license | kongdd/languageserver | df335d28f97868793b6a56b64b9671a24afa57ce | d3ae514ad9b708178217522029e97f087e98b343 | refs/heads/master | 2020-08-01T14:40:13.188378 | 2019-09-26T07:09:25 | 2019-09-26T07:09:25 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 522 | rd | on_shutdown.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/handlers-general.R
\name{on_shutdown}
\alias{on_shutdown}
\title{shutdown request handler}
\usage{
on_shutdown(self, id, params)
}
\arguments{
\item{self}{a \link{LanguageServer} object}
\item{id}{a numeric, the id of the process that started the server}
\item{params}{unused here}
}
\description{
Handler for the \href{https://microsoft.github.io/language-server-protocol/specification#shutdown}{shutdown} \link{Request}
}
\keyword{internal}
|
3e1fc62cc584e4778bb6f09aff188ad1509d1973 | 0dfd1d7920d2e4211a310f443962bac3987eff02 | /app.R | ef3f2f645169cf6114d220a7ecd65f41724bd401 | [] | no_license | nunufung/cetm46 | 6897d50e7d03df5992fd3eef947e36c7cdec35d9 | 8604fcec6c2ed4dff15d03c4ff6c228ffbfda8bc | refs/heads/master | 2022-11-18T12:37:09.106587 | 2020-07-04T03:33:03 | 2020-07-04T03:33:03 | 260,830,253 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,223 | r | app.R | ## app.R ##
# Shiny dashboard for the online-shoppers-intention dataset: package loading
# and data download.
library(shiny)
library(shinydashboard)
library(DT)
library(ggplot2)
library(caret)
library(PerformanceAnalytics)
library(evtree)
library(mvtnorm)
library(shinyjs)
library(markdown)
library(ggmosaic)
## Loading the dataset
# NOTE(review): df and df2 download the identical file twice; the second call
# could reuse the first (df2 <- df).
df <- read.csv("https://github.com/nunufung/cetm46/raw/master/online_shoppers_intention.csv")
df2 <- read.csv("https://github.com/nunufung/cetm46/raw/master/online_shoppers_intention.csv")
# UI definition: a dashboard with a six-item sidebar; each tab body renders
# the plots/tables produced by the matching output ids in server().
# NOTE(review): the title "Shopper Intensions" looks like a typo for
# "Intentions" -- left unchanged here since it is displayed text.
ui <- dashboardPage(
  dashboardHeader(title = "Shopper Intensions"),
  ## Sidebar content
  dashboardSidebar(
    sidebarMenu(
      menuItem("Introduction", tabName = "intro", icon = icon("tags")),
      menuItem("Visualization: Rev vs others", tabName = "revenue", icon = icon("money-check-alt")),
      menuItem("Visualization: Cross table", tabName = "cross", icon = icon("money-check")),
      menuItem("Shopper Dataset", tabName = "data", icon = icon("globe")),
      menuItem("Confusion Matrix", tabName = "cm", icon = icon("user-astronaut")),
      menuItem("Conclusion", tabName = "conclu", icon = icon("flask"))
    )
  ),
  dashboardBody(
    tabItems(
      # Second Tab content
      # Revenue-vs-feature plots bar1..bar10, two boxes per row.
      tabItem(tabName = "revenue",
              fluidRow(
                box(width=6, plotOutput("bar1")),
                box(width=6, plotOutput("bar2")),
              ),
              fluidRow(
                box(width=6, plotOutput("bar3")),
                box(width=6, plotOutput("bar4"))
              ),
              fluidRow(
                box(width=6, plotOutput("bar5")),
                box(width=6, plotOutput("bar6"))
              ),
              fluidRow(
                box(width=6, plotOutput("bar7")),
                box(width=6, plotOutput("bar8"))
              ),
              fluidRow(
                box(width=6, plotOutput("bar9")),
                box(width=6, plotOutput("bar10"))
              )
      ),
      # Third tab content
      # Cross-table plots bar11..bar17.
      tabItem(tabName = "cross",
              fluidRow(
                box(width=6, plotOutput("bar11")),
                box(width=6, plotOutput("bar12")),
              ),
              fluidRow(
                box(width=6, plotOutput("bar13")),
                box(width=6, plotOutput("bar14"))
              ),
              fluidRow(
                box(width=6, plotOutput("bar15")),
                box(width=6, plotOutput("bar16"))
              ),
              fluidRow(
                box(width=6, plotOutput("bar17")),
              )
      ),
      # Fourth tab content
      # Raw dataset browser: column chooser plus the data table.
      tabItem(tabName = "data",
              fluidRow(
                box(width=3, checkboxGroupInput("show_vars", "columns in listing to show:",
                                                names(df2), selected = names(df2))),
                box(width=9, dataTableOutput("table1")))),
      # Fifth tab content ( to be confirmed)
      # Static HTML pages rendered from pre-built documents.
      tabItem(tabName = "cm",
              fluidRow(
                column(12,
                       includeHTML("cm.html")
                ))),
      #First Tab content
      tabItem(tabName = "intro",
              fluidRow(
                column(12,
                       includeHTML("intro.html")
                ))),
      #Fifth Tab content
      tabItem(tabName = "conclu",
              fluidRow(
                column(12,
                       includeHTML("conclu.html"),
                )))
    ))
)
## Server: renders the distribution plots (bar1..bar10), the cross-table /
## mosaic plots (bar11..bar17), and the data table.  Relies on `df`/`df2`
## loaded at the top of the file and on ggplot2 / ggmosaic / DT.
server <- function(input, output) {
  set.seed(122)
  histdata <- rnorm(500)
  ## NOTE(review): there is no plotOutput("plot1") or input$slider in the UI
  ## above, so this output is never displayed -- presumably a leftover from
  ## the shinydashboard template.  Kept as-is; confirm before removing.
  output$plot1 <- renderPlot({
    data <- histdata[seq_len(input$slider)]
    hist(data)
  })

  ## ---- Tab "revenue": one panel per predictor, faceted by the Revenue
  ## outcome.  scales = "free_y" because the two Revenue classes are of
  ## very different sizes.  geom_bar for count-like variables,
  ## geom_histogram for continuous durations/rates. ----
  output$bar1 <- renderPlot({
    df %>%
      ggplot() +
      aes(x = Administrative) +
      geom_bar() +
      facet_grid(Revenue ~ ., scales = "free_y")
  })
  output$bar2 <- renderPlot({
    df %>%
      ggplot() +
      aes(x = Administrative_Duration) +
      geom_histogram(bins = 50) +
      facet_grid(Revenue ~ ., scales = "free_y")
  })
  output$bar3 <- renderPlot({
    df %>%
      ggplot() +
      aes(x = Informational) +
      geom_bar() +
      facet_grid(Revenue ~ ., scales = "free_y")
  })
  output$bar4 <- renderPlot({
    df %>%
      ggplot() +
      aes(x = Informational_Duration) +
      geom_histogram(bins = 50) +
      facet_grid(Revenue ~ ., scales = "free_y")
  })
  output$bar5 <- renderPlot({
    df %>%
      ggplot() +
      aes(x = ProductRelated) +
      geom_bar() +
      facet_grid(Revenue ~ ., scales = "free_y")
  })
  output$bar6 <- renderPlot({
    df %>%
      ggplot() +
      aes(x = ProductRelated_Duration) +
      geom_histogram(bins = 100) +
      facet_grid(Revenue ~ ., scales = "free_y")
  })
  output$bar7 <- renderPlot({
    df %>%
      ggplot() +
      aes(x = BounceRates) +
      geom_histogram(bins = 100) +
      facet_grid(Revenue ~ ., scales = "free_y")
  })
  output$bar8 <- renderPlot({
    df %>%
      ggplot() +
      aes(x = ExitRates) +
      geom_histogram(bins = 100) +
      facet_grid(Revenue ~ ., scales = "free_y")
  })
  output$bar9 <- renderPlot({
    df %>%
      ggplot() +
      aes(x = PageValues) +
      geom_histogram(bins = 50) +
      facet_grid(Revenue ~ ., scales = "free_y")
  })
  output$bar10 <- renderPlot({
    df %>%
      ggplot() +
      aes(x = SpecialDay) +
      geom_bar() +
      facet_grid(Revenue ~ ., scales = "free_y") +
      scale_x_continuous(breaks = seq(0, 1, 0.1))
  })

  ## ---- Tab "cross": mosaic plots of Revenue against the categorical
  ## variables ----
  # default theme for ggplot2
  theme_set(theme_gray())
  # default parameters for mosaic plots: rotate x labels, hide y axis
  mosaic_theme = theme(axis.text.x = element_text(angle = 90,
                                                  hjust = 1,
                                                  vjust = 0.5),
                       axis.text.y = element_blank(),
                       axis.ticks.y = element_blank())

  output$bar11 <- renderPlot({
    ## Month vs Revenue as within-class percentages.
    ## FIX: a first ggplot pipeline that was built here was dead code --
    ## renderPlot() renders only the value of the last expression -- and it
    ## used an invalid aesthetic (Revenue = ..count..); it has been removed.
    month_table <- table(df$Month, df$Revenue)
    month_tab <- as.data.frame(prop.table(month_table, 2))
    colnames(month_tab) <- c("Month", "Revenue", "perc")
    ggplot(data = month_tab, aes(x = Month, y = perc, fill = Revenue)) +
      geom_bar(stat = 'identity', position = 'dodge', alpha = 2/3) +
      xlab("Month")+
      ylab("Percent")
  })
  output$bar12 <- renderPlot({
    df %>%
      ggplot() +
      geom_mosaic(aes(x = product(Revenue, OperatingSystems), fill = Revenue)) +
      mosaic_theme +
      xlab("OS Types") +
      ylab(NULL)
  })
  output$bar13 <- renderPlot({
    df %>%
      ggplot() +
      geom_mosaic(aes(x = product(Revenue, Browser), fill = Revenue)) +
      mosaic_theme +
      xlab("Browser Types") +  # FIX: "Broswer" typo in the axis label
      ylab(NULL)
  })
  output$bar14 <- renderPlot({
    df %>%
      ggplot() +
      geom_mosaic(aes(x = product(Revenue, Region), fill = Revenue)) +
      mosaic_theme +
      xlab("Regions") +
      ylab(NULL)
  })
  output$bar15 <- renderPlot({
    df %>%
      ggplot() +
      geom_mosaic(aes(x = product(Revenue, TrafficType), fill = Revenue)) +
      mosaic_theme +
      xlab("Traffic Type") +
      ylab(NULL)
  })
  output$bar16 <- renderPlot({
    df %>%
      ggplot() +
      geom_mosaic(aes(x = product(Revenue, VisitorType), fill = Revenue)) +
      mosaic_theme +
      xlab("Visitor Type") +
      ylab(NULL)
  })
  output$bar17 <- renderPlot({
    df %>%
      ggplot() +
      geom_mosaic(aes(x = product(Revenue, Weekend), fill = Revenue)) +
      mosaic_theme +
      xlab("Weekend") +
      ylab(NULL)
  })

  ## ---- Tab "data": scrollable table restricted to the user-selected
  ## columns (drop = FALSE keeps a data.frame even for a single column) ----
  output$table1 <- DT::renderDataTable({DT::datatable(df2[, input$show_vars, drop = FALSE],
                                                      options = list (
                                                        scrollX = TRUE,
                                                        class = 'cell-border stripe')
  )
  })
}
shinyApp(ui, server) |
1c83048cf2366ef87c2184c40d494893b0bbd2da | f4009f33fd508377bbadbfd5d552f5e03e7fa5e5 | /Phylofest_code/sites_model_values.r | 58249b5956153533179444f488463d0c81308a17 | [] | no_license | web1mhz/danrosauer-scripts | 0f2280d049647aa3c497f0d1882a73da890815e0 | 8d16dda8f044c5c3a28ece5f7a2589ad15b1de9d | refs/heads/master | 2021-01-19T13:32:00.967115 | 2015-05-12T04:33:38 | 2015-05-12T04:33:38 | 39,985,905 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,860 | r | sites_model_values.r | # a script to extract environment values for a set of points, from a set of ascii grids
# Extracts lineage-model values for a set of sample points from a set of
# ASCII grids and writes the appended table to CSV.
# Dan Rosauer - October 2012

library(SDMTools)

########## Parameters ##########
#inputs
work.dir <- 'C:/Users/u3579238/Work/Phylofest/Models/skinks/L_delicata_Tingley/'
samples.dir <- 'C:/Users/u3579238/Work/Phylofest/Models/skinks/L_delicata_Tingley/'
samples.filename <- 'Ldelicata_ALA.csv'
lat_col <- 1
lon_col <- 2
env.dir <- 'C:/Users/u3579238/Work/Phylofest/Models/combined/lineage_models/'
env.pattern <- 'lin_model_lampropholis_delicata_tingley_dr(.)+.asc' #regex
minimum_value <- 0.0005   # model values below this are truncated to 0
#outputs
name = substr(samples.filename, 1, nchar(samples.filename) - 4)  # strip ".csv"
output.filename <- paste(samples.dir, "clades_at_", name, "_dr.csv", sep = "")
################################

setwd(work.dir)

points <- read.csv(paste(samples.dir, samples.filename, sep = ""))
pointsxy <- points[, c(lon_col, lat_col)]  # extract.data expects (lon, lat)

# List candidate grid files, then drop GIS sidecar files (.aux.xml, .asc.ovr).
grids_to_use <- list.files(env.dir, pattern = env.pattern, full.names = TRUE)
to_exclude <- grep("aux.xml", grids_to_use)
to_exclude <- c(to_exclude, grep("asc.ovr", grids_to_use))
#to_exclude <- c(to_exclude, grep("tingley_dr",grids_to_use))
# BUG FIX: x[-integer(0)] selects *nothing*, so when no sidecar files were
# present the original line dropped every grid.  Only index when there is
# something to exclude.
if (length(to_exclude) > 0) {
  grids_to_use <- grids_to_use[-to_exclude]
}

# For each grid: read it, derive a column name from the file name, and
# append the (rounded, thresholded) extracted values to `points`.
for (tfile in grids_to_use) {
  cat("\nabout to do", tfile)
  tasc = read.asc(tfile)                 # read in the data
  dataname = gsub(env.dir, '', tfile); dataname = gsub('\\_msk.asc', '', dataname)
  dataname = gsub('/', '', dataname)     # define the column name
  points[dataname] = round(extract.data(pointsxy, tasc), 4)  # append the data
  points[dataname][points[dataname] < minimum_value] <- 0    # set values below minimum_value to 0
}

cat("\nAbout to write table to file\n")
write.csv(points, output.filename)
cat("\nFinished\n")
|
8302765d7ca74f83bcdc9c504d59f39bdb763706 | 42733230cc656ed5848b44d7eaa1c9732bdbdba0 | /BFE_RShiny/flamingo/man/plotIL.Rd | 06233c3284a4ce053c7358da53bba9ea78d36aaa | [
"BSD-3-Clause"
] | permissive | miraisolutions/OasisUI | 9fa544f1bbcd90e75e58e284a1243ca6094b27d0 | 69aa499018d9b0cdab86da4bd55476825067797d | refs/heads/master | 2021-07-10T08:55:09.356478 | 2018-10-11T21:30:45 | 2018-10-11T21:30:45 | 135,453,807 | 0 | 1 | BSD-3-Clause | 2019-01-25T15:00:38 | 2018-05-30T14:21:35 | CSS | UTF-8 | R | false | true | 484 | rd | plotIL.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plots.R
\name{plotIL}
\alias{plotIL}
\title{IL plot}
\usage{
plotIL(outputPlotData, interactive = FALSE, ...)
}
\arguments{
\item{outputPlotData}{data for plot}
\item{interactive}{if \code{TRUE}, create an interactive plot using \code{\link[plotly]{plot_ly}}}
\item{...}{extra arguments passed to \code{\link{plot}}}
}
\value{
nothing; the interactive plot object if \code{interactive = TRUE}
}
\description{
plot IL using the current device
}
|
ce08722d92fb9540998ffbf9866fe2e5e00e8953 | 7a74461fcd925f66eb752c251b045b6a2b1bc90c | /plot4.R | f5b909747956d7cd9159820cf229361de5c75d31 | [] | no_license | fpani0/ExData_Plotting1 | 3ea13826c22ad072d432f8bfd1358c575f584571 | efe734de307e1a57d17d48362210db5a409ff647 | refs/heads/master | 2020-12-31T01:10:30.941342 | 2016-09-16T19:34:20 | 2016-09-16T19:34:20 | 68,405,403 | 0 | 0 | null | 2016-09-16T18:33:13 | 2016-09-16T18:33:12 | null | UTF-8 | R | false | false | 1,715 | r | plot4.R | ##PLOT 4
#load data
df <- read.table("household_power_consumption.txt", header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
df$Datetime <- paste(df$Date, df$Time)
#Convert the Date Date/Time classes in R using the as.Date() function
#df$Datetime <- strptime(df$Datetime, "%d/%m/%Y %H:%M:%S")
df$Datetime <- as.POSIXct(df$Datetime, format="%d/%m/%Y %H:%M:%S", tz="AST")
#Filter data
DATE1 <- as.POSIXct("2007-02-01 00:00:00", "%Y-%m-%d %H:%M:%S", tz="AST")
DATE2 <- as.POSIXct("2007-02-03 00:00:00", format="%Y-%m-%d %H:%M:%S", tz="AST")
df <- df[df$Datetime >= DATE1 & df$Datetime <= DATE2,]
df$Global_active_power <- as.numeric(df$Global_active_power)
df$Sub_metering_1 <- as.numeric(df$Sub_metering_1)
df$Sub_metering_2 <- as.numeric(df$Sub_metering_2)
df$Sub_metering_3 <- as.numeric(df$Sub_metering_3)
df$Global_reactive_power <- as.numeric(df$Global_reactive_power)
df$Voltage <- as.numeric(df$Voltage)
#Open device png and plot all 4 segments
png(filename="plot4.png", width = 480, height = 480)
par(mfrow = c(2,2))
plot(Global_active_power ~ Datetime, df, type = "l", ylab="Global Active Power (kilowatts)")
plot(Voltage ~ Datetime, df, type = "l")
plot(Sub_metering_1 ~ Datetime, df, type = "l", ylim=c(0.0,40), xlab='', ylab="Energy sub metering")
par(new=T)
plot(Sub_metering_2 ~ Datetime, df, type = "l", ylim=c(0.0,40), xaxt = "n", col="red", xlab='', ylab='')
par(new=T)
plot(Sub_metering_3 ~ Datetime, df, type = "l", ylim=c(0.0,40), xaxt = "n", col="blue", xlab='', ylab='')
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=c(1,1,1), lwd=c(2.5, 2.5, 2.5), col=c("black", "red", "blue"))
plot(Global_reactive_power ~ Datetime, df, type = "l")
dev.off()
|
96c1b9289eff388019ef083eb0c691f339aae2ec | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/qat/examples/qat_call_save_slide_distribution.Rd.R | 8f2c64653ad48d62042e39cb2a008ea66ad4208d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 417 | r | qat_call_save_slide_distribution.Rd.R | library(qat)
### Name: qat_call_save_slide_distribution
### Title: Produce a savelist-entry for a Slide Distribution Test
### Aliases: qat_call_save_slide_distribution
### Keywords: utilities
### ** Examples
vec <- rnorm(100)
workflowlist_part <- list(blocksize=10)
resultlist <- qat_call_slide_distribution(vec, workflowlist_part, element=1)
savelist <- qat_call_save_slide_distribution(resultlist[[2]])
|
2e94ef3d952a354b195fb410d7a168411402cb36 | a9015b020e232f9082b549ed06ad929fdea79352 | /notebooks/benchmark/r_scripts/rCBA_movies.R | 3f2ec6da127410d83794655f04a607bb37ec0071 | [
"MIT"
] | permissive | viaduct-ai/pyARC | 09987e9e33f574970209e8ffb5250f454287fcd3 | 858e77a83b83da3910ee36f1c46540a6c45988a5 | refs/heads/master | 2022-10-28T01:22:17.493608 | 2020-06-18T19:13:40 | 2020-06-18T19:13:40 | 273,298,867 | 1 | 0 | MIT | 2020-06-18T19:13:41 | 2020-06-18T17:22:21 | null | UTF-8 | R | false | false | 585 | r | rCBA_movies.R | moviesdf <- read.csv("c:/code/python/CBA/notebooks/data/movies_discr.csv", sep = ";", stringsAsFactors = TRUE)
# drop empty id column
drops <- c("")
train <- moviesdf[, !(names(moviesdf) %in% drops)]
# Coerce to arules transactions and mine class-association rules whose RHS
# is one of the three class labels (assumes arules is loaded; `moviesdf`
# is read above).
txns <- as(train, "transactions")
appearance = list(rhs=c("class=critical-success", "class=box-office-bomb", "class=main-stream-hit"),default="lhs")
rules = apriori(txns, parameter=list(support=0.01, confidence=0.05), appearance = appearance)
rulesFrame <- as(rules, "data.frame")
# Prune the rule set with rCBA's M1 CBA algorithm and print the result.
prunedRulesFrame <- rCBA::pruning(train, rulesFrame, method="m1cba")
prunedRulesFrame
|
e6000195a70000dab63b705e4a430b2c47ca68cc | d9485ffc49f5b8309bf5ce34abd76e86abc721ff | /R-libraries/myUtilities/R/proportion.ci.R | 7469950dc6aa689659b76695aed99c8100f9da3f | [] | no_license | jyqalan/myUtilities | a42944cd6e370c23ce8d232017f6db4949f09cca | 73de6f8defc617c1fdf5537f4d159d653d876a0d | refs/heads/master | 2020-12-10T23:35:22.603147 | 2018-04-20T13:50:58 | 2018-04-20T13:50:58 | 55,338,987 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 675 | r | proportion.ci.R | proportion.ci <-
function(r, n, ci = 0.95)
{
# uses exact F distribution to determine the exact confidence intervals
# r can be a proportion or a number
r <- ifelse(r < 1, round(r * n), r)
t1 <- 1 - (1 - ci)/2
old.warn <- options()$warn
options(warn = -1)
F1 <- qf(t1, 2 * n - 2 * r + 2, 2 * r)
F2 <- qf(t1, 2 * r + 2, 2 * n - 2 * r)
options(warn = old.warn)
lower.ci <- r/(r + (n - r + 1) * F1)
upper.ci <- (r + 1)/(r + 1 + (n - r)/F2)
lower.ci <- ifelse(is.na(lower.ci) & !is.na(n) & !is.na(r), 0, lower.ci)
upper.ci <- ifelse(is.na(upper.ci) & !is.na(n) & !is.na(r), 1,upper.ci)
RES <- data.frame(r, n, p = r/n, lower.ci, upper.ci)
return(RES)
}
|
a7e1bdd314e92be62e73a487eeeb49d2d2ab78d1 | dade9459484e9f79207fa40ac000c2aeaf1101f0 | /man/authors_names_interactive.Rd | b6614b4f7eaac888fc27d7a2d16ac18a6289d2d5 | [] | no_license | caterinap/frontpage | d16fa9da06daddc7a12ee2177c62ba35656eb063 | ef8a766797673b0c284054bd8de356059efd11b9 | refs/heads/master | 2020-03-21T20:54:05.256334 | 2018-06-21T16:02:02 | 2018-06-21T16:02:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 394 | rd | authors_names_interactive.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/authors_names_interactive.R
\name{authors_names_interactive}
\alias{authors_names_interactive}
\title{Create a list of authors}
\usage{
authors_names_interactive()
}
\value{
A list of author/affiliation to be used with coverpage()
}
\description{
This function creates a list of authors/affiliations interactively
}
|
b393501ae35465f78066d238857fad09366865c0 | 21841dbd5ea72df6fdd422211d4c9acfc3c80f77 | /R/topo_tpx.R | 29dcacd1a1571a735d28dd8f295f46500c0d2ba5 | [] | no_license | kkdey/topotpx | 6d8b4943b1dc0f8c636e3a08da847dd914d3afdc | d4de298ba763edc80083be63ffed5e37c0b57aa7 | refs/heads/master | 2021-01-10T08:28:32.183479 | 2016-02-03T08:08:23 | 2016-02-03T08:08:23 | 50,854,675 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,800 | r | topo_tpx.R | ####### Undocumented "tpx" utility functions #########
## ** Only referenced from topics.R
## check counts (can be an object from tm, slam, or a simple co-occurance matrix)
## Validate and normalize a count matrix for topic modeling.
## Accepts a tm TermDocumentMatrix (transposed so rows become documents) or
## anything coercible by slam::as.simple_triplet_matrix (e.g. a plain
## co-occurrence matrix).  Fills in missing dimnames, drops all-zero rows
## (blank documents) with a message, and returns a simple_triplet_matrix.
CheckCounts <- function(fcounts){
  ## FIX: inherits() instead of class(x)[1] == "..." -- robust to class
  ## vectors regardless of ordering.
  if(inherits(fcounts, "TermDocumentMatrix")){ fcounts <- t(fcounts) }
  if(is.null(dimnames(fcounts)[[1]])){ dimnames(fcounts)[[1]] <- paste("doc",1:nrow(fcounts)) }
  if(is.null(dimnames(fcounts)[[2]])){ dimnames(fcounts)[[2]] <- paste("wrd",1:ncol(fcounts)) }
  empty <- row_sums(fcounts) == 0
  if(sum(empty) != 0){
    fcounts <- fcounts[!empty,]
    cat(paste("Removed", sum(empty), "blank documents.\n")) }
  return(as.simple_triplet_matrix(fcounts))
}
## theta initialization
## ** main workhorse function. Only Called by the above wrappers.
## topic estimation for a given number of topics (taken as ncol(theta))
## MAP topic estimation for a fixed number of topics K = length(param_set).
##
## fcounts/X     : document-term counts (X must be a simple_triplet_matrix)
## param_set     : per-topic multiresolution (MRA) parameter sets; topic
##                 probabilities theta are derived from the mu trees built
##                 from these (mu_tree_build_set etc. are defined elsewhere)
## del_beta, a_mu, b_mu : prior parameters passed through to the tree helpers
## tol/tmax      : convergence tolerance and max iterations
## wtol          : tolerance for the inner omega (weights) solver
## qn            : quasi-Newton acceleration control (see tpxQN)
##
## Returns (invisibly) list(param_set, omega, K, L, iter).
tpxfit <- function(fcounts, X, param_set, del_beta, a_mu, b_mu, ztree_options, tol, verb,
                   admix, grp, tmax, wtol, qn)
{
  ## inputs and dimensions
  if(!inherits(X,"simple_triplet_matrix")){ stop("X needs to be a simple_triplet_matrix") }

  ## theta: p x K matrix of topic-word probabilities, column k taken from
  ## the bottom level of topic k's mu tree, normalized by its root.
  mu_tree_set <- mu_tree_build_set(param_set);
  K <- length(param_set);
  levels <- length(mu_tree_set[[1]]);
  theta <- do.call(cbind, lapply(1:K, function(l) mu_tree_set[[l]][[levels]]/mu_tree_set[[l]][[1]]));

  n <- nrow(X)
  p <- ncol(X)
  m <- row_sums(X)

  ## recycle these in tpxweights to save time (sparse X in doc-major order)
  xvo <- X$v[order(X$i)]
  wrd <- X$j[order(X$i)]-1
  doc <- c(0,cumsum(as.double(table(factor(X$i, levels=c(1:nrow(X)))))))

  ## Initialize document weights omega
  omega <- tpxweights(n=n, p=p, xvo=xvo, wrd=wrd, doc=doc, start=tpxOmegaStart(X,theta), theta=theta)

  ## tracking
  iter <- 0
  dif <- tol+1+qn
  update <- TRUE
  if(verb){
    cat("log posterior increase: " )
    digits <- max(1, -floor(log(tol, base=10))) }

  Y <- NULL # only used for qn > 0
  Q0 <- col_sums(X)/sum(X)
  L <- tpxlpost(fcounts, omega=omega, param_set=param_set, del_beta, a_mu, b_mu, ztree_options=1);
  # if(is.infinite(L)){ L <- sum( (log(Q0)*col_sums(X))[Q0>0] ) }

  ## Iterate towards MAP
  while( update && iter < tmax ){

    ## sequential quadratic programming for conditional omega solution
    if(admix && wtol > 0){ Wfit <- tpxweights(n=nrow(X), p=ncol(X), xvo=xvo, wrd=wrd, doc=doc,
                                              start=omega, theta=theta, verb=0, nef=TRUE, wtol=wtol, tmax=20) }
    else{ Wfit <- omega }

    ## Construct the MRA of z-values given the current iterates of omega/theta
    z_tree <- z_tree_construct(fcounts, omega_iter = Wfit, theta_iter = t(theta), ztree_options = 1);

    ## Extract the beta and mu_0 parameters from the MRA tree
    param_set_fit <- param_extract_ztree(z_tree, del_beta, a_mu, b_mu);

    ## Build a MRA of mu-tree sets (set of clusters)
    mu_tree_set_fit <- mu_tree_build_set(param_set_fit);

    ## Extract the theta updates from the MRA tree.
    ## FIX: the index bound was `nclus`, which is not defined anywhere in
    ## this function (or file); the number of topics here is K.
    levels <- length(mu_tree_set_fit[[1]]);
    theta_fit <- do.call(cbind, lapply(1:K, function(l) mu_tree_set_fit[[l]][[levels]]/mu_tree_set_fit[[l]][[1]]));

    move <- list(theta=theta_fit, omega=Wfit);

    ## joint parameter EM update
    ## move <- tpxEM(X=X, m=m, theta=theta, omega=Wfit, alpha=alpha, admix=admix, grp=grp)

    ## quasi-Newton acceleration
    QNup <- tpxQN(move=move, fcounts=fcounts, Y=Y, X=X, del_beta=del_beta, a_mu=a_mu, b_mu=b_mu,
                  ztree_options=ztree_options, verb=verb, admix=admix, grp=grp, doqn=qn-dif)
    move <- QNup$move
    Y <- QNup$Y

    if(QNup$L < L){ # happens on bad Wfit, so fully reverse
      if(verb > 10){ cat("_reversing a step_") }
      ##move <- tpxEM(X=X, m=m, theta=theta, omega=omega, alpha=alpha, admix=admix, grp=grp)
      z_tree <- z_tree_construct(fcounts, omega_iter = omega, theta_iter = t(theta), ztree_options = 1);
      param_set_fit <- param_extract_ztree(z_tree, del_beta, a_mu, b_mu);
      mu_tree_set_fit <- mu_tree_build_set(param_set_fit);
      levels <- length(mu_tree_set_fit[[1]]);
      ## FIX: same undefined `nclus` -> K as above.
      theta_fit <- do.call(cbind, lapply(1:K, function(l) mu_tree_set_fit[[l]][[levels]]/mu_tree_set_fit[[l]][[1]]));
      move <- list(theta=theta_fit, omega=omega);
      QNup$L <- tpxlpost(fcounts, move$omega, param_set_fit, del_beta, a_mu, b_mu, ztree_options=1) }

    ## calculate dif
    dif <- (QNup$L-L)

    L <- QNup$L

    ## check convergence (both posterior and theta must have stabilized)
    if(abs(dif) < tol){
      if(sum(abs(theta-move$theta)) < tol){ update = FALSE } }

    ## print
    if(verb>0 && (iter-1)%%ceiling(10/verb)==0 && iter>0){
      cat( paste( round(dif,digits), #" (", sum(abs(theta-move$theta)),")",
                  ", ", sep="") ) }

    ## heartbeat for long jobs
    if(((iter+1)%%1000)==0){
      cat(sprintf("p %d iter %d diff %g\n",
                  nrow(theta), iter+1,round(dif))) }

    ## iterate: accept the move and refresh param_set from the new theta
    iter <- iter+1
    theta <- move$theta;
    theta_tree_set <- lapply(1:K, function(k) mra_bottom_up(theta[,k]));
    param_set <- param_extract_mu_tree(theta_tree_set)
    omega <- move$omega

  }

  ## final log posterior
  L <- tpxlpost(fcounts, omega, param_set, del_beta, a_mu, b_mu, ztree_options=1);

  ## summary print
  if(verb>0){
    cat("done.")
    if(verb>1) { cat(paste(" (L = ", round(L,digits), ")", sep="")) }
    cat("\n")
  }

  out <- list(param_set=param_set, omega=omega, K=K, L=L, iter=iter)
  invisible(out) }
## ** called from topics.R (predict) and tpx.R
## Conditional solution for topic weights given theta
## ** called from topics.R (predict) and tpx.R
## Conditional solution for document topic weights omega given theta.
## Thin wrapper around the compiled "Romega" routine (ordtpx package);
## xvo/wrd/doc are the sparse counts in doc-major order as prepared in
## tpxfit.  Zero entries of the start matrix are nudged to 0.1/K and rows
## renormalized before the solve.  Returns an n x K weight matrix.
tpxweights <- function(n, p, xvo, wrd, doc, start, theta, verb=FALSE, nef=TRUE, wtol=10^{-5}, tmax=1000)
{
  K <- ncol(theta)
  start[start == 0] <- 0.1/K
  start <- start/rowSums(start)
  ## W is passed (and returned) transposed: K x n, hence the final t().
  omega <- .C("Romega",
              n = as.integer(n),
              p = as.integer(p),
              K = as.integer(K),
              doc = as.integer(doc),
              wrd = as.integer(wrd),
              X = as.double(xvo),
              theta = as.double(theta),
              W = as.double(t(start)),
              nef = as.integer(nef),
              tol = as.double(wtol),
              tmax = as.integer(tmax),
              verb = as.integer(verb),
              PACKAGE="ordtpx")
  return(t(matrix(omega$W, nrow=ncol(theta), ncol=n))) }
## ** Called only in tpx.R
## Quasi Newton update for q>0
## ** Called only in tpx.R
## Quasi-Newton acceleration of the EM move (for qn > 0).  Keeps a rolling
## window Y of the last three NEF-space iterates; once three are available,
## solves the secant condition for an extrapolated point and accepts it
## only if it improves the log posterior.  Returns list(Y, move, L).
tpxQN <- function(move, fcounts, Y, X, del_beta, a_mu, b_mu, ztree_options, verb, admix, grp, doqn)
{
  ## FIX: K was used below but never defined in this function (it was an
  ## implicit global); derive it from the proposed move.
  K <- ncol(move$theta)

  ## always check likelihood of the incoming move
  theta_tree_set_in <- lapply(1:K, function(k) mra_bottom_up(move$theta[,k]));
  param_set_in <- param_extract_mu_tree(theta_tree_set_in)

  L <- tpxlpost(fcounts, move$omega, param_set_in, del_beta, a_mu, b_mu, ztree_options)
  if(doqn < 0){ return(list(move=move, L=L, Y=Y)) }

  ## update Y accounting (keep at most the last three iterates)
  Y <- cbind(Y, tpxToNEF(theta=move$theta, omega=move$omega))
  if(ncol(Y) < 3){ return(list(Y=Y, move=move, L=L)) }
  if(ncol(Y) > 3){ warning("mis-specification in quasi-newton update; please report this bug.") }

  ## Check quasi-newton secant conditions and solve F(x) - x = 0.
  U <- as.matrix(Y[,2]-Y[,1])
  V <- as.matrix(Y[,3]-Y[,2])
  sUU <- sum(U^2)
  sVU <- sum(V*U)
  Ynew <- Y[,3] + V*(sVU/(sUU-sVU))
  qnup <- tpxFromNEF(Ynew, n=nrow(move$omega),
                     p=nrow(move$theta), K=ncol(move$theta))

  ## check for a likelihood improvement of the extrapolated point
  theta_tree_set_nup <- lapply(1:K, function(k) mra_bottom_up(qnup$theta[,k]));
  param_set_nup <- param_extract_mu_tree(theta_tree_set_nup)
  ## FIX: this call previously passed `X=X` as the first argument; every
  ## sibling call to tpxlpost in this file passes fcounts first, and the
  ## mismatch (hidden by try()) made the QN step always fail silently.
  Lqnup <- try(tpxlpost(fcounts, qnup$omega, param_set_nup, del_beta, a_mu, b_mu, ztree_options), silent=TRUE)

  if(inherits(Lqnup, "try-error")){
    if(verb>10){ cat("(QN: try error) ") }
    return(list(Y=Y[,-1], move=move, L=L)) }

  if(verb>10){ cat(paste("(QN diff ", round(Lqnup-L,3), ")\n", sep="")) }

  if(Lqnup < L){
    ## extrapolation did not help: keep the EM move, slide the window
    return(list(Y=Y[,-1], move=move, L=L)) }
  else{
    L <- Lqnup
    Y <- cbind(Y[,2],Ynew)
    return( list(Y=Y, move=qnup, L=L) )
  }
}
## Least-squares initialization of the document weights omega: regress X on
## theta via the normal equations; on failure (singular t(theta)%*%theta)
## fall back to uniform 1/K weights.  Non-positive entries are floored at
## 0.5 before row-normalizing (normalize() is defined elsewhere).
tpxOmegaStart <- function(X, theta)
{
  if(!inherits(X,"simple_triplet_matrix")){ stop("X needs to be a simple_triplet_matrix.") }
  omega <- try(tcrossprod_simple_triplet_matrix(X, solve(t(theta)%*%theta)%*%t(theta)), silent=TRUE )
  if(inherits(omega,"try-error")){ return( matrix( 1/ncol(theta), nrow=nrow(X), ncol=ncol(theta) ) ) }
  omega[omega <= 0] <- .5
  return( normalize(omega, byrow=TRUE) )
}
## fast computation of sparse P(X) for X>0
## Fast computation of sparse P(X) for X>0: evaluates q = omega %*% t(theta)
## only at the (doc, wrd) pairs with nonzero counts, via the compiled
## "RcalcQ" routine.  doc/wrd are 1-based here and shifted to 0-based for C.
tpxQ <- function(theta, omega, doc, wrd){

  if(length(wrd)!=length(doc)){stop("index mis-match in tpxQ") }
  if(ncol(omega)!=ncol(theta)){stop("theta/omega mis-match in tpxQ") }

  out <- .C("RcalcQ",
            n = as.integer(nrow(omega)),
            p = as.integer(nrow(theta)),
            K = as.integer(ncol(theta)),
            doc = as.integer(doc-1),
            wrd = as.integer(wrd-1),
            N = as.integer(length(wrd)),
            omega = as.double(omega),
            theta = as.double(theta),
            q = double(length(wrd)),
            PACKAGE="ordtpx" )

  return( out$q ) }
## model and component likelihoods for mixture model
## Model and component likelihoods for the mixture model, via the compiled
## "RmixQ" routine.  Returns log component likelihoods lQ (n x K), the log
## model likelihood lqlhd, and (if qhat=TRUE) posterior component
## probabilities qhat with hard assignment used to patch numerical
## underflow rows.
tpxMixQ <- function(X, omega, theta, grp=NULL, qhat=FALSE){
  if(is.null(grp)){ grp <- rep(1, nrow(X)) }
  K <- ncol(omega)
  n <- nrow(X)
  mixhat <- .C("RmixQ",
               n = as.integer(nrow(X)),
               p = as.integer(ncol(X)),
               K = as.integer(K),
               N = as.integer(length(X$v)),
               B = as.integer(nrow(omega)),
               cnt = as.double(X$v),
               doc = as.integer(X$i-1),
               wrd = as.integer(X$j-1),
               grp = as.integer(as.numeric(grp)-1),
               omega = as.double(omega),
               theta = as.double(theta),
               Q = double(K*n),
               PACKAGE="ordtpx")
  ## model and component likelihoods
  lQ <- matrix(mixhat$Q, ncol=K)
  lqlhd <- log(row_sums(exp(lQ)))
  lqlhd[is.infinite(lqlhd)] <- -600 # remove infs
  if(qhat){
    qhat <- exp(lQ-lqlhd)
    ## deal with numerical overload: rows that underflowed get a hard
    ## assignment to their max-likelihood component
    infq <- row_sums(qhat) < .999
    if(sum(infq)>0){
      qhat[infq,] <- 0
      qhat[n*(apply(matrix(lQ[infq,],ncol=K),1,which.max)-1) + (1:n)[infq]] <- 1 }
  }
  return(list(lQ=lQ, lqlhd=lqlhd, qhat=qhat)) }
## functions to move theta/omega to and from NEF.
## Map (theta, omega) probability parameters to their natural exponential
## family (NEF) representation via the compiled "RtoNEF" routine; the
## packed result has (p-1)*K entries for theta and n*(K-1) for omega.
tpxToNEF <- function(theta, omega){
  n <- nrow(omega)
  p <- nrow(theta)
  K <- ncol(omega)
  return(.C("RtoNEF",
            n=as.integer(n), p=as.integer(p), K=as.integer(K),
            Y=double((p-1)*K + n*(K-1)),
            theta=as.double(theta), tomega=as.double(t(omega)),
            PACKAGE="ordtpx")$Y)
}
## 'From' NEF representation back to probabilities
## Inverse of tpxToNEF: map a packed NEF vector Y back to probability
## matrices list(omega = n x K, theta = p x K) via "RfromNEF".  Note the C
## routine works with omega transposed (K x n), hence the final t().
tpxFromNEF <- function(Y, n, p, K){
  bck <- .C("RfromNEF",
            n=as.integer(n), p=as.integer(p), K=as.integer(K),
            Y=as.double(Y), theta=double(K*p), tomega=double(K*n),
            PACKAGE="ordtpx")
  return(list(omega=t( matrix(bck$tomega, nrow=K) ), theta=matrix(bck$theta, ncol=K)))
}
|
e542fe63b9fa4e69cc41397c875220f30970b46a | d0bde1b4c9396b13252c0f1f7fbceb49887ff155 | /man/userAnalysis.Rd | c55ef163cc52eb35cea794b5a91ad35cc682f771 | [] | no_license | bpb824/orcycler | eec3335cc07bea7f3e2f1620145fcdb6d6fe5f73 | 22f571100905c322a291283e33d404509f97d077 | refs/heads/master | 2021-01-01T05:48:22.208599 | 2015-10-18T21:16:05 | 2015-10-18T21:16:05 | 42,019,202 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 564 | rd | userAnalysis.Rd | % Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/userAnalysis.R
\name{userAnalysis}
\alias{userAnalysis}
\title{User Analysis}
\usage{
userAnalysis(db = "test", db_path = "source_data/db_credentials.json")
}
\arguments{
\item{db}{String indicating which db to use, default is "test", also can be "release"}
\item{db_path}{Absolute or relative path to db_credentials.json file.}
}
\value{
User analysis results
}
\description{
Initializes the database connection and calls a series of functions used to analyze ORcycle user data.
}
|
8b34f636724b76145729aa75e69fce4c893ede9b | 4e957be131dd5c735f8f23c61cb9187a7622cdcd | /ReadingData.R | 9aaf5d05f6fbee7f2f0c9a43a9c2ce3ab6c19c82 | [] | no_license | Dora-dongying/ExData_Plotting1 | dbbaacfffcf7ea85338f7189aef3722a24649001 | 8a0af8ccf35d08c1e2cb7381a4f1013038a36d32 | refs/heads/master | 2022-11-24T13:04:44.728358 | 2020-08-04T02:26:41 | 2020-08-04T02:26:41 | 284,842,012 | 0 | 0 | null | 2020-08-04T01:01:43 | 2020-08-04T01:01:42 | null | UTF-8 | R | false | false | 1,213 | r | ReadingData.R | ## Download the ZIP file and then unzip it. Check if the files exist before processing.
## Download the household-power-consumption ZIP and unzip it, skipping both
## steps when the files already exist, then read and subset the data.
zipname <- "ElectricPowerConsumption.zip"
fileURL <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
if (!file.exists(zipname)){
  download.file(fileURL, zipname, method = "curl")
}
filename <- "Dataset"
if (!file.exists(filename)){
  unzip(zipname)
}

## First calculate a rough estimate of how much memory the dataset will require
## in memory before reading into R.
## Rough calculation: memory required = no. of column * no. of rows * 8 bytes/numeric
##                                    = 9 * 2075259 * 8 bytes
estimated_memory <- 9 * 2075259 * 8 /1024 /1024 ## in MB

## Read the data into R as a data.frame.
## FIX: this dataset codes missing observations as "?"; read them as NA so
## numeric columns are not silently turned into character columns.
PowerData_all <- read.table("household_power_consumption.txt", sep = ";", header = TRUE,
                            na.strings = "?")

## Estimate the real size
library(pryr)
real_memory <- object_size(PowerData_all) /1024 /1024 ## in MB

## Subset to the two days of interest (dates are d/m/Y strings here)
PowerData <- subset(PowerData_all, Date == "1/2/2007" | Date == "2/2/2007")

## Convert date and time into date/time classes.
## NOTE(review): strptime() returns POSIXlt, which is awkward as a
## data.frame column -- consider as.POSIXct() here; kept for compatibility.
PowerData$Date <- as.Date(PowerData$Date, format = "%d/%m/%Y")
PowerData$Time <- strptime(paste(PowerData$Date, PowerData$Time),"%F %T")
e8f256fa7ea253c032b6ad15d6ff3a4f0dad93b3 | 2b0e7454e2c87076f4f97d35000bf3426b7d9aaa | /man/str_risk_fls_default.Rd | d53edc54b1b175cd9b41dc0ae68239b33b659233 | [] | no_license | raphael210/QDataGet | 52df9d791d7d1d8933555dbdfa9d81e42558a5ee | 83531020e180fe8d07fdfa4a75413fd2b95cd6b4 | refs/heads/master | 2020-04-12T06:29:33.198718 | 2019-02-01T07:50:14 | 2019-02-01T07:50:14 | 64,194,185 | 0 | 5 | null | 2017-03-16T03:29:45 | 2016-07-26T06:00:12 | R | UTF-8 | R | false | true | 353 | rd | str_risk_fls_default.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/risk01_BARRA_Model.R
\name{str_risk_fls_default}
\alias{str_risk_fls_default}
\title{default structure risk factor list}
\usage{
str_risk_fls_default()
}
\description{
This function returns the default factor-list setting that is used for regression in sigma prediction.
}
|
e05c52c2b05404aea7e197fe2797d6954086d7ce | 08cd5eeaebf69cca029ce366ffc6f6a277290c98 | /Time series project/processing.R | 3a6d7a6b3ce3d1edfb180ec96e6c914359047911 | [] | no_license | BachQuangMinh/PracticeProjects | c06975f2e311147eddf66a193f2bbe3e63ceec6a | 711b2e4508501bc00e10cd6b7eaa0aca3b75b220 | refs/heads/master | 2021-04-26T23:46:12.783316 | 2018-05-09T04:50:14 | 2018-05-09T04:50:14 | 123,853,644 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,158 | r | processing.R | setwd("C:/Users/DELL/Google Drive/JVN couse materials/Projects/Practice projects/Time series project")
library("forecast")
library("Metrics")
library("ggplot2")
library('rugarch')
library('lubridate')
rawdata=read.csv("energydata_complete.csv",row.names = 'date')
selectedcol=rawdata[2]
appliances_ts=ts(selectedcol)
#appliances_ts=ts(selectedcol, frequency = 144)
#summary
summary(appliances_ts)
#take a sample of the first 10 days
#sample=ts(appliances_ts[43:1482],frequency=144)
sample=ts(appliances_ts[43:1482])
logsample=log10(sample)
n=length(sample)
logdiff1=diff(logsample,lag=144,differences=1) #log transformation
#take the next 10 days as for testing
#testsample=ts(appliances_ts[1482:2922], frequency=144)
testsample=ts(appliances_ts[1482:2922])
logtestsample=log10(testsample)
testlogdiff1=c(0,log(testsample[2:n])-log(testsample[1:(n-1)]))
#plot the sample
plot.ts(sample,type="l",xlab="Time point",ylab="Energy(Wh)",main="Energy Consumption - The first ten days",col='red')
#Plot the transformation of the time series
plot.ts(logdiff1,type="l",xlab="Time point",ylab="Log difference energy(Wh)",main="Log Difference Energy Consumption - The first ten days",col='red')
#plot acf
acfResult <- acf(logdiff1, lag.max=400
, type='correlation', main='ACF plot',
plot = FALSE)
plot(acfResult)
abline(v=c(144,288))
#->show sign of stationary
#plot pacf
pacf(logdiff1, lag.max=400, plot=TRUE, main='PACF plot')
abline(v=c(144,288))
#->pretty nice pacf-> highest correlated lag is 255 - pacf=0.65
#decompose to locate trend, seasonal and random
decomposelogdiff1=decompose(ts(logdiff1,frequency = 144),'additive')
plot(decomposelogdiff1)
#->the trend indicates that this is not a stationary
#AR(1) model with 1st difference order
ar1_model=arima(x=logsample, order = c(1L, 1L, 0L))
ar1_model
fittedvalues=fitted(ar1_model)
mape(logsample,fittedvalues)
plot.ts(logsample,type="l",xlab="Time point",ylab="Log energy(Wh)",main="AR1 model",col='red')
lines(fittedvalues,lty=1,col="blue")
grid()
legend("topleft", legend=c("Fitted", "Observed"), col=c("blue","red"), lty=1:1, cex=0.8, box.lty=0)
hist(ar1_model$residuals,breaks=25, freq=FALSE, main='Residual Plot - AR(1)',col='blue')
qqnorm(ar1_model$residuals, main='Quantile-Quantile plot - AR(1)',col='blue')
acf(ar1_model$residuals, lag.max=1440, type='correlation', plot=TRUE, main='ACF residual plot-AR1')
ar1_forecasts=forecast(ar1_model,h=12)
autoplot(ar1_forecasts, main='Forecast-log scale-AR(1)', ylab='log energy', xlim=c(1400,1453))
#AR(6) with 1st difference order, lag 1hour
ar6_model=arima(x=logsample, order = c(6L, 1L, 0L))
ar6_model
fittedvalues=fitted(ar6_model)
mape(logsample,fittedvalues)
plot.ts(logsample,type="l",xlab="Time point",ylab="Log difference energy(Wh)",main="AR6 model",col='red')
lines(fittedvalues,lty=1,col="blue")
grid()
legend("topleft", legend=c("Fitted", "Observed"), col=c("blue","red"), lty=1:1, cex=0.8, box.lty=0)
hist(ar6_model$residuals,breaks=25, freq=FALSE, main='Residual Plot - AR(6)',col='blue')
qqnorm(ar6_model$residuals, main='Quantile-Quantile plot- AR(6)',col='blue')
acf(ar6_model$residuals, lag.max=1440, type='correlation', plot=TRUE, main='ACF residual plot-AR6')
ar6_forecasts=forecast(ar6_model,h=12)
autoplot(ar6_forecasts, main='Forecast-log scale-AR(6)', ylab='log energy', xlim=c(1400,1453))
#ARMA(6,6) with 1st difference order
arma66_model=arima(x=logsample, order = c(1L, 1L, 1L))
arma66_model
fittedvalues=fitted(arma66_model)
mape(logsample,fittedvalues)
plot.ts(logsample,type="l",xlab="Time point",ylab="Log energy(Wh)",main="ARMA(6,6) model",col='red')
lines(fittedvalues,lty=1,col="blue")
grid()
legend("topleft", legend=c("Fitted", "Observed"), col=c("blue","red"), lty=1:1, cex=0.8, box.lty=0)
hist(arma66_model$residuals,breaks=25, freq=FALSE, main='Residual Plot - ARMA(6,6)',col='blue')
qqnorm(arma66_model$residuals, main='Quantile-Quantile plot - ARMA(6,6)',col='blue')
acf(arma66_model$residuals, lag.max=1440, type='correlation', plot=TRUE, main='ACF residual plot-ARMA66')
arma66_forecasts=forecast(arma66_model,h=12)
autoplot(arma66_forecasts, main='Forecast-log scale-ARMA(6,6)', ylab='log energy', xlim=c(1400,1453))
#ARIMA(6,2,6) with 1st difference order
arima626_model=arima(x=logsample, order = c(6L, 2L, 6L))
arima626_model
fittedvalues=fitted(arima626_model)
mape(logsample,fittedvalues)
plot.ts(logsample,type="l",xlab="Time point",ylab="Log energy(Wh)",main="ARIMA(6,2,6) model",col='red')
lines(fittedvalues,lty=1,col="blue")
grid()
legend("topleft", legend=c("Fitted", "Observed"), col=c("blue","red"), lty=1:1, cex=0.8, box.lty=0)
hist(arima626_model$residuals,breaks=25, freq=FALSE, main='Residual Plot - ARIMA(6,2,6)',col='blue')
qqnorm(arima626_model$residuals, main='Quantile-Quantile plot - ARIMA(6,2,6)',col='blue')
acf(arima626_model$residuals, lag.max=1440, type='correlation', plot=TRUE, main='ACF residual plot-ARIMA626')
arima626_forecasts=forecast(arima626_model,h=12)
autoplot(arima626_forecasts, main='Forecast-log scale-ARIMA(6,2,6)', ylab='log energy', xlim=c(1400,1453))
# Simple exponential smoothing (no trend, no seasonal term): grid-search alpha.
MAPE <- c(1:4)
for (i in 1:4){
  es_model <- HoltWinters(ts(logsample, frequency = 144),
                          alpha = i/10 + 0.4, beta = FALSE, gamma = FALSE,
                          l.start = logsample[1])
  # Fitted values only exist from the second observation, hence 2:1440.
  MAPE[i] <- mape(logsample[2:1440], es_model$fitted[, 1])
}
MAPE
# MAPE as a function of the alpha values tried above (0.5, 0.6, 0.7, 0.8).
plot(c(0.5,0.6,0.7,0.8),MAPE, main='Exponential Smoothing - Alpha', xlab='Alpha', ylab='MAPE', type='l')
# Final model: let HoltWinters() optimise alpha itself.
exp_model <- HoltWinters(logsample, beta = FALSE, gamma = FALSE, l.start = logsample[1])
exp_model
mape(logsample, exp_model$fitted[, 1])
# Observed (red) vs smoothed (blue) series.
plot(logsample, main = 'Exponential Smoothing', ylab='Log Energy', col='red')
lines(c(2:1440), exp_model$fitted[, 1], col='blue')
legend("topleft", legend=c("Fitted", "Observed"), col=c("blue","red"), lty=1:1, cex=0.8, box.lty=0)
# 12-step-ahead forecast, zoomed to the series tail.
exp_forecast <- forecast(exp_model, h = 12)
autoplot(exp_forecast, main='Forecast-log scale-Exponential smoothing', ylab='log energy', xlim=c(1400,1453))
# Holt-Winters with additive seasonality (frequency = 144 observations per
# seasonal period). First grid-search gamma with alpha = 0.6 and beta = 0.1.
MAPE <- c(1:4)
for (i in 1:4){
  HW_ADD_model <- HoltWinters(ts(logsample, frequency = 144),
                              alpha = 0.6, beta = 0.1, gamma = i/10,
                              l.start = logsample[1],
                              seasonal = 'additive')
  # Seasonal fits start after the first full period, hence 145:1440.
  MAPE[i] <- mape(logsample[145:1440], HW_ADD_model$fitted[, 1])
}
MAPE
HW_ADD_model
# MAPE versus the gamma values tried above (0.1 ... 0.4).
plot(c(0.1,0.2,0.3,0.4),MAPE, main='alpha=0.6-beta=0.1-Gamma', xlab='Gamma', ylab='MAPE', type='l')
# Final model: let HoltWinters() optimise all three smoothing parameters,
# starting the optimiser from (alpha, beta, gamma) = (0.3, 0.1, 0.1).
HW_ADD_model <- HoltWinters(ts(logsample, frequency = 144),
                            alpha = NULL, beta = NULL, gamma = NULL,
                            l.start = logsample[1],
                            seasonal = 'additive',
                            optim.start = c(alpha = 0.3, beta = 0.1, gamma = 0.1))
mape(logsample[145:1440], HW_ADD_model$fitted[, 1])
# Observed (red) vs fitted (blue) series.
plot(logsample, main = 'HW_ADDITIVE model', ylab='Log Energy', col='red')
lines(c(145:1440), HW_ADD_model$fitted[, 1], col='blue')
legend("topleft", legend=c("Fitted", "Observed"), col=c("blue","red"), lty=1:1, cex=0.8, box.lty=0)
# 12-step-ahead forecast (x axis here is in seasonal-period units).
HW_ADD_forecast <- forecast(HW_ADD_model, h = 12)
autoplot(HW_ADD_forecast, main='Forecast-log scale-HW_ADDITIVE model', ylab='log energy', xlim=c(10.5,11.1))
#GARCH model
# Inspect the differenced series (xt) and its square: autocorrelation in
# xt^2 is the classic symptom of ARCH/GARCH effects in the variance.
xt=logdiff1
xtsquared=xt**2
par(mfrow=c(1,2))
acf(xt, lag.max=288, type='correlation', plot=TRUE, main='xt ACF plot')
acf(xtsquared, lag.max=288, type='correlation', plot=TRUE, main='xt squared ACF plot')
dev.off()
p = 6;
q = 6; # orders of the GARCH model
PP = 6;
QQ = 6; # orders of the ARMA model for the observed process.
# Observed y_t modelled as ARMA(PP,QQ) with an sGARCH(p,q) conditional
# variance. (The original comment claimed ARMA(2,2)/GARCH(1,1), which did
# not match the orders actually set above.)
spec = ugarchspec(variance.model=list(model="sGARCH",garchOrder=c(p,q)),
                  mean.model=list(armaOrder=c(PP,QQ),include.mean = TRUE))
GARCHfit=ugarchfit(data=logsample, spec=spec, solver='solnp')
mape(logsample,GARCHfit@fit$fitted.values)
# NOTE(review): m is meant to be the number of estimated parameters in the
# AIC penalty; with ARMA(6,6) + GARCH(6,6) it is far larger than 4 -- confirm.
m=4
# FIX: AIC = -2 * logLik + 2*m. The original took prod() of
# @fit$log.likelihoods, i.e. the *product* of log-likelihood terms, which is
# not a log-likelihood at all; the terms must be summed. (This local `AIC`
# also shadows stats::AIC.)
AIC=-2*sum(GARCHfit@fit$log.likelihoods)+2*m
# Observed (red) vs fitted (blue) series.
plot.ts(logsample,type="l",xlab="Time point",ylab="Log energy(Wh)",main="GARCH-ARMA(6,6)",col='red')
lines(c(1:1440),GARCHfit@fit$fitted.values,lty=1,col="blue")
grid()
legend("topleft", legend=c("Fitted", "Observed"), col=c("blue","red"), lty=1:1, cex=0.8, box.lty=0)
# Residual diagnostics: histogram, trace, Q-Q plot and ACF.
hist(GARCHfit@fit$residuals,breaks=25, freq=FALSE, main='Residual Plot - GARCH-ARMA(6,6)',col='blue')
plot(GARCHfit@fit$residuals, main='Residual Plot - GARCH-ARMA(6,6)',col='blue',type='l')
qqnorm(GARCHfit@fit$residuals, main='Quantile-Quantile plot - GARCH-ARMA(6,6)',col='blue')
acf(GARCHfit@fit$residuals, lag.max=1440, type='correlation', plot=TRUE, main='ACF residual plot-GARCH-ARMA(6,6)')
# 12-step-ahead forecast of both mean and conditional volatility.
GARCHforecast=ugarchforecast(fitORspec=GARCHfit, n.ahead=12)
show(GARCHforecast)
plot(GARCHforecast)
#optimal arima model
# Let auto.arima() pick the order by information criterion. The plot titles
# assume it selects ARIMA(3,0,3); verify the selected order on a given run.
opt.arimamodel=auto.arima(logsample)
plot.ts(logsample,type="l",xlab="Time point",ylab="Log energy(Wh)",main="ARIMA(3,0,3) model",col='red')
# FIX: the original overlaid `fittedvalues`, which still held the fitted
# values of the earlier ARIMA(6,2,6) model; use this model's own fit.
lines(fitted(opt.arimamodel),lty=1,col="blue")
grid()
legend("topleft", legend=c("Fitted", "Observed"), col=c("blue","red"), lty=1:1, cex=0.8, box.lty=0)
# Residual diagnostics: histogram, Q-Q plot and ACF.
hist(opt.arimamodel$residuals,breaks=25, freq=FALSE, main='Residual Plot - ARIMA(3,0,3)',col='blue')
qqnorm(opt.arimamodel$residuals, main='Quantile-Quantile plot - ARIMA(3,0,3)',col='blue')
acf(opt.arimamodel$residuals, lag.max=1440, type='correlation', plot=TRUE, main='ACF residual plot-ARIMA(3,0,3)')
# 12-step-ahead forecast with the held-out test values overlaid in red.
opt.arimaforecast=forecast(opt.arimamodel,h=12)
autoplot(opt.arimaforecast, main='Forecast-log scale-ARIMA(3,0,3)', ylab='log energy', xlim=c(1400,1453))
# NOTE(review): this indexes logtestsample at 1441:1452 while the plot below
# uses logtestsample[1:12] -- one of the two indexings is likely wrong.
lines(c(1441:1452),logtestsample[1441:1452],col='red')
# Manual forecast plot: observed tail, test values (red), point forecasts
# (blue), and the 80% (red dashed) / 95% (green dashed) prediction intervals.
# FIX: plot.ts(logsample[1400:1440]) would draw the subset at x = 1..41,
# entirely outside xlim = c(1400, 1453); plot against explicit x positions.
plot(c(1400:1440),logsample[1400:1440],main='Forecast-log scale-ARIMA(3,0,3)',xlab='Time',ylab='log energy',xlim=c(1400,1453),ylim=c(-3,3),type='l')
lines(c(1441:1452),logtestsample[1:12],col='red')
# FIX: $fitted holds the 1440 in-sample fitted values and cannot be drawn
# against 12 x positions; the 12-step point forecasts are in $mean.
lines(c(1441:1452),opt.arimaforecast$mean,col='blue')
lines(c(1441:1452),opt.arimaforecast$lower[,1],col='red',lty=2)
lines(c(1441:1452),opt.arimaforecast$upper[,1],col='red',lty=2)
lines(c(1441:1452),opt.arimaforecast$lower[,2],col='green',lty=2)
lines(c(1441:1452),opt.arimaforecast$upper[,2],col='green',lty=2)
|
f7c95dcccc5f33694e4c5c6a8025c1f2416bf944 | 4cf955931849f66bf3aace946e1ccac30c78d0e4 | /plot2.R | d1cd263991d978ee6f0d8e347eab97bc40de6d43 | [] | no_license | ariel32/ExData_Plotting1 | fabc9e21d10fa3f5d1bf70785f5fb75a8e233758 | 892a76ec59268f4acf07840e26ddcd8ae3409d6b | refs/heads/master | 2020-03-23T18:11:27.512895 | 2018-07-22T13:10:58 | 2018-07-22T13:10:58 | 141,894,602 | 0 | 0 | null | 2018-07-22T12:41:00 | 2018-07-22T12:40:59 | null | UTF-8 | R | false | false | 509 | r | plot2.R | d = read.csv("household_power_consumption.txt", sep = ";", stringsAsFactors = F, header = T, na.strings = "?")
# Restrict the readings (loaded above into `d`) to 2007-02-01 and 2007-02-02,
# then render global active power over time as a line chart into plot2.png.
d$Date <- as.Date(d$Date, format = "%d/%m/%Y")
keep <- d$Date >= "2007-02-01" & d$Date <= "2007-02-02"
d <- d[keep, ]
d$Global_active_power <- as.numeric(d$Global_active_power)
# Full timestamp for the x axis: date and time glued together.
d$Datetime <- as.POSIXct(paste(d$Date, d$Time))
png("plot2.png", width = 480, height = 480)
plot(d$Global_active_power ~ d$Datetime,
     type = "l", xlab = "",
     ylab = "Global Active Power (kilowatts)")
dev.off()
8ed087a82ac4006d41f44d7ed0dad794b4eaa57f | 229a06eff625c59a3813050aec6d07b1b9041d96 | /OtherPackages/DAVIDQuery/man/getAnnotationChoices.Rd | c97c0a1a3192568443e59a009b5f5b99ff929176 | [] | no_license | rikenbit/PubMedQuery | 75e161dec8cf792ef5d9be669bb8447c21e8bf3a | 964eeb30436ef93b8f1b34c216f15e8cbad51fef | refs/heads/master | 2021-01-10T20:07:15.630529 | 2014-08-07T08:31:54 | 2014-08-07T08:31:54 | 22,540,329 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,382 | rd | getAnnotationChoices.Rd | \name{getAnnotationChoices}
\alias{getAnnotationChoices}
\title{Retrieve all possible annotation values used in the annotation report tool}
\usage{getAnnotationChoices(urlBase=DAVIDURLBase, curl=RCurl::getCurlHandle(),
verbose=TRUE)}
\description{Retrieve all possible annotation values used in the annotation report tool}
\details{When \code{getAnnotationChoices} is called for the first time within an R session,
it retrieves the set of annotation values from the DAVID web services, stores them
in the DAVIDAnnotChoices data structure, and reuses that cached copy in subsequent calls.}
\value{the list of possible annotation tags, i.e. GOTERM_MF_4, GOTERM_MF_5, BLOCKS_ID etc.
used with the annotationReport tool.}
\seealso{{\code{\link{getIdConversionChoices}}, \code{\link{getAffyChipTypes}}, \code{\link{convertIDList}}, \code{\link{DAVIDQuery}}}}
\author{Roger Day, Alex Lisovich}
\arguments{\item{urlBase}{the DAVID main page url. Default is DAVIDURLBase.}
\item{curl}{RCurl handle. Default is getCurlHandle()}
\item{verbose}{if TRUE enables diagnostic messages}}
\examples{\dontrun{
#retrieve annotation values
annotChoices<-getAnnotationChoices();
#display choice dialog
item<-menu(graphics = TRUE, title = "Select Identifier", annotChoices$from[,"name"]);
#retrieve identifier for subsequent conversion
ident<-annotChoices$from[item,"value"];
}}
|
a5a2fc19eeb033b9dade05be9aa1f8c3fd9902cf | 144f7a47ac2f246fba2e2a89d8aaaf02a16a1497 | /plot1.R | bb1078d958a2ab38727ee1c748affdff4e5394a6 | [] | no_license | neucast/Exploratory_Data_Analysis_Project2 | d57d0bde058603f6263352908caaf5d411a37c4e | 3cf8b1dd90fded397c6d21dae6c23dffed3606ba | refs/heads/master | 2020-05-29T12:52:38.227680 | 2019-05-30T03:45:53 | 2019-05-30T03:45:53 | 189,142,124 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,815 | r | plot1.R | #####################################################################################################################
# Exploratory Data Analysis, project 2 -- question 1:
# Have total emissions from PM2.5 decreased in the United States from 1999
# to 2008? Using the base plotting system, draw the total PM2.5 emission
# from all sources for each of 1999, 2002, 2005 and 2008 into plot1.png.

# Working directory ----
setwd("D:/OneDrive/Documentos/CourseraDataScience/ExploratoryDataAnalysis/Week4/Project2/Exploratory_Data_Analysis_Project2")

# Libraries ----
library("data.table")
library("RColorBrewer")

# Download and unzip the data archive (skipped when already present) ----
path <- getwd()
fileName <- "dataFiles.zip"
if (!file.exists(fileName)) {
  download.file(url = "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip",
                destfile = paste(path, fileName, sep = "/"))
  unzip(zipfile = fileName)
}

# Load the two RDS tables ----
# NEI: National Emissions Inventory; SCC: Source Classification Code.
NEI <- data.table::as.data.table(x = readRDS(file = "summarySCC_PM25.rds"))
SCC <- data.table::as.data.table(x = readRDS(file = "Source_Classification_Code.rds"))

# Prepare the data ----
# Force Emissions to numeric (avoids scientific-notation axis labels),
# then total the emissions per year.
NEI[, Emissions := as.numeric(Emissions)]
totalNEI <- NEI[, .(Emissions = sum(Emissions, na.rm = TRUE)), by = year]

# Free the large inputs as soon as they are no longer needed.
rm(NEI, SCC)

# Draw the bar plot into plot1.png ----
png(filename = 'plot1.png')
cols <- brewer.pal(9, "Blues")
barplot(height = totalNEI[, Emissions],
        names.arg = totalNEI[, year],
        xlab = "Years",
        ylab = expression('Aggregated Emissions (Tons)'),
        main = expression('Aggregated PM'[2.5]*' Emmissions by Year'),
        col = cols)
dev.off()

# Clean up the remaining objects.
rm(totalNEI, cols, fileName, path)
7796d64f88c331ab106d79ffe40a928ca4de90f2 | c9696a9e9def19ff601c8bccfc207949fe555ef1 | /regTCGA.R | e562b34c6ca48abf1e63738fecf9f00c602f1dea | [] | no_license | Jesse-Islam/caseBaseRegularization | dd814cca3a6571ac59009a0e9e60fb1787a7e5b7 | e4bb2ea3b7c89d4380c3a0874787b7fe5eb4baf7 | refs/heads/master | 2020-06-23T15:03:18.444819 | 2020-04-07T10:45:44 | 2020-04-07T10:45:44 | 198,657,691 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,240 | r | regTCGA.R | #these packages were taken from github on 23 July 2019
library(casebase)
library(TCGA2STAT)
#these packages were pulled from bioconductor
library(BiocManager)
library(glmnet)
library(doParallel)
# Local matrix-interface variant of casebase's fitSmoothHazard: samples a
# case-base data set from the survival data (x, y) and fits a binomial model
# for the hazard with the requested back end.
#
#   x                  covariate matrix (one row per subject).
#   y                  outcome matrix holding the follow-up time and event
#                      columns.
#   formula_time       optional formula for the time component of the linear
#                      predictor; defaults to a linear term in `time`.
#   time, event        names of the time / event columns in y.
#   family             one of "glm", "gbm", "glmnet" -- the fitting engine.
#   censored.indicator forwarded to sampleCaseBase when supplied.
#   ratio              case-base sampling ratio passed to sampleCaseBase.
#   ...                forwarded to the underlying fitting function.
#
# Returns the fitted model object, augmented with the original data and
# bookkeeping fields (typeEvents, timeVar, eventVar, matrix.fit, ...).
fitSmoothHazard.fitted <- function(x, y, formula_time, time, event, family = c("glm", "gbm", "glmnet"),
                                   censored.indicator, ratio = 100, ...) {
    family <- match.arg(family)
    # NOTE(review): "gam" is not among the choices above, so match.arg() can
    # never return it and this branch is unreachable.
    if (family == "gam") stop("The matrix interface is not available for gam")
    # Optional back ends must be installed before going any further.
    if (family == "gbm" && !requireNamespace("gbm", quietly = TRUE)) {
        stop("Pkg gbm needed for this function to work. Please install it.",
             call. = FALSE)
    }
    if (family == "glmnet" && !requireNamespace("glmnet", quietly = TRUE)) {
        stop("Pkg glmnet needed for this function to work. Please install it.",
             call. = FALSE)
    }
    # Default to linear term
    if (missing(formula_time)) {
        formula_time <- formula(paste("~", time))
        timeVar <- time
    } else {
        # Extract the variable name from either a one- or two-sided formula.
        timeVar <- if (length(formula_time) == 3) all.vars(formula_time[[3]]) else all.vars(formula_time)
    }
    # There should only be one time variable
    stopifnot(length(timeVar) == 1)
    # Try to infer event from y's column names when not given explicitly
    # (checkArgsTimeEvent is a casebase internal).
    if (missing(event)) {
        varNames <- checkArgsTimeEvent(data = as.data.frame(y), time = timeVar)
        eventVar <- varNames$event
    } else eventVar <- event
    # Distinct event codes; more than two would mean competing risks.
    typeEvents <- sort(unique(y[,eventVar]))
    # Call sampleCaseBase, keeping the raw inputs for later use (e.g. by
    # absoluteRisk()).
    originalData <- list("x" = x,
                         "y" = y)
    class(originalData) <- c(class(originalData), "data.fit")
    if (missing(censored.indicator)) {
        sampleData <- sampleCaseBase(as.data.frame(cbind(y, x)),
                                     timeVar, eventVar,
                                     comprisk = (length(typeEvents) > 2),
                                     ratio)
    } else {
        sampleData <- sampleCaseBase(as.data.frame(cbind(y, x)),
                                     timeVar, eventVar,
                                     comprisk = (length(typeEvents) > 2),
                                     censored.indicator, ratio)
    }
    # Design matrix for the sampled data: every column except the event,
    # time and offset columns, plus the expanded time terms (the intercept
    # is removed from the time model matrix with `~ . -1`).
    sample_event <- as.matrix(sampleData[,eventVar])
    sample_time_x <- cbind(as.matrix(sampleData[,!names(sampleData) %in% c(eventVar, timeVar, "offset")]),
                           model.matrix(update(formula_time, ~ . -1), sampleData))
    sample_offset <- sampleData$offset
    # Fit a binomial model if there are no competing risks
    if (length(typeEvents) == 2) {
        # NOTE(review): the case-base offset is passed to glm.fit and
        # gbm.fit but not to cv.glmnet -- confirm this is intentional.
        out <- switch(family,
                      "glm" = glm.fit(sample_time_x, sample_event,
                                      family = binomial(),
                                      offset = sample_offset),
                      "glmnet" = glmnet::cv.glmnet(sample_time_x, sample_event,
                                                   family = "binomial",
                                                   ...),
                      "gbm" = gbm::gbm.fit(sample_time_x, sample_event,
                                           distribution = "bernoulli",
                                           offset = sample_offset,
                                           verbose = FALSE, ...))
        # Attach bookkeeping consumed downstream by absoluteRisk().
        out$originalData <- originalData
        out$typeEvents <- typeEvents
        out$timeVar <- timeVar
        out$eventVar <- eventVar
        out$matrix.fit <- TRUE
        out$formula_time <- formula_time
        out$offset<- sample_offset
    } else {
        # Competing risks are not supported by this adapted copy.
        stop("Not implemented yet")
        # Otherwise fit a multinomial regression (dead code retained from
        # the original casebase implementation):
        # withCallingHandlers(model <- vglm(formula, family = multinomial(refLevel = 1),
        #                                   data = sampleData),
        #                     warning = handler_fitter)
        #
        # out <- new("CompRisk", model,
        #            originalData = originalData,
        #            typeEvents = typeEvents,
        #            timeVar = timeVar,
        #            eventVar = eventVar)
    }
    return(out)
}
# Load the cached TCGA LUSC RNA-Seq data; the original getTCGA() download is
# kept for reference but commented out.
#lusc.rnaseq2 <- getTCGA(disease="LUSC", data.type="RNASeq2", clinical=TRUE)
lusc.rnaseq2 <-readRDS('lusc.rnaseq2.rds')
highDimSurvData=na.omit(lusc.rnaseq2$merged.dat)
highDimNames=colnames(highDimSurvData)
# Formula-interface example (not used below): status ~ spline(OS) + genes.
fmla=as.formula(paste("status~ bs(OS) +",paste(highDimNames[20200:length(highDimNames)],collapse = "+")))
#highDimSurvData[, -c(1:3)]
# Smoke test: case-base sample, then cv.glmnet on ~100 gene columns.
cbTCGA=sampleCaseBase(highDimSurvData,event="status",time="OS",ratio=10)
y=as.matrix(cbTCGA[,c(2)])
x=as.matrix(cbTCGA[,c(3:100,20505)])
timeTest=cv.glmnet(x,y,family=c("binomial"))
new_data=as.data.frame(t(x[5,]))
ab=absoluteRisk(timeTest,time = seq(0,300, 1),newdata = new_data)
#hard coded fixes
# Flag the object as a matrix-interface fit so absoluteRisk() treats it so.
timeTest$matrix.fit=1
#defaulting to matrix version, for cv.glmnet makes sense, as it doesn't have a native formula interface
# Full fit: y = (time, status) columns, x = every gene column.
y=as.matrix(highDimSurvData[,c(2,3)])
x=as.matrix(highDimSurvData[,c(4:length(highDimSurvData[1,]))])
# Coefficient box constraints: unconstrained for every gene column.
# (rep(Inf, each=n) was replaced with the conventional times=; same result.)
uppers=rep(Inf, times=ncol(x))
lowers=rep(-Inf,times=ncol(x))
# FIX: the original read `uppers=c(uppers,)`, which errors at run time
# ("argument 2 is empty"). One extra slot is appended to each bound vector,
# presumably for the time term added inside fitSmoothHazard.fit; the lower
# bound is 0, so the matching non-constraining upper bound is Inf.
uppers=c(uppers, Inf)
lowers=c(lowers,0)
registerDoParallel(2)
# NOTE(review): these calls use casebase's fitSmoothHazard.fit, not the
# local fitSmoothHazard.fitted defined above -- confirm which was intended.
fit=fitSmoothHazard.fit(x,y,time="OS",event="status",family=c("glmnet"),ratio=10,lower.limits=lowers,upper.limits=uppers,parallel=TRUE,nfold=3)
# Unconstrained ridge fit (alpha=0) used for the absolute-risk curve below.
wholeFit=fitSmoothHazard.fit(x,y,time="OS",event="status",family=c("glmnet"),ratio=10,parallel=FALSE,nfold=8,alpha=0)
new_data=as.data.frame(t(x[5,]))
#new_data$offset=fit$offset[1]
ab=absoluteRisk(wholeFit,time = seq(0,10000, 100),newdata = new_data)
plot(ab,type = "l")
bd827156d02b8a9bf2a4e2ba8050eeb9cdbea392 | fa177ce117c273bd6046718f0682f4eabe1ce75e | /R_code_EXAM.r | d68a23beb152ba1b4eea7a87da711df02bb4f23f | [] | no_license | galluccichiara/monitoring_2021 | c975967e4922a6a3252e33a4e0f92ed4d5da3894 | 994e046c7174b58d177bda9f56ee9c76f05b8ff0 | refs/heads/main | 2023-06-15T20:58:36.198856 | 2021-07-09T00:32:24 | 2021-07-09T00:32:24 | 309,310,262 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,648 | r | R_code_EXAM.r | #source: # https://land.copernicus.vgt.vito.be/PDF/portal/Application.html
# Data source: https://land.copernicus.vgt.vito.be/PDF/portal/Application.html
# ncdf4: open/read NetCDF data files; raster: import and model spatial data.
library(ncdf4)
library(raster)
setwd("C:/lab/")

# Import every Surface Albedo layer at once instead of one file at a time.
rlist <- list.files(pattern = "c_gls_ALDH_")
rlist
list_rast <- lapply(rlist, raster)
# Collapse the separate layers into a single multi-layer stack.
ALBstack <- stack(list_rast)

# One panel per Albedo period, arranged 3x3; yellow marks the highest
# values (100 = number of colours in the ramp).
par(mfrow = c(3, 3))
cl <- colorRampPalette(c('green','orange','yellow')) (100)
plot(ALBstack, col = cl, main = c("ALBEDO 25-09-19/25-10-19", "ALBEDO 26-10-19/25-11-19", "ALBEDO 26-11-19/25-12-19", "ALBEDO 26-12-19/25-01-20", "ALBEDO 26-01-20/25-02-20", "ALBEDO 26-02-20/25-03-20", "ALBEDO 26-03-20/25-04-20 ","ALBEDO 26-04-20/25-05-20", "ALBEDO 26-05-20/25-06-20"))

# The study window (13/10/2019-30/06/2020) does not line up exactly with the
# Albedo periods, so compare the nine layers with numbered boxplots to pick
# a reference layer; each box shows that period's spread of Albedo values.
boxplot(ALBstack, horizontal = TRUE, axes = TRUE, outline = FALSE,
        col = "sienna1", xlab = "Albedo", ylab = "Period",
        names = c("01", "02", "03", "04", "05", "06", "07", "08", "09"))
# Candidate reference layer: Albedo 05 (median boxplot with a wide value
# range). Its resolution over the case-study area is poor, so Albedo 06 is
# loaded as well and the two layers are compared.
ALB05 <- raster("c_gls_ALDH_05.nc")
cl <- colorRampPalette (c('green','chocolate3','darkblue')) (100)
plot(ALB05, col=cl,main ="ALBEDO 26-01-20/25-02-20")
# FIX: ALB06 was originally created *after* the difference below already
# referenced it, which fails when the script runs top to bottom; the import
# is moved ahead of the difference.
ALB06 <- raster("c_gls_ALDH_06.nc")
# Where the red is most intense the two layers differ; over the case-study
# area the difference is negligible, so ALB06 can stand in for ALB05.
difALB <- ALB06 - ALB05
cldif<- colorRampPalette(c('red','wheat','red'))(100)
plot(difALB, col=cldif, main= "Difference Alb06 - Alb05")
plot(ALB06, col=cl,main ="ALBEDO 26-02-20/25-03-20")
# Vegetation properties for the same window (13/10/2019-30/06/2020):
# FAPAR 300m V1 layer.
fapar <- raster("c_gls_FAPAR300_202005100000_GLOBE_PROBAV_V1.0.1.nc")
cl <- colorRampPalette (c('burlywood4','yellow','green4')) (100)
plot(fapar, col = cl, main ="FAPAR 13/10/2019-30/06/2020")

# LAI 300m V1 layer, drawn with the same palette as FAPAR.
lai <- raster("c_gls_LAI300_202005100000_GLOBE_PROBAV_V1.0.1.nc")
plot(lai, col = cl, main ="LAI 13/10/2019-30/06/2020")
# Zoom on a specific area (extent overlapping Europe: xmin, xmax, ymin,
# ymax) to analyse the Albedo/FAPAR and Albedo/LAI correlations.
ext <- c(0,50,40,60)
# FIX: the original cropped an undefined object `albedo`; ALB06 is the
# Albedo layer selected as reference above. NOTE(review): the plot title
# mentions 15/04-15/05/20, which does not match ALB06's labelled period --
# confirm which layer was intended.
EUALBEDO <- crop (ALB06, ext)
cl <- colorRampPalette (c('green','blue','yellow')) (100)
plot(EUALBEDO, col=cl, main ="EU.ALBEDO 15/04-15/05/20")
# Same crop for FAPAR, so all plots share one extent.
ext <- c(0,50,40,60)
EUFAPAR <- crop (fapar, ext)
cl <- colorRampPalette (c('brown','yellow','red')) (100)
plot(EUFAPAR, col= cl, main="EU.FAPAR 13/12/2019-31/08/2020")
# Same crop for LAI.
ext <- c(0,50,40,60)
EULAI <- crop (lai, ext)
cl <- colorRampPalette (c('black','yellow','green')) (100)
plot(EULAI, col= cl, main="EU.LAI 13/12/2019-31/08/2020")
# Side-by-side comparison, with Albedo in the middle so both the
# Albedo-FAPAR and Albedo-LAI relationships are visible at a glance.
par(mfrow=c(1,3))
plot(EUFAPAR, col=cl,main="EU.FAPAR 13/12/2019-31/08/2020")
plot(EUALBEDO, col=cl,main="EU.ALBEDO 15/04-15/05/20")
plot(EULAI,col=cl, main="EU.LAI 13/12/2019-31/08/2020")
# CONCLUSIONS:
# In this region, low Albedo is not necessarily due to high LAI values; a
# high FAPAR compensates for that deficiency. Even where the live canopy
# yields modest LAI, the fraction of solar radiation absorbed by live
# leaves for photosynthesis remains significantly intense.
66208fcff98fab89bbfd785a55bf893306015729 | effe14a2cd10c729731f08b501fdb9ff0b065791 | /cran/paws.customer.engagement/man/pinpoint_delete_event_stream.Rd | f825d89c72dd6554f9b957fdc9232ed786647b93 | [
"Apache-2.0"
] | permissive | peoplecure/paws | 8fccc08d40093bb25e2fdf66dd5e38820f6d335a | 89f044704ef832a85a71249ce008f01821b1cf88 | refs/heads/master | 2020-06-02T16:00:40.294628 | 2019-06-08T23:00:39 | 2019-06-08T23:00:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 540 | rd | pinpoint_delete_event_stream.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pinpoint_operations.R
\name{pinpoint_delete_event_stream}
\alias{pinpoint_delete_event_stream}
\title{Deletes the event stream for an app}
\usage{
pinpoint_delete_event_stream(ApplicationId)
}
\arguments{
\item{ApplicationId}{[required] The unique ID of your Amazon Pinpoint application.}
}
\description{
Deletes the event stream for an app.
}
\section{Request syntax}{
\preformatted{svc$delete_event_stream(
ApplicationId = "string"
)
}
}
\keyword{internal}
|
a7181418bff07757d7190c1dfc72392f0165bc59 | 93730e6fee0ae4e3bb028714d6e8a5e335632121 | /man/pivot_fileInput.Rd | 57c55f13a39b32a624a3f946b66cbef532f63531 | [] | no_license | jeevanyue/PIVOT | 56ef7c2d2eb30a197b1c0387ae79e73605ab7eae | 46fa6af11f19c320ee338452ccff745aa93a1f6d | refs/heads/master | 2021-01-01T06:30:45.988532 | 2017-07-17T02:51:20 | 2017-07-17T02:51:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 291 | rd | pivot_fileInput.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/file.R
\name{pivot_fileInput}
\alias{pivot_fileInput}
\title{PIVOT help modules, server}
\usage{
pivot_fileInput(input, output, session, reset = FALSE, return_df = T)
}
\description{
PIVOT help modules, server
}
|
69031eaeb52f1e7dc39a94449d80a9d7992c48d3 | 66fcd73c639c308b030f834ab3ece0725cfd712c | /SRC/Shared/Functions/FILTERING_Function.R | be779db3cc6699d337a69f45b655a17dc5e567bf | [] | no_license | chrismazzeo/Tesis_Marcadores_Glicoinmunologicos | ae871350a259afb7c08cde70d76e262bddd49e4e | 2626a75ceedb2c59531fe39f02f5590ac8ebea85 | refs/heads/master | 2023-06-30T13:54:01.223662 | 2021-08-03T03:28:47 | 2021-08-03T03:28:47 | 286,357,178 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,073 | r | FILTERING_Function.R | #********************
# Filtering
#*******************
# Keep only the rows of a DESeq2 results table whose adjusted p-value is
# defined (non-NA) and strictly below the given threshold.
FILTERING_DESEQ2_RES_ByAdjPvalue = function (res, padj){
  significant <- !is.na(res$padj) & res$padj < padj
  res[significant, ]
}
# Subset a data frame to the rows whose value in `sourceColumn` (a vector,
# typically one column of the data frame) appears in `filterColumn`.
FILTERING_ByGeneList = function (sourceDataFrame,sourceColumn,filterColumn){
  keep <- sourceColumn %in% filterColumn
  sourceDataFrame[keep, ]
}
# TODO: generalize so the function can filter on any field passed in,
# instead of the hard-coded "Sample Type" / "Sample ID" columns.
#
# Filter a TCGA sample sheet: keep Primary Tumor and Solid Tissue Normal
# rows, keep only Sample IDs containing "-01A" or "-11A", drop duplicate
# IDs, sort, and write the result via the project helper saveTSV.
#   inputFile  - TSV path, read with the project helper loadTSV.
#   outputPath - passed straight to saveTSV; behaviour for the NULL default
#                depends on that helper -- TODO confirm it tolerates NULL.
# Returns the filtered, sorted sample table.
FILTERING_Samples = function (inputFile, outputPath = NULL){
  samples = loadTSV(inputFile)
  # keep only the two tissue types of interest
  samples = samples [samples$`Sample Type` == "Primary Tumor" | samples$`Sample Type` == "Solid Tissue Normal",]
  # keep only aliquots whose Sample ID contains -01A or -11A
  samples = samples %>% filter(str_detect(samples$`Sample ID`,"-01A") | str_detect(samples$`Sample ID`,"-11A"))
  # drop replicate rows sharing the same Sample ID (first occurrence wins)
  samples = samples[!duplicated(samples$`Sample ID`),]
  # sort by sample type, then by sample ID
  samples = samples[order(samples$`Sample Type`, samples$`Sample ID`),]
  saveTSV(samples,outputPath)
  return (samples)
}
|
5b507ae4987ffd8a830af10d969cebab50346317 | dc690d00abdeada088f7a87f37a43465f4e45e82 | /Initial Commit.R | bb287ff74c85b96a9498c0da4113cb1046973d0f | [] | no_license | kumaann/Temporary_add_to_version_control | d9079706429fef36ed834609f26c7f3c4ec897dd | 7b735ab0a93f3d09f540074c00fd14c90e7be2ed | refs/heads/master | 2022-11-05T01:24:32.656564 | 2020-06-30T21:16:33 | 2020-06-30T21:16:33 | 276,210,707 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 30 | r | Initial Commit.R | This is another testing file. |
46f40e425df7509b4cbd933878ec53165411ae43 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/BaPreStoPro/examples/plot-est.mixedRegression-method.Rd.R | 4778ce7214843b8a91f495ebe05e57e1f7c94d85 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,090 | r | plot-est.mixedRegression-method.Rd.R | library(BaPreStoPro)
### Name: plot,est.mixedRegression-method
### Title: Plot method for the Bayesian estimation results
### Aliases: plot,est.mixedRegression-method
### ** Examples
# Random-effects setup: population means mu and variances Omega for two
# parameters; each column of phi holds 20 simulated individual draws.
mu <- c(1, 3); Omega = c(0.4, 0.01)
phi <- sapply(1:2, function(i) rnorm(20, mu[i], sqrt(mu[i])))
|
493e382597c244f9d8e7e52dbf093c6561ff7561 | 345c3ba120d28a65ecb7e953bdb021d3e3d8d760 | /cachematrix.R | 3774e7bc9b622b735fdb7e66f2824f2fcce45a7a | [] | no_license | utkucansa/assignment2 | 744523d13c375ace009b44af20369788fde5349c | 94faab1918fd44bca54c70a521decc463e058f8d | refs/heads/master | 2020-04-02T21:38:11.621477 | 2016-07-03T11:27:38 | 2016-07-03T11:27:38 | 62,234,455 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 794 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do
## After creating a matrix, code provided below does caching the created
## matrix
## Wrap a matrix together with a cache slot for its inverse. Returns a list
## of four accessors; set() replaces the matrix and drops any cached inverse.
makeCacheMatrix <- function(x = matrix()) {
  cached_inverse <- NULL
  set <- function(y) {
    # Replacing the matrix invalidates the cached inverse.
    x <<- y
    cached_inverse <<- NULL
  }
  get <- function() {
    x
  }
  setinverse <- function(inv) {
    cached_inverse <<- inv
  }
  getinverse <- function() {
    cached_inverse
  }
  list(set = set,
       get = get,
       setinverse = setinverse,
       getinverse = getinverse)
}
## Return the inverse of the special "cache matrix" object built by
## makeCacheMatrix: reuse the cached inverse when one is present, otherwise
## compute it with solve(), store it back into the cache, and return it.
## Extra arguments in ... are forwarded to solve().
cacheSolve <- function(x, ...) {
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: invert the stored matrix and remember the result.
    inv <- solve(x$get(), ...)
    x$setinverse(inv)
    return(inv)
  }
  message("getting cached data")
  cached
}
|
375cb4d640e03d1a0795be0fdcd6ba814aceadad | 6acc4bafe04da8be735f98bbcf3da36d5f9c3f61 | /visitModeling.R | e50bd372262371721d5ddac67b7caac5990a60da | [] | no_license | sarmapar/bios611-project1 | 4d0579ea3c35040e44b0404e993b686485f9b2e7 | 56683521180fe9a5f01ac4ee4acd33524e816aae | refs/heads/master | 2023-01-12T22:43:25.088567 | 2020-11-17T23:29:20 | 2020-11-17T23:29:20 | 290,551,672 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,634 | r | visitModeling.R | library(tidyverse)
library(ggplot2)
library(MLmetrics)
library(reshape2)
# Load the derived tables produced upstream in the pipeline.
parks <- read_csv("derived_data/parks.csv")
species <- read_csv("derived_data/species.csv")
visits <- read_csv("derived_data/visits.csv")
# Per-park species count, joined onto the park metadata and visit counts.
# (`parks` is rebuilt by each of the following chains, so order matters.)
parks <- species %>%
  group_by(ParkName) %>%
  summarize(numSpecies=n()) %>%
  inner_join(parks,by="ParkName") %>%
  inner_join(visits,by="ParkName")
# Category labels counted as animals vs plants in the species table.
animal <- c("Mammal","Bird","Reptile","Amphibian","Fish","Spider/Scorpion","Insect","Invertebrate","Crab/Lobster/Shrimp","Slug/Snail")
plant <- c("Vascular Plant", "Nonvascular Plant", "Algae")
# Add per-park animal species counts.
parks <- species %>%
  group_by(ParkName) %>%
  summarize(numAnimalSpecies=sum(Category %in% animal)) %>% inner_join(parks,by="ParkName")
# Add per-park plant species counts.
parks <- species %>%
  group_by(ParkName) %>%
  summarize(numPlantSpecies=sum(Category %in% plant)) %>% inner_join(parks,by="ParkName")
# Binary target: more than one million average annual visitors.
parks$highVisits <- (parks$Avg10YrVisits > 1000000)
# Quick correlation scan: visitation vs species density, acreage, and the
# total / animal / plant species counts.
cor(parks$Avg10YrVisits,parks$numSpecies/parks$Acres)
cor(parks$Avg10YrVisits,parks$Acres)
cor(parks$Avg10YrVisits,parks$numSpecies)
cor(parks$Avg10YrVisits,parks$numAnimalSpecies)
cor(parks$Avg10YrVisits,parks$numPlantSpecies)
# Long format: one row per (park, species-category count) for ggplot.
parks.m <- melt(parks, id.vars="Avg10YrVisits",
                measure.vars = c("numAnimalSpecies","numPlantSpecies"))
# Scatter of animal/plant species counts against visitation with linear
# fits. NOTE(review): the legend here comes from the `color` aesthetic;
# `fill="Category"` labels an unused aesthetic -- probably meant
# color="Category".
p <- ggplot(parks.m, aes(x=Avg10YrVisits,y=value, color=variable)) +
  geom_point() +
  labs(title="Correlation of the number of plant and animal species to annual visitation",
       y="Number of Species",
       x="Annual Visitors (10 year average)",
       fill="Category") +
  geom_smooth(method=lm, se=FALSE)
ggsave("figures/species_visit_correlation.png",plot=p)
#split data into train / validate / test partitions
# NOTE(review): the 30/9/9 split hard-codes exactly 48 parks, and no
# set.seed() is called, so the partition differs between runs -- confirm
# both are intended.
parks$label <- c(rep("Train",30),rep("Validate",9),rep("Test",9)) %>%
sample(48,replace=FALSE)
train <- parks %>% filter(label=="Train");
validate <- parks %>% filter(label=="Validate");
test <- parks %>% filter(label=="Test");
# Classify parks as high-visitation (> 1M average annual visitors) from
# their species counts.
# Fix: the outcome is binary and predictions are thresholded at 0.5 below,
# so a logistic regression is clearly intended; without `family = binomial`,
# glm() silently fits a gaussian (ordinary least-squares) model and
# `type = "response"` does not return probabilities.
model <- glm(highVisits ~ numSpecies +
               numAnimalSpecies +
               numPlantSpecies,
             family = binomial,
             data = train)
# Predicted probabilities on the validation split.
pred <- predict(model, newdata = validate, type = "response")
# Validation accuracy at the 0.5 probability threshold.
sum((pred > 0.5) == validate$highVisits) / nrow(validate)
# F1 score at the same threshold.
f1 <- MLmetrics::F1_Score
f1(validate$highVisits, pred > 0.5)
# Build an ROC curve by sweeping the decision threshold over 0.01..1.00 and
# computing validation-set true/false positive rates at each step.
roc <- do.call(rbind, Map(function(threshold){
p <- pred > threshold;
# TPR: share of actual high-visit parks flagged positive.
tp <- sum(p[validate$highVisits])/sum(validate$highVisits);
# FPR: share of actual low-visit parks flagged positive.
fp <- sum(p[!validate$highVisits])/sum(!validate$highVisits);
tibble(threshold=threshold,
tp=tp,
fp=fp)
},seq(100)/100))
# Plot and save the ROC curve.
p2 <- ggplot(roc, aes(fp,tp)) + geom_line() + xlim(0,1) + ylim(0,1) +
labs(title="ROC Curve",x="False Positive Rate",y="True Positive Rate");
ggsave("figures/roc.png",plot=p2)
|
00fe71cfe2168a84ac85eeee5b6ebdac5edc24fb | cd298aa036d3c974c18c22623ad1a58d120463df | /man/plot_dimensionality_reduction.Rd | 1b77ed04186192ad12c89b9f34a6ecbc6c0ddc2c | [
"MIT"
] | permissive | YutongWangUMich/corgi | eaecf19836c27985f7e4129e6b2cb40f6b8bd08b | d7d6f2fab63065268ddc5f463d45e74d9f121a64 | refs/heads/master | 2020-04-18T21:20:00.199195 | 2019-10-24T20:29:02 | 2019-10-24T20:29:02 | 167,762,558 | 0 | 0 | MIT | 2019-04-08T06:30:03 | 2019-01-27T02:47:21 | R | UTF-8 | R | false | true | 537 | rd | plot_dimensionality_reduction.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plotting.R
\name{plot_dimensionality_reduction}
\alias{plot_dimensionality_reduction}
\title{Wrapper around qplot for making cell scatter plots}
\usage{
plot_dimensionality_reduction(emb, batch, cell_type)
}
\arguments{
\item{emb}{n-by-2 matrix of cell coordinates, where n is the number of cells}
\item{batch}{factor or vector of length n}
\item{cell_type}{factor or vector of length n}
}
\description{
Wrapper around qplot for making cell scatter plots
}
|
ca676c1a3255dfd379e36da6c90ec4d6a4f1d3da | 8c6664e9dc7e2d00f099e2f9e18fd1c559763f2f | /R/mywhere.R | 41419e044b1082bd7fa1c9115e092eb85363753a | [
"MIT"
] | permissive | hendersontrent/theft | 111f0b862b91d0935d2df31b4228ee860b04422a | 49c88a351c280d8dfd023118fe5dd93345a6ae4b | refs/heads/main | 2023-09-03T19:41:19.293557 | 2023-09-03T08:47:06 | 2023-09-03T08:47:06 | 351,259,952 | 30 | 6 | NOASSERTION | 2023-09-10T05:50:17 | 2021-03-25T00:17:37 | R | UTF-8 | R | false | false | 481 | r | mywhere.R | # NOTE: This is from {tidyselect} but due to import limitations for CRAN (and it not being namespaced) it's rebuilt here
# Reimplementation of tidyselect's unexported where() helper (see
# https://github.com/r-lib/tidyselect/blob/main/R/helpers-where.R), rebuilt
# here to avoid importing an unexported symbol. Wraps `fn` into a predicate
# whose result is checked to be a single TRUE/FALSE on every call.
mywhere <- function(fn) {
  pred_fn <- rlang::as_function(fn)
  function(x, ...) {
    verdict <- pred_fn(x, ...)
    if (rlang::is_bool(verdict)) {
      return(verdict)
    }
    rlang::abort("`where()` must be used with functions that return `TRUE` or `FALSE`.")
  }
}
|
52b2a16ba54ca7d3239a14c3f25a078f517c4da8 | e915fd9373af45615bce05b837b94eac5158633f | /plot1.R | 89ac287bb1f1053cd21058f1e2e220c631f3ca5b | [] | no_license | sina-bot/ExData_Plotting1 | 534c75639638fcf51a8a52f9c4e06e5a85013f95 | a3c84144142d720d44ee729eb95e913bbdbf31e0 | refs/heads/master | 2022-10-28T22:25:47.467577 | 2020-06-11T11:16:02 | 2020-06-11T11:16:02 | 271,458,102 | 0 | 0 | null | 2020-06-11T05:19:04 | 2020-06-11T05:19:03 | null | UTF-8 | R | false | false | 1,470 | r | plot1.R | ## change local settings
## set an English locale so date/weekday handling is not localised
Sys.setlocale("LC_TIME", "English")
## create ./data on first run
if (!file.exists("data")){
dir.create("data")
}
## download the UCI household power consumption archive once
if (!file.exists("./data/household_power_consumption.zip")){
fileUrl <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip"
download.file(fileUrl, destfile = "./data/household_power_consumption.zip")
dateDownloaded <- date()
}
## read the semicolon-separated text file straight from the zip; "?" marks NA
householdTxt <- unz("./data/household_power_consumption.zip", "household_power_consumption.txt")
householdData <- read.table(householdTxt, header = TRUE, sep = ";", na.strings = "?", stringsAsFactors = FALSE)
## keep only the two days of interest (dates are still d/m/Y strings here)
householdSubData <- subset(householdData, Date == "1/2/2007" | Date == "2/2/2007")
## convert the Date and Time columns to proper date/time classes
householdSubData$Date <- as.Date(householdSubData$Date, format = "%d/%m/%Y")
householdSubData$Time <- paste(householdSubData$Date, householdSubData$Time, sep = " ")
householdSubData$Time <- strptime(householdSubData$Time, format = "%Y-%m-%d %H:%M:%S")
## histogram of global active power over the two days
hist(householdSubData$Global_active_power, breaks = 12, col = "red",
main = "Global Active Power", xlab = "Global Active Power (kilowatts)",
ylab = "Frequency")
## save the on-screen plot to plot1.png by copying the active device
dev.copy(png, file = "plot1.png", width = 480, height = 480)
dev.off()
c1f7118a71db78cabb91fba9da52ee5af5283af4 | 426f78aadc1bdbb2e4bcb4b28b11f452ca3173df | /R/control.R | 930e2c3b7944f77cd16ebe8a6f34db159f9bc1da | [] | no_license | cran/MVT | e4cc4358535482fee976db8102667ec8bb7eddf7 | 303434e36a7038f8214ceb79e218b5592ccb7930 | refs/heads/master | 2023-02-09T02:35:57.837723 | 2023-01-27T08:30:08 | 2023-01-27T08:30:08 | 48,084,674 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 245 | r | control.R | ## ID: control.R, last updated 2021-03-05, F.Osorio
## Control parameters for the EM algorithm.
##
## maxiter:   maximum number of EM iterations (single number >= 1).
## tolerance: convergence tolerance (single positive number).
## fix.shape: logical; if TRUE the shape parameter is held fixed.
## Returns a named list with elements maxiter, tolerance and fix.shape.
MVT.control <-
function(maxiter = 2000, tolerance = 1e-6, fix.shape = FALSE)
{ ## validate early so malformed settings fail here, not mid-iteration
  stopifnot(is.numeric(maxiter), length(maxiter) == 1, maxiter >= 1,
            is.numeric(tolerance), length(tolerance) == 1, tolerance > 0,
            is.logical(fix.shape), length(fix.shape) == 1, !is.na(fix.shape))
  list(maxiter = maxiter, tolerance = tolerance, fix.shape = fix.shape)
}
|
646bf8ca07aa288361f315d42b09f65f990aba74 | fe9960d66c6f877d538f86290808e1a06d3c4d3d | /run_analysis.R | 559507c4fbe5d82fa80638f341e68fe485d3b2c6 | [] | no_license | Verdant89/ExploratoryDataAnalysis | 185900626b56eb003f3e08733d535c8178980f60 | 2caff7fa2b4245c169162112dafd6d0a490195d9 | refs/heads/master | 2020-05-05T07:22:17.417883 | 2014-06-10T16:27:47 | 2014-06-10T16:27:47 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,061 | r | run_analysis.R | #This script uses the data provided for the Programming Assignment and creates a tidy dataset
#that has the average value of mean + std for each activity (e.g. BodyAcc X) according to
#the type of data (i.e. train vs test)
# NOTE(review): absolute Windows path -- the script only runs as-is on the
# author's machine.
setwd("C:\\Users\\David\\Documents\\Coursera\\3 - Getting and Cleaning Data\\Course Project")
download.file("https://d396qusza40orc.cloudfront.net/getdata%2Fprojectfiles%2FUCI%20HAR%20Dataset.zip",
destfile = "Samsung Galaxy Data.zip")
# NOTE(review): this unz() call names "Samsung Galaxy Data" (no .zip, and the
# inner filename equals the archive name) and `data` is never used again --
# looks like a leftover; verify before relying on it.
data <- read.table(unz("Samsung Galaxy Data", filename = "Samsung Galaxy Data"))
# Load the test/train measurement matrices and the feature names
# (assumes the archive contents sit in the working directory).
test.data <- read.table("X_test.txt")
train.data <- read.table("X_train.txt")
features <- read.table ("features.txt")
# Use the feature names (second column of features.txt) as column names.
names(test.data) <- features[,2]
names(train.data) <- features[,2]
# Record the origin of each row before merging.
train.data$type <- "train"
test.data$type <- "test"
# Stack test and train rows into one data frame.
merge.data <- rbind(test.data, train.data)
# Keep only the mean()/std() feature columns, selected by position, plus the
# final `type` column (562).
subset.data <- merge.data[,c(1:6, 41:46, 81:86, 121:126, 161:166, 201, 202, 214,
215, 227,228,240,241,253,254, 266:271, 345:350, 424:429, 503,
504,516,517,529,530,542,543,562)]
#Descriptive names of the varaibles were previously added
#Creating the final tidy dataset
#First I'll manually add the mean and std columns for each variable (e.g. tBodyAcc X,
#tBodyAcc Y, tGravityAcc X...). Then I will do the mean for each activity (train and test)
#As for the first 30 activities in the subset.data the mean and std of the same
#activity are 3 columns apart(e.g. subset.data[,2] = tBodyAcc-mean()-Y and
#subset.data[,4] = tBodyAcc-std()-Y), this loop will automatically sum these first 15
#activities together
# Each `tidy` column holds the sum of a signal's mean() and std() columns;
# start with the first signal (columns 1 and 4 of subset.data).
# NOTE(review): this sums mean + std rather than keeping them separate --
# confirm this is the intended summary.
tidy <- as.data.frame(subset.data[,1]+subset.data[,4])
# Signals whose mean and std columns sit 3 positions apart in subset.data.
col.order <- c(2,3,7:9,13:15,19:21,25:27)
for(i in col.order) {
col.name <- paste(i)
tidy[,col.name] <- (subset.data[,i] + subset.data[,(i+3)])
}
# Magnitude signals (cols 31-40): mean and std are adjacent columns.
col.order2 <- seq(31,40, by = 2)
for(i in col.order2) {
col.name <- paste(i)
tidy[,col.name] <- (subset.data[,i] + subset.data[,(i+1)])
}
# Frequency-domain signals with the 3-apart mean/std layout again.
col.order3 <- c(41:43, 47:49, 53:55)
for(i in col.order3) {
col.name <- paste(i)
tidy[,col.name] <- (subset.data[,i] + subset.data[,(i+3)])
}
# Frequency-domain magnitude signals: adjacent mean/std columns.
col.order4 <- seq(59,66, by = 2)
for(i in col.order4) {
col.name <- paste(i)
tidy[,col.name] <- (subset.data[,i] + subset.data[,(i+1)])
}
# Carry over the train/test indicator.
tidy$type <- subset.data$type
# Human-readable names for the 33 signal columns plus `type`.
names(tidy) <- c("tBodyAcc.x", "tBodyAcc.y", "tBodyAcc.z",
"tGravityAcc.x", "tGravityAcc.y", "tGravityAcc.z",
"tBodyAccJerk.x", "tBodyAccJerk.y", "tBodyAccJerk.z",
"tBodyGyro.x", "tBodyGyro.y", "tBodyGyro.z",
"tBodyGyroJerk.x", "tBodyGyroJerk.y", "tBodyGyroJerk.z",
"tBodyAccMag", "tGravityAccMag", "tBodyAccJerkMag",
"tBodyGyroMag", "tBodyGyroJerkMag",
"fBodyAcc.x", "fBodyAcc.y", "fBodyAcc.z",
"fBodyAccJerk.x", "fBodyAccJerk.y", "fBodyAccJerk.z",
"fBodyGyro.x", "fBodyGyro.y", "fBodyGyro.z",
"fBodyAccMag", "fBodyAccJerkMag","fBodyGyroMag",
"fBodyGyroJerkMag", "type")
# Split the 33 signal columns by the type column (34) and average each
# column within train and test, giving one mean per signal per set.
split.data <- split(tidy[,1:33], tidy[,34])
final.tidy <- sapply(split.data, colMeans)
|
d044cbbc5d4011a649e73d03e446d7b2f504833f | 6e32987e92e9074939fea0d76f103b6a29df7f1f | /googlemlv1.auto/man/GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec.Rd | 8cd0fbadbe626f75dd75dfa18ba3ef9569b3c73b | [] | no_license | justinjm/autoGoogleAPI | a8158acd9d5fa33eeafd9150079f66e7ae5f0668 | 6a26a543271916329606e5dbd42d11d8a1602aca | refs/heads/master | 2023-09-03T02:00:51.433755 | 2023-08-09T21:29:35 | 2023-08-09T21:29:35 | 183,957,898 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 958 | rd | GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ml_objects.R
\name{GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec}
\alias{GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec}
\title{GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec Object}
\usage{
GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec(
values = NULL
)
}
\arguments{
\item{values}{Matches values of the parent parameter with type 'DISCRETE'}
}
\value{
GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec object
}
\description{
GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec Object
}
\details{
Autogenerated via \code{\link[googleAuthR]{gar_create_api_objects}}
Represents the spec to match discrete values from parent parameter.
}
\concept{GoogleCloudMlV1_StudyConfigParameterSpec_MatchingParentDiscreteValueSpec functions}
|
0ce8e68ec1ae6e5bc2956dd644280dae51241db2 | 335a7b6225011db2d6b890428666d662fb81dfa8 | /server.R | 852ef5313242a7dff91644dc603f3731478e1481 | [] | no_license | earino/Military-Surplus | f035a9a6752bcd34264a68297a9d78540213cbeb | ccc3494d2c3e9d18c299ff1eda8f86dc70385914 | refs/heads/master | 2021-01-02T12:56:37.576604 | 2014-08-21T17:53:07 | 2014-08-21T17:53:07 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,393 | r | server.R |
# Server logic for a Shiny app mapping dollar values of 1033-program
# military-surplus transfers: a national state-level map by default, or a
# county-level map when a specific state is selected.
#
# http://shiny.rstudio.com
#
library(shiny)
library(ggplot2)
library(maps)
library(plyr)
library(dplyr)
library(mapproj)
# FOIA extract of 1033-program transfers, loaded once at app start-up.
military <- read.csv("1033-program-foia-may-2014.csv")
shinyServer(function(input, output) {
output$mapPlot <- renderPlot({
# A concrete state selection ("NA" is the sentinel for "no state chosen").
if (input$state != "NA") {
map_of_state = map_data("county")
# Convert the selected full state name to its two-letter abbreviation.
state_of_interest = state.abb[match(input$state, state.name)]
#state_of_interest = state.abb[match("Texas", state.name)]
# Total acquisition cost per county, in thousands of dollars.
expenditures <- military %>% filter(State == state_of_interest) %>% group_by(State, County) %>% summarise(Thousands = sum(Acquisition.Cost)/1000)
# region/subregion columns match map_data("county") join keys.
expenditures$region <- tolower(state.name[match(expenditures$State, state.abb)])
expenditures$subregion <- tolower(expenditures$County)
expenditures <- expenditures[complete.cases(expenditures),]
# left_join on the common region/subregion columns (by= left implicit).
military_map <- map_of_state %>% left_join(expenditures)
military_map <- subset(military_map, region == tolower(input$state))
# Counties with no recorded transfers are shown as zero, not dropped.
military_map$Thousands <- ifelse(is.na(military_map$Thousands), 0, military_map$Thousands)
#military_map <- military_map[complete.cases(military_map),]
p <- ggplot(military_map, aes(x=long, y=lat, group=group, fill=Thousands)) +
scale_fill_gradient2(low="#559999", mid="grey90", high="#ff0000") +
geom_polygon(colour="black") + coord_map("polyconic") + xlab("") + ylab("") +
ggtitle("Thousands of Dollars of Transferred Military Surplus Gear")
print(p)
}
else {
# National view: total acquisition cost per state, in millions of dollars.
state_map <- map_data("state")
expenditures <- military %>% group_by(State) %>% summarise(Millions = sum(Acquisition.Cost)/1000000)
expenditures$region <- tolower(state.name[match(expenditures$State, state.abb)])
expenditures <- expenditures[complete.cases(expenditures),]
military_map <- state_map %>% left_join(expenditures)
p <- ggplot(military_map, aes(x=long, y=lat, group=group, fill=Millions)) +
scale_fill_gradient2(low="#559999", mid="grey90", high="#ff0000") +
geom_polygon(colour="black") + coord_map("polyconic") + xlab("") + ylab("") +
ggtitle("Millions of Dollars of Transferred Military Surplus Gear")
print(p)
}
})
})
e6f5dc0001c84f941e691c2d6128bf49c6c32f00 | d84023e27935dcea2a42d09b6d765e82482d54b0 | /proxy-target/run.R | 69b9debf007d5657c20e5f4561c611fcd86696bb | [] | no_license | LEDfan/undertow-proxy | a2a599821cb1a8bd31f7f10c8bd82c9e8525900d | bc3409334b3f02a1ddccc6ea3fc42a2931b8ac86 | refs/heads/main | 2023-03-12T20:30:03.126190 | 2021-03-01T14:12:27 | 2021-03-01T14:12:56 | 343,371,809 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 309 | r | run.R | rm(list=ls())
# if TRUE, shiny prints verbose request tracing -- handy while developing
options(shiny.trace = FALSE)
# ShinyProxy expects the app to listen on this fixed port
port <- 3838
# log which ShinyProxy user started this container
print(paste0('run.R script, User: ', Sys.getenv("SHINYPROXY_USERNAME")))
# serve the app from the current directory on all network interfaces
shiny::runApp(
appDir = ".",
host = '0.0.0.0',
port = as.numeric(port)
)
|
18ab3add4a51f5b20a7d57dae25d4bd678ef22b4 | 424a109c5f16ab0417c7f9ecc4fded3c0f38ae14 | /batch/pecan/define_categories_pecan.r | 130a75968a3cc3dcd132b9948707965f64f78662 | [] | no_license | adrianalbert/EnergyAnalytics | f784aca1e549be96b865db89f2190d2dd7566f83 | 39a5d5a6ee05a643ab723d4ef8d864282870cec8 | refs/heads/master | 2020-05-29T08:51:31.888860 | 2016-03-21T15:03:47 | 2016-03-21T15:03:47 | 7,062,053 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,642 | r | define_categories_pecan.r | # define_categories_pecan.r
#
# Defines appliance categories for the Pecan St dataset.
#
# Adrian Albert
# Last modified: May 2014.
# ---------------------------------------------------------
# __________________________________________________
# Define appliance categories
# some interesting components
# appliances = as.character(read.csv('~/energy-data/pecan_street/metadata/appliances.csv')$Appliance)
# columns always kept: identifiers plus whole-home usage
select.keep = c('dataid', 'localminute', 'use')
# air conditioning / cooling circuits
select.AC = c("air1", "air2", "air3", "housefan1")
# select.HV = c("furnace1", "furnace2", "heater1")
# heating circuits
select.HV = c("heater1", "airwindowunit1", "furnace1", "furnace2")
# indoor and outdoor lighting circuits
select.light = c("lights_plugs1", "lights_plugs2", "lights_plugs3", "lights_plugs4", "lights_plugs5", "lights_plugs6",
"outsidelights_plugs1", "outsidelights_plugs2")
# always-on loads (refrigeration and similar)
select.alwOn = c('refridgerator1', 'refridgerator2', 'winecooler1', 'aquarium1',
"freezer1")
# scheduled loads (pools, pumps, sprinklers)
select.sched = c("pool1", "pool2", 'sprinkler1', "poolpump1", "pump1")
# whole-home total consumption
select.total = c('use')
# domestic hot water
select.dhw = c('waterheater1', 'waterheater2')
# user-driven loads (rooms and appliances switched on demand)
# NOTE(review): "heater1" also appears in select.HV above -- confirm the
# overlap is intended.
select.user = c("bathroom1", "bathroom2", "bedroom1", "bedroom2", "bedroom3", "bedroom4", "bedroom5",
"clotheswasher1", "clotheswasher_dryg1", "diningroom1", "diningroom2", "dishwasher1",
"disposal1", "drye1", "dryg1", "garage1", "garage2", "icemaker1", "jacuzzi1",
"kitchenapp1", "kitchenapp2", "livingroom1", "livingroom2", "heater1",
"microwave1", "office1", "oven1", "poollight1", "range1", "security1", "shed1", "utilityroom1", "venthood1")
# solar generation
select.solar = c('gen')
# electric vehicle charging
select.ev = c('car1')
|
541b9aead5f207b5985b6e91b82056c104520a3f | e5f915851f04388a30707a87dcae64462ebcf06d | /02_PrepareTablesForMatching.R | afb7b307cc9fdd678c423a148e98d3b29edbcd8d | [
"MIT"
] | permissive | pacheco-andrea/tenure-defor-br | 2a7e25cc32b9ac54f5e71853cd2c59f24b76dd3c | c0ff020fdc2d8893cebf981176b1466599f9d444 | refs/heads/master | 2023-04-13T14:14:29.984462 | 2022-09-08T15:03:15 | 2022-09-08T15:03:15 | 275,821,151 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,244 | r | 02_PrepareTablesForMatching.R | # ************************************************************ #
# Prepare treatment and control tables for matching
# this script is run as a job on the HPC
# this multiplies our 49 datasets by 6(comparisons), and in turn, by 2 (other counterfactual)
# ************************************************************ #
# libraries
library(readr)
library(dplyr)
library(fastDummies)
library(tidyr)
# parameters
controlVars = c("private","public")
treatmentVars = c("private","public", "protected", "sustainable_use", "indigenous", "communal", "quilombola")
# --------------------------------------------------------------------------#
# 1. READ IN DATA ----
wdmain <- "/gpfs1/data/idiv_meyer/01_projects/Andrea/P1"
wd_data_formatching <- paste0(wdmain, "/inputs/00data/for_matching/forMatchAnalysis/")
wd_out <- paste0(wdmain, "/inputs/00data/for_matching/forMatchAnalysisCEM")
# 2. Load match-analysis-ready datasets ----
# (these are parcel-level datasets for the extent of all Brazil that include joined data from Ruben's extractions of variables to be matched on)
setwd(wd_data_formatching)
input <- list.files()
i=as.integer(Sys.getenv('SGE_TASK_ID'))
dataset <- readRDS(input[i])
n <- gsub("_allAnalysisData.rds", "", input[i])
# 3. Set up tables for matching (creating dummies and separate dataframes for each match we're making) ----
# (e.g. indigenous tenure) and control (e.g. private tenure)
datalist <- dummy_cols(dataset, select_columns = "tenure")
# we need to create a table listing for each spatial-temporal scale combination of all individual matches that have to be built, e.g. indigenous against private, etc.
# create function to compare tenures: creates column where treatment is coded as 1, and control is coded as 0. everything else is coded as NA
# the function should also return a dataframe that keeps only the treatment and control observations (dropping all NA's)
# datalist original looked like: datalist[[i]][[j]] (i=extents)(j=data)
compareTenures <- function(datalist, control, treatment){
comparison_table <- datalist[,-grep("tenure_", colnames(datalist))]
comparison_table[,paste0(control, "_vs_", treatment)] <- ifelse(datalist[,paste0("tenure_", treatment)] == 1,1,
ifelse(datalist[,paste0("tenure_", control)] == 1,0,NA ) ) # give me a column that re-codes treatment and control variables
comparison_table <- drop_na(comparison_table) # give me a table that keeps only those observations which I'm specifically compariing (not NA's)
return(comparison_table)
}
# create function to apply compareTenures to all tenure forms
# returns a table with only one column that specifies the control compared to the treatment. e.g. public_vs_private
createTable_control_vs_treatment <- function(match_list, control) {
table_c_vs_t <- list()
# for(i in 1:length(match_list)) # for each extent (whether that's spatial or temporal)
# {
for(j in 1:length(treatmentVars)) # for each tenure type (except the one you're comparing to)
{
if(treatmentVars[j] != control) {
if(match(treatmentVars[j], gsub("tenure_", "", colnames(match_list)), nomatch = 0) != 0 ){
table_c_vs_t[[length(table_c_vs_t)+1]] <- compareTenures(match_list, control, treatmentVars[j])
names(table_c_vs_t)[length(table_c_vs_t)] <- paste0(n, "_", control, "_", treatmentVars[j])
}
}
}
# }
return(table_c_vs_t) # this should return all dataframes needed for matching, within this control established
}
# create function to apply "createTable_control_vs_treatment" for all controls by looping through our pre-established controlVars
loopThruControls <- function(match_extents_list,controlVars) {
tableForMatching <- list()
for(i in 1:length(controlVars))
{
tableForMatching[[i]] <- createTable_control_vs_treatment(match_extents_list, controlVars[i])
}
names(tableForMatching) <- controlVars
return(tableForMatching)
}
mydataset <- loopThruControls(datalist, controlVars)
# write data to be matched on
setwd(wd_out)
for(i in 1:length(mydataset))
{
for(j in 1:length(mydataset[[i]]))
{
write_csv(mydataset[[i]][[j]], paste0(names(mydataset[[i]][j]), ".csv"))
}
}
|
51fdfa74eaf3c68a80630ee776030dc7a8bbad0d | 6121b2499765e3c2ec502f62e4e5840bc43830ab | /courseproject1.R | f007097b9b2c1f90bfc493029f4eb265215328fe | [] | no_license | iceman1265/ExData_Plotting1 | 75325401976ccd5f5b1eda1b281d6f7de493f77c | 2b66e1e766d9fe973ef875b51a12377b767d7dbb | refs/heads/master | 2021-01-22T05:42:37.638619 | 2014-12-07T18:37:01 | 2014-12-07T18:37:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,929 | r | courseproject1.R | #downloading the file used for the purposes of this assignment
# download the archive only if it is not already present
if(!file.exists("exdata-data-household_power_consumption.zip")) {
#storing the download into a temporary file on the system
temp <- tempfile()
#downloading the actual file by pointing R to the file URL
download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip",temp)
#unzipping the file
file <- unzip(temp)
unlink(temp)
}
# NOTE(review): `file` only exists when the download branch above ran in
# this session; rerunning with the zip already present makes this fail.
power <- read.table(file, header=T, sep=";")
#convert the Date column from d/m/Y strings to Date objects
power$Date <- as.Date(power$Date, format="%d/%m/%Y")
# keep only the two days of interest: 1-2 Feb 2007
df <- power[(power$Date=="2007-02-01") | (power$Date=="2007-02-02"),]
# measurements were read as character (no na.strings set); coerce to numeric
df$Global_active_power <- as.numeric(as.character(df$Global_active_power))
df$Global_reactive_power <- as.numeric(as.character(df$Global_reactive_power))
df$Voltage <- as.numeric(as.character(df$Voltage))
# build a POSIXct timestamp from Date + Time
# NOTE(review): the trailing "%d/%m/%Y %H:%M:%S" is an unnamed extra
# argument to transform(), not a format for as.POSIXct -- confirm intended.
df <- transform(df, timestamp=as.POSIXct(paste(Date, Time)), "%d/%m/%Y %H:%M:%S")
# coerce the three sub-metering channels to numeric as well
df$Sub_metering_1 <- as.numeric(as.character(df$Sub_metering_1))
df$Sub_metering_2 <- as.numeric(as.character(df$Sub_metering_2))
df$Sub_metering_3 <- as.numeric(as.character(df$Sub_metering_3))
# Histogram of global active power over the two days, drawn in red, then
# copied from the screen device to plot1.png (480x480) in the working
# directory. Relies on the global `df` prepared above.
plot1 <- function() {
hist(df$Global_active_power, main = paste("Global Active Power"), col="red", xlab="Global Active Power (kilowatts)")
dev.copy(png, file="plot1.png", width=480, height=480)
dev.off()
cat("Plot1.png has been saved in", getwd())
}
plot1()
# Line plot of global active power against timestamp over the two days,
# saved to plot2.png. Relies on the global `df` prepared above.
plot2 <- function() {
plot(df$timestamp,df$Global_active_power, type="l", xlab="", ylab="Global Active Power (kilowatts)")
dev.copy(png, file="plot2.png", width=480, height=480)
dev.off()
cat("plot2.png has been saved in", getwd())
}
plot2()
# Line plot of the three energy sub-metering channels against timestamp
# (black/red/blue) with a legend, saved to plot3.png. Relies on the global
# `df` prepared above.
plot3 <- function() {
plot(df$timestamp,df$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(df$timestamp,df$Sub_metering_2,col="red")
lines(df$timestamp,df$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), lwd=c(1,1))
dev.copy(png, file="plot3.png", width=480, height=480)
dev.off()
cat("plot3.png has been saved in", getwd())
}
plot3()
# 2x2 panel of four plots over the two days -- global active power,
# voltage, the three sub-metering channels, and global reactive power, each
# against timestamp -- saved together to plot4.png. Relies on the global
# `df` prepared above.
plot4 <- function() {
par(mfrow=c(2,2))
# panel 1: global active power vs. timestamp
plot(df$timestamp,df$Global_active_power, type="l", xlab="", ylab="Global Active Power")
# panel 2: voltage vs. timestamp
plot(df$timestamp,df$Voltage, type="l", xlab="datetime", ylab="Voltage")
# panel 3: the three sub-metering channels vs. timestamp
plot(df$timestamp,df$Sub_metering_1, type="l", xlab="", ylab="Energy sub metering")
lines(df$timestamp,df$Sub_metering_2,col="red")
lines(df$timestamp,df$Sub_metering_3,col="blue")
legend("topright", col=c("black","red","blue"), c("Sub_metering_1 ","Sub_metering_2 ", "Sub_metering_3 "),lty=c(1,1), bty="n", cex=.5) #bty removes the box, cex shrinks the text, spacing added after labels so it renders correctly
# panel 4: global reactive power vs. timestamp
plot(df$timestamp,df$Global_reactive_power, type="l", xlab="datetime", ylab="Global_reactive_power")
# copy the composed 2x2 figure to plot4.png in the working directory
dev.copy(png, file="plot4.png", width=480, height=480)
dev.off()
cat("plot4.png has been saved in", getwd())
}
plot4()
|
d1309d7f44cf09699a0650047388ae899e1f7082 | 3c5ef044f03173522b64acd205ed389cdeda9dfb | /myncurve.R | 3f9abe2cf8ca58aa11d69d1cb669584ced486305 | [] | no_license | paigefrey/MATH4753freyy | 02295b8f0aef4ff9f835245313a58a7c64019c11 | e99ef9725bbcc0e0e1670671705a077dbea732df | refs/heads/master | 2023-03-27T11:05:48.340069 | 2021-03-16T16:33:16 | 2021-03-16T16:33:16 | 334,574,337 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 202 | r | myncurve.R | #' @Title myncurve
#'
#' Plot the normal (Gaussian) density curve with mean `mu` and standard
#' deviation `sigma`, over the range mu - 3*sigma to mu + 3*sigma (which
#' covers about 99.7% of the probability mass).
#'
#' @param mu numeric, mean of the normal distribution
#' @param sigma numeric, standard deviation of the normal distribution (> 0)
#'
#' @return The value of curve(), invisibly: a list with the x and y
#'   coordinates that were plotted.
#' @export
#'
#' @examples
#' myncurve(mu = 10, sigma = 5)
myncurve = function(mu, sigma){
  curve(dnorm(x,mean=mu,sd=sigma), xlim = c(mu-3*sigma, mu + 3*sigma))
}
|
f0af845219d0d652a10d02b6102282ad1f299d2e | ed823b6da656fb94805c7ff74dfd7b921c5624c9 | /man/define_feature.Rd | 69bbf7c7b252a2213d4960722cc97689a025ca03 | [] | no_license | vallotlab/ChromSCape | cbde454c903445706e75b27aade45a7a68db5986 | 382eac1015cd7f67e448124faf5a917f4c973aa1 | refs/heads/master | 2023-03-15T20:18:37.915065 | 2023-03-13T16:46:50 | 2023-03-13T16:46:50 | 191,729,569 | 11 | 5 | null | 2019-07-03T13:06:05 | 2019-06-13T09:10:39 | R | UTF-8 | R | false | true | 914 | rd | define_feature.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preprocessing_filtering_reduction.R
\name{define_feature}
\alias{define_feature}
\title{Define the features on which reads will be counted}
\usage{
define_feature(ref = c("hg38","mm10")[1],
peak_file = NULL,
bin_width = NULL,
genebody = FALSE,
extendPromoter = 2500)
}
\arguments{
\item{ref}{Reference genome}
\item{peak_file}{A bed file if counting on peaks}
\item{bin_width}{A number of bins if divinding genome into fixed width bins}
\item{genebody}{A logical indicating if feature should be counted in
genebodies and promoter.}
\item{extendPromoter}{Extension length before TSS (2500).}
}
\value{
A GRanges object
}
\description{
Define the features on which reads will be counted
}
\examples{
gr_bins = define_feature("hg38", bin_width = 50000)
gr_genes = define_feature("hg38", genebody = TRUE, extendPromoter = 5000)
}
|
7d7716f6961d8a04b35cf331f11400ab29ce9967 | 9f0fb18e5f8762999ab0c9fb47082252a9da1536 | /man/check_gender_balance.Rd | 5ad5b4e1280b3d6972b1440a0b9c0313ab41f409 | [
"MIT"
] | permissive | cforgaci/groupmaker | ceed24fec44976e056f35f7fc0c6121be8910943 | e7dc8be60d6ae8d5f9ceacc03267eed217028d09 | refs/heads/master | 2023-09-02T03:15:34.125002 | 2021-10-10T18:32:36 | 2021-10-10T18:32:36 | 385,901,289 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 585 | rd | check_gender_balance.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/check_gender_balance.R
\name{check_gender_balance}
\alias{check_gender_balance}
\title{Check Gender Balance in a Group}
\usage{
check_gender_balance(students)
}
\arguments{
\item{students}{A data frame.}
}
\value{
Return a message and the value TRUE if gender distribution is OK, FALSE otherwise.
}
\description{
After groups have been made, check gender balance.
}
\examples{
# Check gender balance on the example dataset included in the makegroups package
check_gender_balance(make_groups(students, 3))
}
|
44980a84815739c4e17de3c07c69891f41377c14 | 688185e8e8df9b6e3c4a31fc2d43064f460665f1 | /man/frames.time.Rd | 5b25139af8e1889740753e59ef68229207491ef2 | [] | no_license | IPS-LMU/emuR | 4b084971c56e4fed9032e40999eeeacfeb4896e8 | eb703f23c8295c76952aa786d149c67a7b2df9b2 | refs/heads/master | 2023-06-09T03:51:37.328416 | 2023-05-26T11:17:13 | 2023-05-26T11:17:13 | 21,941,175 | 17 | 22 | null | 2023-05-29T12:35:55 | 2014-07-17T12:32:58 | R | UTF-8 | R | false | true | 942 | rd | frames.time.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dfuns.R
\name{frames.time}
\alias{frames.time}
\title{Find the time and position of a data element.}
\usage{
frames.time(dataset, datanum)
}
\arguments{
\item{dataset}{A dataset returned by \code{track} or \code{frames}.}
\item{datanum}{An integer, an index into the \code{data} component of
\code{dataset}.}
}
\value{
The segment number which contains the element \code{datanum} of
\code{dataset$data}.
}
\description{
Finds the time and position of a data element.
}
\details{
The dataset returned from \code{track} or \code{frames} consists of a
matrix of data (the \code{data} component) and two index components
(\code{index} and \code{ftime}). The data for all segments is concatenated
together in \code{$data}. This function can be used to find out which
segment a particular row of \code{$data} corresponds to.
}
\seealso{
track, frames
}
\keyword{misc}
|
39f779b25a036ffa0bafcc133647c7d29c84e1d8 | a93ce2c4d41b132423353841220fb54d1c487e63 | /plot1.R | 1e1fac9c57d99b07cf273e7ada0b17deb314db00 | [] | no_license | Rian-Kurnia/ExData_Plotting1 | 25676fceaa3d9873e62d9e8c2dd5956b220204ec | 5aa1608ba9b3a134d344814784bde967c397cfca | refs/heads/master | 2020-03-17T03:16:59.211403 | 2018-05-14T02:50:01 | 2018-05-14T02:50:01 | 133,228,258 | 0 | 0 | null | 2018-05-13T10:51:03 | 2018-05-13T10:51:03 | null | UTF-8 | R | false | false | 342 | r | plot1.R | library(sqldf)
fi <- file('household_power_consumption.txt')
df <- sqldf("select * from fi where Date in ('1/2/2007','2/2/2007')", file.format =
list(header=TRUE, sep=";"))
close(fi)
png('plot1.png')
hist(df$Global_active_power,col="Red",xlab='Global Active Power (kilowatts)',
main='Global Active Power')
dev.off() |
b13cbe47bd2f37e9362c2de1062f72515f4d9919 | bab87157294070473c5f1e7864f0316e4d1e496d | /R/get_addresses.R | e13f13c0d3dc1c87f309de01fed5f606b69ff655 | [
"MIT"
] | permissive | Amice13/drv | a736febb6c575ba2e63b43aa6a5961129ee8618f | bbb24764624e50ef2d93ad0cf9c1b6fd2f65d44a | refs/heads/main | 2023-03-16T04:01:52.576507 | 2021-03-14T12:10:13 | 2021-03-14T12:10:13 | 347,479,962 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,976 | r | get_addresses.R | get_addresses <- function(id) {
#' Get the list of addresses of the specified territory
#'
#' Returns a data frame with information about territories in the
#' specified region.
#'
#' The data frame contains the following variables
#'
#' \itemize{
#' \item \strong{ATO_Id} - a territory identifier
#' \item \strong{Geon_Id} - a street id
#' \item \strong{Geon_Name} - a name of the street
#' \item \strong{Geon_OldNames} - old names of the street
#' \item \strong{Bld_ID} - a building identifier
#' \item \strong{Bld_Area} - a building area
#' \item \strong{Bld_PS} - a polling station which the building belongs to
#' \item \strong{Bld_Flats} - a number of flats in the building
#' \item \strong{Bld_Ind} - an address index
#' \item \strong{Bld_Korp} - an address block
#' \item \strong{Bld_Num} - an address number
#' }
#'
#' @param id a territory identifier from \strong{ATO_Id} in \strong{get_territories}
#' @export
# Assign constants
user_agent <- httr::user_agent("http://github.com/amice13/drv")
headers <- httr::add_headers('Content-Type'='text/xml;charset=UTF-8')
base_url <- "https://www.drv.gov.ua/ords/svc/personal/API/Opendata"
encoding <- "UTF-8"
soap <- "
<soap:Envelope xmlns:soap=\"http://www.w3.org/2003/05/soap-envelope\" xmlns:drv=\"http://www.drv.gov.ua/\">
<soap:Header/>
<soap:Body>
<drv:GetAdrReg>
<drv:AdrRegParams>
<drv:ATO_ID>REPLACE</drv:ATO_ID>
</drv:AdrRegParams>
</drv:GetAdrReg>
</soap:Body>
</soap:Envelope>"
soap <- gsub("REPLACE", id, soap)
result <- httr::POST(base_url, user_agent, headers, body = soap)
xml_content <- httr::content(result, "text", encoding = encoding)
if (grepl("XXXXX|QUERRY_RESULT>-1", xml_content)) {
warning("The provided ID is wrong!")
return(FALSE)
}
xml_data <-xml2::read_xml(xml_content)
xml_find <- xml2::xml_find_all(xml_data, ".//d:GEONIM")
xml_res <- lapply(xml_find, function (datum) {
general <- xml2::as_list(datum, recursive = F)
Geon_Id <- general$Geon_Id[[1]]
Geon_Name <- general$Geon_Name[[1]]
if (length(general$Geon_OldNames) > 0) {
Geon_OldNames <- general$Geon_OldNames
} else {
Geon_OldNames <- NA
}
datum <- xml2::xml_children(datum)
builds_find <- xml2::xml_find_all(datum, ".//d:BUILD")
builds_list <- lapply(xml2::as_list(builds_find), function(x) {
x[["Geon_Id"]] <- Geon_Id
x[["Geon_Name"]] <- Geon_Name
x[["Geon_OldNames"]] <- Geon_OldNames
x[["ATO_Id"]] <- id
for (name in names(x)) {
if (length(x[[name]]) == 0) x[[name]] <- NA
}
unlist(x)
})
if (length(builds_list) == 0) return(F)
n <- names(builds_list[[1]])
d <- as.data.frame(builds_list, stringsAsFactors = FALSE)
d <- as.data.frame(t(d), stringsAsFactors = FALSE, row.names = F)
names(d) <- n
d
})
data <- do.call(rbind, xml_res)
data
}
|
5d767ed8a690ab3ec6f69729ea8c23162b17c408 | 77adeb996aa86cf4c27c51fd2eb66a17adaa95e1 | /2018 - 01/03. 시계열분석/2018-05-08 (2) 수업.R | 42dd70c6be3599a0cd70f079d8f84dbc126244dc | [] | no_license | ajskdlf64/Bachelor-of-Statistics | 44e5b5953ac0c17406bfc45dd868efbfab18e70f | bc7f92fce9977c74c09d4efb0ead35e2cd38e843 | refs/heads/master | 2021-07-20T00:22:26.981721 | 2021-07-14T08:47:43 | 2021-07-14T08:47:43 | 220,665,955 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,701 | r | 2018-05-08 (2) 수업.R | library(forecast)
# Box-Jenkins ARIMA identification exercise on the class dataset ex7_5d:
# inspect ACF/PACF, difference once, compare candidate ARIMA(p,1,q) fits by
# AIC, and prune non-significant coefficients via fixed = c(...).
ex7_5d <- scan("C:/Users/user/Desktop/학교수업/시게열분석/수업 자료/ex7_5d.txt")
ex7_5d.ts <- ts(ex7_5d)
# Inspect the raw series and its (partial) autocorrelation structure.
plot(ex7_5d.ts)
acf(ex7_5d.ts)
pacf(ex7_5d.ts)
# Apply differencing
ndiffs(ex7_5d.ts) # the unit-root test indicates first-order differencing
ex7_5d2 <- diff(ex7_5d)
ex7_5d2.ts <- ts(ex7_5d2)
plot(ex7_5d2.ts)
acf(ex7_5d2)
pacf(ex7_5d2)
# Candidate models: ARIMA(1,1,1), ARIMA(2,1,1), ARIMA(1,1,2), ARIMA(2,1,2)
Arima(ex7_5d,order=c(1,1,1),include.drift = TRUE)$aic
Arima(ex7_5d,order=c(1,1,2),include.drift = TRUE)$aic
Arima(ex7_5d,order=c(2,1,1),include.drift = TRUE)$aic
Arima(ex7_5d,order=c(2,1,2),include.drift = TRUE)$aic
# Use include.drift = TRUE when d = 1, and include.mean = TRUE when d = 0.
fit5 <- Arima(ex7_5d,order=c(2,1,1),include.drift = TRUE)
confint(fit5)
# fixed = c(0, NA, ...) pins the first AR coefficient to zero and leaves the
# rest free.
fit5.1 <- Arima(ex7_5d,order=c(2,1,1),include.drift = TRUE, fixed=c(0,NA,NA,NA))
confint(fit5.1)
# Even when overfitting, all added parameters are non-significant, so the
# final model is ARIMA(2,1,1).
fit5.2 <- Arima(ex7_5d,order=c(3,1,1),include.drift = TRUE, fixed=c(0,NA,NA,NA,NA))
confint(fit5.2)
fit5.3 <- Arima(ex7_5d,order=c(2,1,2),include.drift = TRUE, fixed=c(0,NA,NA,NA,NA))
confint(fit5.3)
auto.arima(ex7_5d, ic="bic")
# auto.arima by default searches for the model minimizing AIC; with ic="bic"
# the selection criterion is BIC instead.
auto.arima(ex7_5d, stepwise = FALSE)
fit5.4 <- Arima(ex7_5d,order=c(1,1,3),include.drift = TRUE, fixed=c(NA,NA,0,NA,NA))
confint(fit5.4)
# ARIMA(1,1,3) with ma2 = 0 is the final model here.
# Different starting points yield several candidate final models; in the end,
# compare their BIC and AIC values to select the final model.
ebf6efdbc7a4279d8f221c4ffddccbda32ae5190 | b0019ad01d4080516e7ed5c2c27926d33ba46ce5 | /R/getCount.r | 65e661c8fcdd69293d725fdf73666177556a1e14 | [
"MIT"
] | permissive | dbescond/Artemis | e8dc4fbfd4adb3e438ba0fe17b420c16d9efde91 | 0ab905206e4ab366a5063470871e1c9d1349de0d | refs/heads/master | 2021-01-21T19:51:04.459772 | 2017-05-23T10:41:50 | 2017-05-23T10:41:50 | 46,415,914 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,940 | r | getCount.r | #' Query Data ILOSTAT count
#' Query Data ILOSTAT count
#'
#' Query data from ILOSTAT SDMX API
#'
#' Helper function to efficiently query data from ILOSTAT SDMX API.
#'
#' @param DSD a datastructure definition identified by the triplet \code{[collection; country; indicator]}. Arguments description come from 'http://www.ilo.org/ilostat/content/conn/ILOSTATContentServer/path/Contribution Folders/statistics/web_pages/static_pages/technical_page/ilostat_appl/SDMX_User_Guide.pdf' .
#' @param test suffix inserted into the service path; the default "-test"
#'   targets the ILO test endpoint. NOTE(review): undocumented in the
#'   original -- presumably "" selects the production endpoint; confirm.
#' @return An integer count: number of Obs nodes when only data (no
#'   attributes) are requested, otherwise number of Series nodes; NULL when
#'   the request fails or no DataSet is returned.
#' @author ILO bescond
#' @keywords ILO, SDMX, R
#' @seealso \code{\link{getCodelist}} \code{\link{getDataStructure}} \code{\link{getData}}
#' @export
#' @import xml2
#' @examples
#' ################################## use to check data available
#'
#' # example with attribute
#' res <- getCount("YI_AFG_EMP_TEMP_SEX_AGE_NB/....")
#'
#' # example without attribute
#' res <- getCount("YI_AFG_ALL/.....?detail=dataonly")
#'
#' # example of last N data
#' res <- getCount("YI_AFG_EMP_TEMP_SEX_AGE_NB/.....?lastNObservations=1")
#'
#' # example of first N data
#' res <- getCount("YI_AFG_EMP_TEMP_SEX_AGE_NB/.....?firstNObservations=2")
#'
#' # example with multi country
#' res <- getCount("YI_ALL_EMP_TEMP_SEX_AGE_NB/.MEX+ESP")
#'
#' # check availability of time series
#' res <- getCount("YI_ALL_EMP_TEMP_SEX_AGE_NB/.....?detail=serieskeysonly")
#' ### as from 2009
#' res <- getCount("YI_ALL_EMP_TEMP_SEX_AGE_NB/.....?startPeriod=2009-01-01&detail=serieskeysonly")
getCount <- function( DSD,
test = "-test"){
# Detail is TRUE when DSD already carries a query string ("?...").
# grep() returns integer(0) on no match, and `integer(0) %in% 1` yields
# logical(0); the length()%in%0 check coerces that empty case to FALSE.
Detail <- grep("\\?", DSD)%in%1 ; if(length(Detail)%in%0) {Detail <- FALSE}
# Append the SDMX output format with "&" or "?" depending on whether a query
# string is already present. (str_detect is from stringr -- not imported
# here; presumably attached by the package. TODO confirm.)
DSD <- ifelse(	str_detect(DSD,"[?]"),
paste0(DSD, "&format=compact_2_1"),
paste0(DSD, "?format=compact_2_1"))
# set if SeriesKeysOnly is requested (NO Obs, No Attrs)
SeriesKeysOnly <- grep("DETAIL=SERIESKEYSONLY", toupper(DSD))%in%1 ; if(length(SeriesKeysOnly)%in%0) {SeriesKeysOnly <- FALSE}
# set if DataOnly are requested (No Attrs)
DataOnly <- grep("DETAIL=DATAONLY", toupper(DSD))%in%1 ; if(length(DataOnly)%in%0){DataOnly <- FALSE}
# If a query string exists but no detail= option was given, force dataonly so
# that the Obs-counting branch below applies.
if(Detail & !SeriesKeysOnly & !DataOnly){
DSD <- paste0(DSD,"&detail=dataonly")
DataOnly = TRUE
}
if(!Detail){
DSD <- paste0(DSD,"?detail=dataonly")
DataOnly = TRUE
}
# Fetch the SDMX document; try() keeps network/HTTP failures non-fatal.
X <- try(
read_xml(paste0("http://www.ilo.org/ilostat/sdmx",test,"/ws/rest/data/ILO,DF_",DSD)),
silent = TRUE)
# test error message (a try-error object prints as "Error...")
if(substr(X[1], 1, 5)%in%"Error"){
return(NULL)
}
# extract namespace of the xml doc
ns <- xml_ns(X)
# test dataset exist
if(length(xml_find_all(X, ".//message:DataSet", ns))==0){
return(NULL)
}
# Count observations when only data were requested, series otherwise.
if(DataOnly){
length(xml_find_all(X, ".//Obs", ns))
} else {
length(xml_find_all(X, ".//Series", ns))
}
}
# Return the n-th element of x, optionally in the order given by order_by.
#
# FIX(review): the original default `default_missing(x)` referenced an
# unexported dplyr internal that is undefined here, so any call that needed
# the default errored; the inline default below returns NULL for lists and a
# typed NA for atomic vectors. Negative n now counts from the end (as in
# dplyr::nth); previously it crashed in `x[[n]]`.
#
# Args:
#   x        a vector or list.
#   n        position to extract; truncated to an integer. Negative values
#            count from the end (-1 is the last element).
#   order_by optional vector to order x by before extracting.
#   default  value returned when n is out of range (0, or |n| > length(x)).
# Returns: the selected element, or `default`.
nths <- function (x, n, order_by = NULL,
                  default = if (is.list(x)) NULL else x[NA_integer_])
{
    n <- trunc(n)
    if (n < 0) {
        # Count from the end: -1 -> last element.
        n <- length(x) + n + 1
    }
    if (n <= 0 || n > length(x)) {
        return(default)
    }
    if (is.null(order_by)) {
        x[[n]]
    }
    else {
        x[[order(order_by)[n]]]
    }
}
4b3c82da7ef6d9916cb69a4d2b34c4e91e74c99a | 2bec5a52ce1fb3266e72f8fbeb5226b025584a16 | /stocks/man/sharpe.Rd | 8cd7bb1ce458201dac38c61fa0f9448fa40dc7ec | [] | no_license | akhikolla/InformationHouse | 4e45b11df18dee47519e917fcf0a869a77661fce | c0daab1e3f2827fd08aa5c31127fadae3f001948 | refs/heads/master | 2023-02-12T19:00:20.752555 | 2020-12-31T20:59:23 | 2020-12-31T20:59:23 | 325,589,503 | 9 | 2 | null | null | null | null | UTF-8 | R | false | true | 891 | rd | sharpe.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sharpe.R
\name{sharpe}
\alias{sharpe}
\title{Sharpe Ratio}
\usage{
sharpe(gains = NULL, prices = NULL, rf = 0)
}
\arguments{
\item{gains}{Numeric matrix with 1 column of gains for each investment (can
be a vector if there is only one).}
\item{prices}{Numeric matrix with 1 column of prices for each investment (can
be a vector if there is only one).}
\item{rf}{Numeric value.}
}
\value{
Numeric value.
}
\description{
Calculates Sharpe ratio from vector of gains or prices. The formula is:
\code{(mean(gains) - rf) / sd(gains)}, where \code{rf} is some risk-free rate
of return.
}
\examples{
# Simulate daily gains over a 5-year period
set.seed(123)
stock.gains <- rnorm(252 * 5, 0.0005, 0.01)
# Calculate Sharpe ratio using risk-free return of 0
sharpe(stock.gains)
}
|
62a0e4d73e097933689d7c6e6e6aabc73dcf5c22 | 36245deb4a0db810e1faa88c024ccce6fd1321e4 | /man/update_rho_inexact.Rd | 3a2fca0e1f60ce96b4b6cdbbef767dcbd10ccef9 | [] | no_license | cran/multiband | 1dde88f5ea1896803ec62d1659fcb3ce2b825627 | 279beb62847a86157b1692012b02472da6ebe9fa | refs/heads/master | 2021-01-10T18:41:11.297675 | 2014-12-18T00:00:00 | 2014-12-18T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,844 | rd | update_rho_inexact.Rd | % Generated by roxygen2 (4.0.1): do not edit by hand
\name{update_rho_inexact}
\alias{update_rho_inexact}
\title{Update Phase parameter}
\usage{
update_rho_inexact(tms, beta, a, rho, omega, gamma, max_iter = 5)
}
\arguments{
\item{tms}{list of matrices whose rows are the triple (t,mu,sigma) for each band.}
\item{beta}{vector of the current intercept estimates}
\item{a}{amplitude estimates}
\item{rho}{vector of the current estimates of the phase}
\item{omega}{frequency}
\item{gamma}{nonnegative regularization parameter}
\item{max_iter}{maximum number of iterations}
}
\description{
\code{update_rho_inexact} inexactly updates the phase parameter rho via an MM algorithm using a convex quadratic majorization.
}
\examples{
test_data <- synthetic_multiband()
tms <- test_data$tms
B <- test_data$B
beta <- test_data$beta
a <- test_data$a
rho <- test_data$rho
omega <- test_data$omega
gamma <- 1
## Check answer
rho_next <- update_rho_inexact(tms,beta,a,rho,omega,gamma,max_iter=1)
L <- update_Lipschitz(tms,beta,a)
f <- L + gamma
zeta <- update_zeta(tms,beta,a,rho,L,omega)
rho_direct <- solve(diag(f)-(gamma/B),zeta)
norm(as.matrix(rho_direct-rho_next),'f')
## Verify monotonicity of MM algorithm
max_iter <- 1e2
obj <- double(max_iter)
loss <- double(max_iter)
rho_last <- rho
at <- rep(1/sqrt(B),B)
for (iter in 1:max_iter) {
rho_next <- update_rho_inexact(tms,beta,a,rho_last,omega,gamma,max_iter=1)
obj[iter] <- mm_phase_obj(rho_next,tms,beta,a,at,rho_last,omega,gamma,gamma)
loss[iter] <- pnll(tms,beta,a,at,rho_next,omega,gamma,gamma)
rho_last <- rho_next
}
obj <- c(mm_phase_obj(rho,tms,beta,a,at,rho,omega,gamma,gamma),obj)
plot(1:(max_iter+1),obj,xlab='iteration',ylab='mm objective',pch=16)
loss <- c(pnll(tms,beta,a,at,rho,omega,gamma,gamma),loss)
plot(1:(max_iter+1),loss,xlab='iteration',ylab='loss',pch=16)
}
|
f17efd5277ef167bd79e9d059e12524015483bad | 4f2743db548d08f57ec5c441011d94c28aa0ccac | /man/comments.model.Rd | 242f4965a5650a8af04803afba98b90dee34fa9f | [] | no_license | bergsmat/nonmemica | 85cdf26fa83c0fcccc89112c5843958669373a2a | 8eddf25fdd603a5aca719a665c5b9475013c55b3 | refs/heads/master | 2023-09-04T06:10:48.651153 | 2023-08-28T13:23:18 | 2023-08-28T13:23:18 | 78,268,029 | 4 | 1 | null | null | null | null | UTF-8 | R | false | true | 963 | rd | comments.model.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/model.R
\name{comments.model}
\alias{comments.model}
\title{Extract Comments from Model}
\usage{
\method{comments}{model}(
x,
fields = c("symbol", "unit", "label"),
expected = character(0),
na = NA_character_,
tables = TRUE,
...
)
}
\arguments{
\item{x}{model}
\item{fields}{data items to scavenge from control stream comments}
\item{expected}{parameters known from NONMEM output}
\item{na}{string to use for NA values when writing default metafile}
\item{tables}{whether to include table comments}
\item{...}{passed arguments}
}
\value{
data.frame
}
\description{
Extracts comments from model.
}
\examples{
library(magrittr)
options(project = system.file('project/model',package='nonmemica'))
1001 \%>\% as.model \%>\% comments
}
\seealso{
Other comments:
\code{\link{comments.inits}()},
\code{\link{comments.items}()},
\code{\link{comments}()}
}
\concept{comments}
|
9575f2921eb62ab8b40a1a43113be5b6a30a71a3 | 50db6eed13fe6a33d72096f12d20e91739082834 | /functions.R | a11b865d31d21cf5b8a567cd5280d6e846876b95 | [] | no_license | randomornot/kaggle-amazon | 00631915f9d0db2bfdfc8454de00bc5cb2471b55 | e2ae8be98a55ad5ff4ab49f5360e20279c11a319 | refs/heads/master | 2021-01-10T05:38:07.512406 | 2015-10-17T20:01:31 | 2015-10-17T20:01:31 | 44,452,218 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,023 | r | functions.R | get_correlation_between_two_features <- function(dataset, feature_1, feature_2)
{
df_for_corr = xtabs(~ (feature_1) + (feature_2), data = as.character(dataset))
return(assocstats(df_for_corr))
}
cross_validate = function(training_dataset_to_use, k, model_name)
{
x = sample(k, dim(training_dataset_to_use)[1], replace = TRUE)
auc_vec = vector("numeric", length = k)
if(model_name == "logistic")
{
for(i in 1:k)
{
training_set = training_dataset_to_use[x != i, ]
test_set = training_dataset_to_use[ x == i, ]
logistic_mod = glm(ACTION ~ ., family = "binomial", data = training_set)
predicted_test = predict.glm(logistic_mod, test_set[, -1])
pred_roc = prediction(predicted_test, test_set[, 1])
auc.perf = performance(pred_roc, measure = "auc")
auc_vec[i] = as.vector(auc.perf@y.values)
print(auc_vec[i])
}
}
else if(model_name == "rf")
{
for(i in 1:k)
{
training_set = training_dataset_to_use[x != i, ]
test_set = training_dataset_to_use[ x == i, ]
rf_mod <- randomForest(ACTION ~ ., data = training_set )
predicted_test = predict(rf_mod, test_set[,-1])
pred_roc = prediction(predicted_test, test_set[, 1])
auc.perf = performance(pred_roc, measure = "auc")
auc_vec[i] = as.vector(auc.perf@y.values)
print(auc_vec[i])
}
}
else if(model_name == "svm")
{
for(i in 1:k)
{
training_set = training_dataset_to_use[x != i, ]
test_set = training_dataset_to_use[ x == i, ]
svm_mod <- svm(ACTION ~ ., data = training_set )
predicted_test = predict(svm_mod, test_set[,-1])
pred_roc = prediction(predicted_test, test_set[, 1])
auc.perf = performance(pred_roc, measure = "auc")
auc_vec[i] = as.vector(auc.perf@y.values)
#print(auc_vec[i])
}
}
return(mean(sapply(auc_vec, sum)))
}
|
348acf558c45bbdb31150264305fc6919a214e62 | 015558bad7fedcf6530bb22055e385f9140246bb | /man/plot_phenofit.Rd | c728aa3694be79c3bfed616c2a863d3a5e0faf43 | [] | no_license | geogismx/phenofit | 25e3a79f19465388bdfbc334d7336bbd24c166c4 | 388611631581c0160ee6d4b2f28ab6f6306fd1a3 | refs/heads/master | 2020-03-19T09:24:56.718135 | 2018-05-15T09:08:12 | 2018-05-15T09:08:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 318 | rd | plot_phenofit.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/PhenoExtract_main.R
\name{plot_phenofit}
\alias{plot_phenofit}
\title{plot_phenofit}
\usage{
plot_phenofit(fit, d, title = NULL, show.legend = T, plotly = F)
}
\arguments{
\item{fit}{data from phenofit_site}
}
\description{
plot_phenofit
}
|
c1147514c5be5a92907424f982bacbf5415049a2 | 0ae38ac8a376952e862a5c7ce1d4041eb8d48e8f | /R_analysis/promoter_breadth/Motif_cooccurance_heatmap.r | 89797c4893d7586eb060bff9e169f7515dbf595d | [] | no_license | rtraborn/Daphnia_CAGE_Data | 69657b89d2ff0bed64e89d5f2d4e457df7016d10 | c0c539e16d0fdee4b9c9ca4b5548f22913e48f37 | refs/heads/master | 2021-06-01T09:48:49.296119 | 2016-06-28T19:39:44 | 2016-06-28T19:39:44 | 29,452,502 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 950 | r | Motif_cooccurance_heatmap.r |
library(gplots)
library(RColorBrewer)
setwd("/home/rtraborn/Daphnia/Daphnia_CAGE_Data/R_analysis/promoter_calling_pipelines/TCO/tagClusters/pooled_samples")
promoter_comp <- read.table(file="Dpm_core_matrix.logPvalue.matrix.txt", skip=1, header=FALSE,sep="\t",stringsAsFactors = FALSE)
colnames(promoter_comp) <- c("MotifID","Dpm1","Dpm2","Dpm3","Dpm4","Dpm5","Dpm6","Dpm7","Dpm8")
row_in <- promoter_comp[,1]
promoter_comp <- promoter_comp[,-1]
rownames(promoter_comp) <- colnames(promoter_comp)
promoter_comp
promoter_comp_m <- as.matrix(promoter_comp)
head(promoter_comp_m)
is.matrix(promoter_comp_m)
r1 <- range(promoter_comp_m) - median(promoter_comp_m)
r1
hmcol<-brewer.pal(11,"RdBu")
par(mar=c(4.1,4.1,4.1,4.1))
png("Dpm_motifs_correlation.png",bg = "transparent",width= 1000, height = 1000, units = "px")
heatmap.2(promoter_comp_m,trace="none",notecol="black",col=colorRampPalette(c("red","white","blue"))(100))
dev.off()
|
4e72322ca34aa16efed69ff0e777d416f745cad9 | 68b22104cc432f679a89643d899468e86667b6f2 | /505/mahattan_plot_fatty_acid_all.R | 095a214365032af51122fdbeb75a073d489d14c9 | [] | no_license | lamyusam/KIAT | 7a38aec5b5bc61400f9fc83715ae7ce43413e41d | cf303fdd440b301fa17bc450184b9863e0869101 | refs/heads/master | 2022-02-19T08:47:30.872287 | 2019-04-19T03:20:10 | 2019-04-19T03:20:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,799 | r | mahattan_plot_fatty_acid_all.R | # GWAS result plot
# GWAS Manhattan plots (flowering time, oil content, bolting time) from
# GAPIT association results for 131 late-silique samples; the fatty-acid
# result files are loaded here as well for downstream use.
library("qqman")
Oil_content <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Oil_content.GWAS.Results.csv", header = TRUE)
# Caprylic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Caprylic_acid.GWAS.Results.csv", header = T)
# Capric_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Capric_acid.GWAS.Results.csv", header = T)
# Lauric_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Lauric_acid.GWAS.Results.csv", header = T)
Myristic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Myristic_acid.GWAS.Results.csv", header = TRUE)
Pentadecanoic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Pentadecanoic_acid.GWAS.Results.csv", header = TRUE)
Palmitic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Palmitic_acid.GWAS.Results.csv", header = TRUE)
Palmitoliec_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Palmitoliec_aicd.GWAS.Results.csv", header = TRUE)
Heptadecanoic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Heptadecanoic_acid.GWAS.Results.csv", header = TRUE)
Stearic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Stearic_acid.GWAS.Results.csv", header = TRUE)
Oleic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Oleic_acid.GWAS.Results.csv", header = TRUE)
vaccenic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..vaccenic_acid.GWAS.Results.csv", header = TRUE)
Linoleic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Linoleic_acid.GWAS.Results.csv", header = TRUE)
Arachidic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Arachidic_acid.GWAS.Results.csv", header = TRUE)
cis_11_Eicosenoic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..cis_11_Eicosenoic_acid.GWAS.Results.csv", header = TRUE)
Linolenic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Linolenic_acid.GWAS.Results.csv", header = TRUE)
Behenic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Behenic_acid.GWAS.Results.csv", header = TRUE)
Erucic_acid <- read.csv("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/myGAPIT/late_silique_131_sample/fatty_acid_all/GAPIT..Erucic_acid.GWAS.Results.csv", header = TRUE)

# Rename the first four columns to what qqman::manhattan() expects.
# FIX(review): the original renamed Oil_content twice; given the plots below,
# the second line was presumably meant for flower_time. flower_time and
# bolting_time are not read in this script -- they must be loaded beforehand
# (e.g. from the companion flowering/bolting GWAS result files).
colnames(Oil_content)[1:4] <- c("SNP", "CHR", "BP", "P")
colnames(flower_time)[1:4] <- c("SNP", "CHR", "BP", "P")
colnames(bolting_time)[1:4] <- c("SNP", "CHR", "BP", "P")

png("/Network/Servers/avalanche.plb.ucdavis.edu/Volumes/Mammoth/Users/ruijuanli/505/output/figure/131_sample/bolting_flowering_oil.png", width=12, height=10, units="in", res=300)
par(mfrow=c(3,1))
# FIX(review): "supggestiveline" was misspelled in the first call, so the
# argument fell into ... and no suggestive line was drawn for that panel.
manhattan(flower_time, main = "Flowering time", ylim = c(0, 8), cex = 0.6,
          cex.axis = 0.9, col = c("blue4", "orange3"), suggestiveline = 5, genomewideline = FALSE
)
manhattan(Oil_content, main = "Oil content", ylim = c(0, 8), cex = 0.6,
          cex.axis = 0.9, col = c("blue4", "orange3"), suggestiveline = 5, genomewideline = FALSE
)
manhattan(bolting_time, main = "bolting_time", ylim = c(0, 8), cex = 0.6,
          cex.axis = 0.9, col = c("blue4", "orange3"), suggestiveline = 5, genomewideline = FALSE
)
dev.off()
79d440c0abbc93655bac4a94760f5af4e5363269 | 546e659d183e16e0a15bab5dd32b5725b62fd8a2 | /R/yuez/man/running_sd0.Rd | 56ed167c2f2fbe1012437cca7a95d681ffac4bf3 | [] | no_license | giantwhale/yuez | 805ba103a68c417dd68a635416259272b00e8800 | 6ef56b75a2ebb3833a7c36353f32bd5b3a014389 | refs/heads/master | 2021-09-02T23:53:02.951341 | 2018-01-04T05:07:09 | 2018-01-04T05:07:09 | 116,074,134 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 500 | rd | running_sd0.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{running_sd0}
\alias{running_sd0}
\title{Running Standard Deviation assuming Mean = 0}
\usage{
running_sd0(x, w, min_size = 2L)
}
\arguments{
\item{x}{numeric vector}
\item{w}{integer, window size, results are right aligned if w < 0, left aligned if w > 0}
\item{min_size}{if number of non-NA elements is fewer than min_size, return NA}
}
\description{
Running Standard Deviation assuming Mean = 0
}
|
6e24549bfc0e3ba44ee834d9c5ef9cc8bb50234b | 91bdd5d4d507775a52c29786d1f773415fa26586 | /R/main_plotting.R | f2b4f1c61bbd5c1f3873d380b71917365d480f6d | [] | no_license | NVE/FlomKart_ShinyApp | b49a61cb9442cd4a0a9719923e306681de33d451 | 2b883729714b5e1a9a3a3dc75a6d0d6000499448 | refs/heads/master | 2021-01-18T22:19:48.080878 | 2018-01-26T13:03:10 | 2018-01-26T13:03:10 | 72,442,114 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,907 | r | main_plotting.R | # Plotting function for the ShinyApp
#' plot4server
#' @description plot fitted probability density function to estimated empirical pdf
#' @param dat numeric vector of observed flood discharges (m3/s)
#' @param param numeric vector of fitted distribution parameters (2 or 3
#'   elements depending on the distribution)
#' @param distr.index integer code of the fitted distribution: 1 = Gumbel,
#'   2 = gamma (2-parameter), 3 = GEV, 4 = generalized logistic,
#'   5 = gamma (3-parameter, nsRFA parameterization)
#'
#' @return Returns nothing else than a plot, saves nothing
#' @importFrom nsRFA f.genlogis
#' @importFrom nsRFA f.gamma
#' @importFrom evd dgev
#' @importFrom evd dgumbel
#' @importFrom stats dgamma
#' @export
#'
#' @examples
plot4server <- function(dat, param, distr.index = 1) {

  xmax <- max(dat)*1.2
  x <- seq(0, xmax, xmax / 100)

  # Model density at the fitted parameters. FIX(review): when all parameters
  # are NA (failed fit) the original left y undefined and crashed computing
  # ymax; y now stays NULL and only the empirical density is drawn.
  y <- NULL
  if (!all(is.na(param))) {
    if(distr.index == 1) y <- dgumbel(x, param[1], param[2])
    if(distr.index == 2) y <- dgamma(x, param[1], param[2])
    if(distr.index == 3) y <- evd::dgev(x, param[1], param[2], param[3])
    if(distr.index == 4) y <- f.genlogis(x, param[1], param[2], param[3])
    if(distr.index == 5) y <- f.gamma(x, param[1], param[2], param[3])
  }

  # Common vertical scale covering both the model curve and the kernel
  # density estimate (with 10% headroom).
  emp_ymax <- max(na.omit(density(dat)$y))
  ymax <- if (is.null(y)) emp_ymax * 1.1 else max(max(na.omit(y)), emp_ymax) * 1.1

  # Histogram of the input data, common to all distributions.
  hist(dat, xlab = "Flood discharge (m3/s)",ylab = "Probability density",freq = FALSE,
       breaks = seq(0, xmax, xmax / 15), col = "gray", main = NULL, xlim = c(0, xmax), ylim = c(0, ymax))

  # Overlay the fitted model density, if available.
  if (!is.null(y)) {
    par(new = TRUE)
    plot(x, y, xlim = c(0, xmax), ylim = c(0, ymax), type = "l", lwd = 2, col = "black", xlab = "", ylab = "")
  }

  # Overlay the empirical kernel density estimate.
  par(new = TRUE)
  plot(density(dat), main = "Density distribution and data histogramm",
       xlim = c(0, xmax), ylim = c(0, ymax), lty = 1, lwd = 3, col = "blue", xlab = "", ylab = "")

  legend("topright", inset = .05, c("Model","Empirical" ), col = c("black","blue"),lty = c(1, 1),lwd=c(2, 3),
         merge = TRUE, bg = "gray90")
}
#' plot4server_rlevel
#' @description Plots modelled return levels together with empirical return
#'   periods of the observations
#' @param dat numeric vector of observed flood discharges (m3/s)
#' @param param numeric vector of fitted distribution parameters
#' @param distr.index integer code of the fitted distribution: 1 = Gumbel,
#'   2 = gamma, 3 = GEV, 4 = generalized logistic, 5 = 3-parameter gamma
#'
#' @return Returns nothing else than a plot, saves nothing
#' @export
#'
#' @examples
plot4server_rlevel <- function(dat, param, distr.index = 1) {
  # Discharge grid spanning the observations, common to all distributions.
  q_lo <- min(dat)
  q_hi <- max(dat) * 1.5
  q_grid <- seq(q_lo, q_hi, length = 100)
  emp_q <- sort(dat)

  # Hazen plotting positions -> empirical return periods. Every distribution
  # branch of the original used this exact formula, so compute it once.
  emp_T <- 1 / (1 - (seq(1:length(emp_q)) - 0.50) / (length(emp_q)))

  # Modelled return period for each grid level: T = 1 / (1 - F(q)).
  if (distr.index == 1) ret_T <- 1 / (1 - pgumbel(q_grid, param[1], param[2]))
  if (distr.index == 2) ret_T <- 1 / (1 - pgamma(q_grid, param[1], param[2]))
  if (distr.index == 3) ret_T <- 1 / (1 - evd::pgev(q_grid, param[1], param[2], param[3]))
  if (distr.index == 4) ret_T <- 1 / (1 - F.genlogis(q_grid, param[1], param[2], param[3]))
  if (distr.index == 5) ret_T <- 1 / (1 - nsRFA::F.gamma(q_grid, param[1], param[2], param[3]))

  # Gumbel-type abscissa: double log of the return period, with the tick
  # marks drawn manually (xaxt = "n" suppresses the default axis).
  plot(log(log(ret_T)), q_grid, xlim = c(0, log(log(1000))), xaxt = "n", ylim = c(0, q_hi),
       main = "Return levels", xlab = "Return period (years)", ylab = "Flood discharge (m3/s)", type = "l", lwd = 2)
  tick_years <- c(5, 10, 20, 50, 100, 200, 500)
  axis(1, at = log(log(tick_years)), labels = tick_years)

  # Empirical points: (return period, sorted observation).
  points(log(log(emp_T)), emp_q, pch = 16, col = "blue")
  grid(nx = 7, ny = 10, lwd = 2)
}
#' plot4server_cdf
#' @description Plot the fitted and the empirical cumulative distribution
#'   function on a common axis
#' @param dat numeric vector of observed flood discharges (m3/s)
#' @param param numeric vector of fitted distribution parameters
#' @param distr integer code of the fitted distribution: 1 = Gumbel,
#'   2 = gamma, 3 = GEV, 4 = generalized logistic, 5 = 3-parameter gamma
#'
#' @return Returns nothing else than a plot, saves nothing
#' @export
#'
#' @examples
plot4server_cdf <- function(dat, param, distr = 1) {
  x_hi <- max(dat) * 1.2
  grid_x <- seq(0, x_hi, x_hi / 100)

  # Model CDF evaluated on the discharge grid.
  if (distr == 1) model_cdf <- pgumbel(grid_x, param[1], param[2])
  if (distr == 2) model_cdf <- pgamma(grid_x, param[1], param[2])
  if (distr == 3) model_cdf <- evd::pgev(grid_x, param[1], param[2], param[3])
  if (distr == 4) model_cdf <- F.genlogis(grid_x, param[1], param[2], param[3])
  if (distr == 5) model_cdf <- F.gamma(grid_x, param[1], param[2], param[3])

  # Empirical step CDF first (blue), then the fitted curve on the same axes.
  plot(ecdf(dat), main = "Cumulative density function", xlim = c(0, x_hi), ylim = c(0, 1),
       xlab = "", ylab = "", lty = 21, col = "blue")
  par(new = TRUE)
  plot(grid_x, model_cdf, xlim = c(0, x_hi), ylim = c(0, 1),
       type = "l", lwd = 2, col = "black", xlab = "Flood discharge (m3/s)", ylab = "Cumulative probability")
}
#' plot4server_qq
#'
#' @description QQ plot of empirical quantiles against the quantiles of the
#'   fitted distribution at the Hazen plotting positions.
#' @param dat Numeric vector of observed flood discharges.
#' @param param Numeric vector of fitted distribution parameters.
#' @param distr Integer code selecting the fitted distribution:
#'   1 = Gumbel, 2 = gamma, 3 = GEV, 4 = generalised logistic,
#'   5 = three-parameter gamma.
#'
#' @return Returns nothing else than a plot else than a plot, saves nothing
#' @export
#'
#' @examples
plot4server_qq <- function(dat, param, distr = 1) {
  # Compute plotting position
  # pvalues <-(seq_len(length(dat))-0.35)/length(dat) # APL
  p.values <- (seq_len(length(dat)) - 0.5) / length(dat) # Hazen, a traditional choice
  # Empirical quantiles: the sorted observations.
  y <- sort(dat)
  # Theoretical quantiles of the fitted distribution at the plotting
  # positions.  BUG FIX: the previous version called the random-number
  # generators (rgumbel / rgamma / rgev / rand.gamma), whose first argument
  # is the *number of draws*, so the "modelled" axis was a random sample
  # that changed on every call instead of the model quantiles.  The quantile
  # functions are used instead, consistent with the invF.genlogis branch
  # which was already correct.
  if(distr == 1) x <- sort(evd::qgumbel(p.values, param[1], param[2]))
  if(distr == 2) x <- sort(stats::qgamma(p.values, param[1], param[2]))
  if(distr == 3) x <- sort(evd::qgev(p.values, param[1], param[2], param[3]))
  if(distr == 4) x <- sort(invF.genlogis(p.values, param[1], param[2], param[3]))
  if(distr == 5) x <- sort(invF.gamma(p.values, param[1], param[2], param[3]))
  if (length(x) == length(y)) {
    plot(x, y, ylab = "Empirical flood discharge (m3/s)", xlab = "Modelled flood discharge (m3/s)",
         main = "Quantile-Quantile Plot", pch = 16, col = "blue")
    # Reference line: a perfect fit falls on the identity line.
    par(new = TRUE)
    abline(0, 1, lwd = 2, col = "black")
  } else {
    # Fallback when quantile computation failed for the record length.
    plot(1,1)
    legend("topright", inset = .05, "Missing or wrong data for the record length", bty = "n", bg = "gray90", cex = 1.2)
  }
}
f006e5bcfc518373643061cebcb58b4479b3231d | 63a8b105407d6d2a25b25df44637a730d63168e9 | /R/posteriors/posterior_predictions.R | 21f6b1003f5d000f275032a3bedeab25daf34433 | [] | no_license | jmhewitt/ctds_dives_jabes | 5857b0e37a01fd80e77b099bf46ef5cf63147b36 | 30de1a1d3b825cf2f07261f40db64b3558053976 | refs/heads/main | 2023-01-03T17:27:16.503272 | 2020-10-30T16:27:07 | 2020-10-30T16:27:07 | 308,685,611 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,931 | r | posterior_predictions.R | posterior_predictions = function(nim_pkg, sample_file, template_bins, inds,
                                 tag, sattag_timestep) {
  # Draws posterior-predictive dives for one tag: for each posterior sample
  # index in `inds`, a dive is simulated from the sampled model parameters
  # and then "observed" on the regular satellite-tag sampling grid.
  if(file.exists('nim_pkg_0.5.rds')) {
    # NOTE(review): when this cached file is present in the working
    # directory it silently replaces the `nim_pkg` argument -- confirm this
    # override is intended.
    nim_pkg = readRDS('nim_pkg_0.5.rds')
  }
  # tags with validation dives
  # tag_ids = unique(nim_pkg$consts$dive_relations_validation[, 'tag'])
  # extract number of train/validation dives per tag
  # NOTE(review): these two tables are computed but not used below.
  train_dives_per_tag = table(nim_pkg$consts$dive_relations[, 'tag'])
  test_dives_per_tag = table(nim_pkg$consts$dive_relations_validation[, 'tag'])
  # load posterior samples (brings the `samples` matrix used below into scope)
  load(sample_file)
  # sample dives from posterior predictive distribution; one list entry per
  # posterior sample index
  lapply(inds, function(ind) {
    #
    # simulate dive
    #
    # extract parameters for this posterior draw
    beta = samples[ind, c('pi[1]', 'pi[3]')]
    lambda = samples[ind, paste('lambda[', tag, ', ', 1:3, ']', sep = '')]
    # sample a training dive from which to use stage transition times
    dive_id = sample(
      x = which(nim_pkg$consts$dive_relations[, 'tag'] == tag),
      size = 1
    )
    # extract stage transition times (stored on the log scale in `samples`)
    stage_times = exp(
      samples[ind, paste('log_xi[', dive_id, ', ', 1:2, ']', sep ='')]
    )
    # sample a complete dive trajectory forward from the parameters
    d = dsdive.fwdsample.dive(depth.bins = template_bins, beta = beta,
                              lambda = lambda, t0 = 0, steps.max = 1e3,
                              T1 = stage_times[1], T2 = stage_times[2])
    # find dive start/end endpoints associated with dive
    endpoint_ids = nim_pkg$consts$dive_relations[
      dive_id, c('T0_endpoint', 'T3_endpoint')
    ]
    # extract nominal (i.e., "best guess") times for dive start/end:
    # midpoint of each endpoint's prior interval
    obs_endpoints = apply(
      nim_pkg$consts$endpoint_priors[endpoint_ids, ], 1, mean
    )
    # extract sampled times for dive start/end
    est_endpoints = samples[
      ind, paste('endpoints[', endpoint_ids, ']', sep = '')
    ]
    # compute offsets between nominal and sampled endpoint times
    offsets = obs_endpoints - est_endpoints
    names(offsets) = c('dive_start', 'dive_end')
    #
    # observe dive
    #
    # exact duration of dive
    duration = d$times[length(d$times)] - d$times[1]
    # build sequence of observation times, shifted by the endpoint offsets
    t.obs = seq(from = max(-offsets['dive_start'], 0),
                to = duration - offsets['dive_end'],
                by = sattag_timestep)
    # observe (discretize) the simulated dive at those times
    obs = dsdive.observe(depths = d$depths, times = d$times,
                         stages = d$stages, t.obs = t.obs)
    # relabel observation times to a grid starting at 0
    obs$times = seq(from = 0, by = sattag_timestep,
                    length.out = length(t.obs))
    # compute observed stage durations from the stage-change indices
    stages.dur = diff(c(0, obs$times[c(FALSE, diff(obs$stages)==1)],
                        obs$times[length(obs$times)]))
    # package results
    list(
      dive = d,
      dive.obs = obs,
      stages.dur = stages.dur,
      offsets = offsets
    )
  })
}
196b822ee376ba021dbb3e69c144d784c2d6a201 | f1183482e47167a9020046c061a53b88179193ec | /scripts/outcomes/do_medication_stats.R | 8c5d2a8134fbafa0b7bc778b106388bf6ef7b299 | [
"MIT"
] | permissive | morrislab/plos-medicine-joint-patterns | a7a4ff4ce5f16d673fe2af48429ebe43b5132458 | cfdc6dd4854ec33e7e2efbf36d648b65d278df33 | refs/heads/master | 2020-04-17T09:33:09.077084 | 2019-01-18T19:33:32 | 2019-01-18T19:33:32 | 166,462,950 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,441 | r | do_medication_stats.R | # Conducts statistics on medication statuses.
# Command-line script: per-classification chi-squared tests of medication
# status counts against the pooled visit/medication-level distribution.
library(argparse)
library(data.table)
library(gtools)
library(plyr)
library(doMC)
rm(list = ls())
# Get arguments.
parser <- ArgumentParser()
parser$add_argument('--input', required = TRUE)
parser$add_argument('--output', required = TRUE)
parser$add_argument('--iterations', type = 'integer', default = 2000)
parser$add_argument('--seed', type = 'integer', default = 73528552)
parser$add_argument('--threads', type = 'integer', default = 1, help = 'the number of threads to use')
args <- parser$parse_args()
# Load the data.
message('Loading medications')
dt.medications <- fread(args$input)
# Remove NSAIDs.
message('Removing NSAIDs')
dt.medications <- dt.medications[medication != 'nsaid']
# Conduct statistics.
message('Conducting statistics')
# Reference distribution: status counts per (visit, medication), pooled over
# all cls_type values whose name starts with "classification".
dt.global.dists <- dt.medications[grepl('^classification', cls_type), .(count = sum(count)), by = .(visit_id, medication, status)]
dt.global.dists.casted <- dcast(dt.global.dists, visit_id + medication ~ status, fill = 0)
setkey(dt.global.dists.casted, visit_id, medication)
registerDoMC(args$threads)
# NOTE(review): set.seed() here does not control the RNG streams of the doMC
# workers used by .parallel = TRUE, so the simulated p-values may not be
# fully reproducible across runs -- confirm whether that matters.
set.seed(args$seed)
dt.results <- rbindlist(llply(dt.medications[, unique(cls_type)], function (cls.type) {
  dt.subset <- dt.medications[cls_type == cls.type]
  dt.subset.casted <- dcast(dt.subset, visit_id + cls + medication ~ status, value.var = 'count', fill = 0)
  # Chi-squared test (simulated p-value, B iterations) of one
  # (visit, cls, medication) slice against the pooled reference counts.
  do.stats <- function (dt.slice, visit_id, cls, medication) {
    message('Classification type ', cls.type, ', Visit ', visit_id, ', Classification ', cls, ', Medication ', medication)
    observed <- unlist(dt.slice)
    query.key <- list(visit_id, medication)
    reference <- unlist(dt.global.dists.casted[query.key][, .(`FALSE`, `TRUE`)])
    chisq.res <- chisq.test(observed, p = reference, rescale.p = TRUE, simulate.p.value = TRUE, B = args$iterations)
    data.table(x2 = chisq.res$statistic, p = chisq.res$p.value, pos_stdres = chisq.res$stdres['TRUE'])
  }
  dt.subset.casted[, do.stats(.SD, visit_id, cls, medication), by = .(visit_id, cls, medication)]
}, .parallel = TRUE))
dt.results <- dt.results[!is.na(p)]
# Multiplicity adjustment within each visit/medication stratum
# (p.adjust's default method).
dt.results[, p_adjusted := p.adjust(p), by = .(visit_id, medication)]
# Two-sided normal p-value for the standardized residual of the TRUE cell.
dt.results[, p_residual := pnorm(-abs(pos_stdres)) * 2]
dt.results[, p_residual_adjusted := p.adjust(p_residual), by = .(visit_id, medication)]
# Write the output.
message('Writing output')
write.csv(dt.results, args$output, row.names = FALSE)
c4f482ca1e6317b232de8d5e8b285086db92d276 | 101401023b390e7a5a63254ad57f15b7e5b7c952 | /plot2.R | f0f7fb5b15daf071a77c203bf2d8f8c5b3642ff3 | [] | no_license | kaiyang1995/ExData_Plotting1 | 8c590f75273b26fe895d4c44a5ef7c02b329e183 | 0b1326aa170eb75535a3d20766c5f32ff82edbd7 | refs/heads/master | 2022-10-15T09:53:00.199837 | 2020-06-13T11:32:06 | 2020-06-13T11:32:06 | 271,981,374 | 0 | 0 | null | 2020-06-13T09:48:23 | 2020-06-13T09:48:22 | null | UTF-8 | R | false | false | 462 | r | plot2.R | data_full <- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?")
# Keep only the two analysis days, 2007-02-01 and 2007-02-02.
data1 <- subset(data_full, Date %in% c("1/2/2007","2/2/2007"))
data1$Date <- as.Date(data1$Date, format="%d/%m/%Y")
# Combine date and time into a single POSIXct timestamp for the x axis.
datetime<-paste(data1$Date,data1$Time)
data1$datetime<-as.POSIXct(datetime)
# Line plot of global active power over time, drawn on the screen device.
with(data1,plot(data1$datetime,data1$Global_active_power,type="l",xlab="",ylab="Global Active Power (kilowatts)"))
# Copy the current plot to a 480x480 PNG file and close that device.
dev.copy(png,file="plot2.png",height=480,width=480)
dev.off()
|
d465e05b8f49a3567845e2912a0bf3aa527ec6cf | c49ba7586c128e3caabe6f0d8da8226540b6f3a6 | /man/download_place.Rd | 5a158be2af3c4b232f52075a577c1e0f5f32781d | [
"MIT"
] | permissive | JiriStipl/strviewr | bbeb7a8859afe505dcd1569eddb98cf573c76825 | 35ba9956cb4f5014c70e9270d50881a86a6633c4 | refs/heads/master | 2020-03-28T13:25:45.677479 | 2019-01-18T01:12:44 | 2019-01-18T01:12:44 | 147,978,933 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 634 | rd | download_place.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/place.R
\name{download_place}
\alias{download_place}
\title{download_place}
\usage{
download_place(place_code, loc, folder, step = 30)
}
\arguments{
\item{place_code}{A number identifying the place to be downloaded; it is passed to the filenames of the images}
\item{loc}{vector c(lat,lng)}
\item{folder}{Folder to save the images in; defaults to the current working directory}
\item{step}{Change of angle between two images in degrees}
}
\value{
Returns nothing.
}
\description{
Downloads a 360-degree panorama sequence of images with a defined change in the angle of view
}
|
5f4cbb2e1cee5e5063de8fc668d72fee63664fcc | 29585dff702209dd446c0ab52ceea046c58e384e | /noncompliance/R/RcppExports.R | 4ee0a9ce2604d6af7c92b1fd630e464a8cd5bdf9 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,548 | r | RcppExports.R | # This file was generated by Rcpp::compileAttributes
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393
# NOTE: auto-generated by Rcpp::compileAttributes -- manual edits are
# overwritten the next time attributes are recompiled.  Each function below
# is a thin R wrapper that forwards its arguments unchanged to the matching
# compiled C++ routine registered in the 'noncompliance' shared library.

# Wrapper for the compiled routine noncompliance_FindMLE_CONT_H0_hypergeoC.
.FindMLE_CONT_H0_hypergeoC <- function(n_y0x0z0, n_y1x0z0, n_y0x0z1, n_y1x0z1, n_y0x1z1, n_y1x1z1) {
    .Call('noncompliance_FindMLE_CONT_H0_hypergeoC', PACKAGE = 'noncompliance', n_y0x0z0, n_y1x0z0, n_y0x0z1, n_y1x0z1, n_y0x1z1, n_y1x1z1)
}

# Wrapper for the compiled routine noncompliance_FindMLE_CONT_H1_hypergeoC.
.FindMLE_CONT_H1_hypergeoC <- function(n_y0x0z0, n_y1x0z0, n_y0x0z1, n_y1x0z1, n_y0x1z1, n_y1x1z1) {
    .Call('noncompliance_FindMLE_CONT_H1_hypergeoC', PACKAGE = 'noncompliance', n_y0x0z0, n_y1x0z0, n_y0x0z1, n_y1x0z1, n_y0x1z1, n_y1x1z1)
}

# Wrapper for the compiled routine noncompliance_AllPossiblyObsH0_CONT_C.
.AllPossiblyObsH0_CONT_C <- function(obs_y0x0z0, obs_y1x0z0, obs_y0x0z1, obs_y1x0z1, obs_y0x1z1, obs_y1x1z1) {
    .Call('noncompliance_AllPossiblyObsH0_CONT_C', PACKAGE = 'noncompliance', obs_y0x0z0, obs_y1x0z0, obs_y0x0z1, obs_y1x0z1, obs_y0x1z1, obs_y1x1z1)
}

# Wrapper for the compiled routine noncompliance_AllPossiblyObsH0qH1_CONT_C.
.AllPossiblyObsH0qH1_CONT_C <- function(obs_y0x0z0, obs_y1x0z0, obs_y0x0z1, obs_y1x0z1, obs_y0x1z1, obs_y1x1z1) {
    .Call('noncompliance_AllPossiblyObsH0qH1_CONT_C', PACKAGE = 'noncompliance', obs_y0x0z0, obs_y1x0z0, obs_y0x0z1, obs_y1x0z1, obs_y0x1z1, obs_y1x1z1)
}

# Wrapper for the compiled routine noncompliance_GetPvalueshypergeoC_allpsi_CONT.
.GetPvalueshypergeoC_allpsi_CONT <- function(n_y0x0z0_H0, n_y1x0z0_H0, n_y0x0z1_H0, n_y1x0z1_H0, n_y0x1z1_H0, n_y1x1z1_H0, n_NTy0_H0, n_CONR_H0, n_COAR_H0, n_NTy1_H0, critical_regions) {
    .Call('noncompliance_GetPvalueshypergeoC_allpsi_CONT', PACKAGE = 'noncompliance', n_y0x0z0_H0, n_y1x0z0_H0, n_y0x0z1_H0, n_y1x0z1_H0, n_y0x1z1_H0, n_y1x1z1_H0, n_NTy0_H0, n_CONR_H0, n_COAR_H0, n_NTy1_H0, critical_regions)
}
|
ab34564ab1bd532e16a2102138e573d9aec15ff0 | 1e81deb64a22c92d6cd53842ae7d8be0c3e9d49b | /2020-10-13_datasaurus-dozen/test-plots.R | 796fe86efa60d62f6f874a0359f0fb6fb31a0ca6 | [
"BSD-2-Clause"
] | permissive | jmcastagnetto/tidytuesday-kludges | 5bfea5ccd4640df4e9e5367794dbb09594ac32a3 | 13dcb24694acff3839a7e1322d725e80bb146ae0 | refs/heads/main | 2023-04-07T04:33:13.715619 | 2023-03-29T03:46:12 | 2023-03-29T03:46:12 | 193,815,379 | 9 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,912 | r | test-plots.R | library(tidyverse)
df <- readRDS("2020-10-13_datasaurus-dozen/datasaurus-dozen.rds")
df2 <- df %>%
pivot_longer(
c(x, y),
names_to = "var",
values_to = "value"
)
ggplot(df2, aes(x = value, y = dataset, fill = dataset)) +
geom_density_ridges(show.legend = FALSE) +
facet_wrap(~var)
p0 <- ggplot(df, aes(x = x, y = y, color = dataset)) +
#geom_point(show.legend = FALSE, color = "white") +
geom_density_2d_filled(show.legend = FALSE) +
xlim(-10, 110) +
ylim(-10, 110) +
coord_equal() +
theme_classic(18)
library(gganimate)
p0 <- ggplot(df2, aes(x = value, color = dataset)) +
geom_density(show.legend = FALSE) +
theme_classic(18) +
facet_wrap(~var)
p0 +
labs(
title = "The Datasaurus Dozen: {closest_state}",
subtitle = "#TidyTuesday 2020-10-13",
x = "",
y = ""
) +
transition_states(
dataset,
transition_length = 2,
state_length = 2
)
# cool!
p0 +
geom_smooth(method = "lm") +
facet_wrap(~dataset)
models <- df %>%
group_by(dataset) %>%
group_modify(
~ broom::augment(lm(y ~ x, data = .))
)
p0 <- ggplot(models, aes(x = y, color = dataset)) +
geom_density()
p0 +
facet_wrap(~dataset)
ggplot(models, aes(x = .resid, color = dataset)) +
geom_density() +
facet_wrap(~dataset)
library(ggridges)
ggplot(models, aes(x = .resid, y = dataset)) +
geom_density_ridges()
ggplot(models, aes(x = x, y = dataset, fill = dataset)) +
geom_density_ridges(show.legend = FALSE)
library(ggExtra)
p1 <- ggplot(df, aes(x = x, y = y, group = dataset)) +
geom_point(aes(color = dataset), show.legend = FALSE)
p1
p2 <- ggMarginal(p1, type = "", groupColour = TRUE, groupFill = TRUE)
p2
library(ggpubr)
p3 <- ggscatterhist(
df,
x = "x",
y = "y",
color = "dataset",
group = "dataset",
margin.params = list(fill = "dataset", color = "black", size = 0.2)
)
p3$sp <- p3$sp +
facet_wrap(~dataset)
p3
|
3189896e8f3d818c46d1e25883271b3ced76ba08 | d6669bfbfec0635c5b7f3c86f87f4d24199e9ee8 | /1_lab2_quiz_practice_script.R | e1c72644d306c5bb8f03f18afde04e1a14734343 | [] | no_license | BrookeCharbonneau/lab2_quiz_practice | c5ea532a0a9e82c1a9962ad5f30d5793062b3bad | 535133c677a0cfc746c4d230120800762722759b | refs/heads/master | 2021-01-11T03:24:55.594312 | 2016-10-16T02:17:32 | 2016-10-16T02:17:32 | 71,010,173 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,441 | r | 1_lab2_quiz_practice_script.R | ## Creating analytic data file
# Builds the analytic data file for the lab 2 quiz practice: reads
# raw_data.csv, recodes sex, screens out-of-range item values, computes
# scale scores with psych::alpha, and writes analytic_data.csv.
library(tidyverse)
library(apaTables)
raw_data <- read_csv(file="raw_data.csv")
str(raw_data)
# View(raw_data)
# Recode sex from 1/2 numeric codes to labelled factor levels.
raw_data$sex <- as.factor(raw_data$sex)
levels(raw_data$sex) <- list("Male"=1, "Female"=2)
# Pull out the column sets used below.
sex <- select(raw_data, sex)
neg_affect_items <- select(raw_data, afraid, angry, anxious, ashamed)
pos_affect_items <- select(raw_data, delighted, elated, enthusiastic, excited)
Neuroticism <- select(raw_data, Neuroticism)
Extraversion <- select(raw_data, Extraversion)
# Affect items are screened to the 0-3 range; anything outside becomes NA.
psych::describe(neg_affect_items)
is_bad_value <- neg_affect_items<0 | neg_affect_items>3
neg_affect_items[is_bad_value] <- NA
# View(neg_affect_items)
psych::describe(pos_affect_items)
is_bad_value <- pos_affect_items<0 | pos_affect_items>3
pos_affect_items[is_bad_value] <- NA
# View(pos_affect_items)
# Personality scales are screened to the 0-24 range.
is_bad_value <- Neuroticism<0 | Neuroticism>24
Neuroticism[is_bad_value] <- NA
is_bad_value <- Extraversion<0 | Extraversion>24
Extraversion[is_bad_value] <- NA
psych::describe(Neuroticism)
psych::describe(Extraversion)
## To obtain scale scores:
# Scale scores are taken from the $scores element returned by psych::alpha.
pos_affect <- psych::alpha(as.data.frame(pos_affect_items),check.keys=FALSE)$scores
neg_affect <- psych::alpha(as.data.frame(neg_affect_items),check.keys=FALSE)$scores
# Assemble and save the analytic dataset.
analytic_data <- cbind(sex,pos_affect,neg_affect,Neuroticism, Extraversion)
# View(analytic_data)
write_csv(analytic_data,path="analytic_data.csv")
str(analytic_data)
analytic_data
# View(analytic_data)
ac82c54bf645cb51c1b35c25c35e713c7506d69d | 058f099a93ae6e1bc81801651e598ff6483129d4 | /man/print.summary_greek.Rd | f6ebe31ba4c6358a5f45ccef5c310e6fb226eb93 | [] | no_license | cran/greekLetters | a8d9be90c44cb020cb984f30c629dca1ab0c3ab4 | 681667219ab632bbfd5117755df907deb25144b9 | refs/heads/master | 2022-11-13T23:55:27.615432 | 2020-07-06T14:00:02 | 2020-07-06T14:00:02 | 278,228,966 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,349 | rd | print.summary_greek.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/print_greek.R
\name{print.summary_greek}
\alias{print.summary_greek}
\title{Print Summary for Linear Model Fits With Greek Letters}
\usage{
\method{print}{summary_greek}(
x,
digits = max(3L, getOption("digits") - 3L),
symbolic.cor = x$symbolic.cor,
signif.stars = getOption("show.signif.stars"),
concise = FALSE,
...
)
}
\arguments{
\item{x}{an object used to select a method.}
\item{digits}{minimal number of \emph{significant} digits, see
\code{\link[base]{print.default}}.}
\item{symbolic.cor}{logical. If \code{TRUE}, print the correlations in
a symbolic form (see \code{\link[stats]{symnum}}) rather than as numbers.}
\item{signif.stars}{logical. If \code{TRUE}, \sQuote{significance stars}
are printed for each coefficient.}
\item{concise}{logical.}
\item{...}{
Arguments passed on to \code{\link[base:print]{base::print}}, \code{\link[stats:summary.lm]{stats::summary.lm}}
\describe{
\item{\code{object}}{an object of class \code{"lm"}, usually, a result of a
call to \code{\link[stats]{lm}}.}
\item{\code{correlation}}{logical; if \code{TRUE}, the correlation matrix of
the estimated parameters is returned and printed.}
}}
}
\value{
The function is like print.summary.lm but with Greek
letters in output.
}
\description{
print summary method with Greek letters for class "lm".
}
\details{
It is recommended that the font size of the R console be increased for
better visualization of the symbols,
as some of the symbols are quite small.
}
\examples{
\dontrun{
#Same example as summary.lm and print.summary.lm from stat packages but with Greek letters.
## Annette Dobson (1990) "An Introduction to Generalized Linear Models".
## Page 9: Plant Weight Data.
ctl <- c(4.17,5.58,5.18,6.11,4.50,4.61,5.17,4.53,5.33,5.14)
trt <- c(4.81,4.17,4.41,3.59,5.87,3.83,6.03,4.89,4.32,4.69)
group <- gl(2, 10, 20, labels = c("Ctl","Trt"))
weight <- c(ctl, trt)
lm.D9 <- lm(weight ~ group)
lm.D90 <- lm(weight ~ group - 1) # omitting intercept
coef(lm.D90) # the bare coefficients
sld90 <- greekLetters::summary_greek(lm.D90 <- lm(weight ~ group -1)) # omitting intercept
greekLetters::print.summary_greek(sld90)
}
}
\seealso{
See \code{\link[stats]{summary.lm}} for more details.
}
\author{
Kévin Allan Sales Rodrigues.
}
|
f05f2554f4d73ec4d66e7bd3f0c23a6f5268e0a5 | caeb8764dabd4d0ed17d37e7486ad7e3d714b04e | /inst/doc/a01-introduction.R | dc1a2e0e3b14f5234d77395a9f009ecc445233f7 | [] | no_license | cran/caracas | 579b5cefd2b7a2db85e691d140f2215f4275c3bc | 42b3bf8eb37ddf5b6adde28b33fdb3359ecc4f11 | refs/heads/master | 2023-08-16T18:15:53.677578 | 2023-08-11T13:13:47 | 2023-08-11T15:30:42 | 236,567,703 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,127 | r | a01-introduction.R | ## ---- include = FALSE---------------------------------------------------------
# R code extracted (purled) from a caracas vignette -- the "## ----" lines
# below are knitr chunk markers; edits belong in the source .Rmd.
knitr::opts_chunk$set(
  collapse = TRUE,
  comment = "#>"
)
## ---- message=FALSE-----------------------------------------------------------
library(caracas)
## ---- include = FALSE---------------------------------------------------------
# By default, inline code is shown as-is; when SymPy is unavailable, chunks
# are disabled and inline code is shown unevaluated instead.
inline_code <- function(x) {
  x
}
if (!has_sympy()) {
  # SymPy not available, so the chunks shall not be evaluated
  knitr::opts_chunk$set(eval = FALSE)
  inline_code <- function(x) {
    deparse(substitute(x))
  }
}
## -----------------------------------------------------------------------------
# Build a symbolic expression and show its different representations.
x <- symbol('x')
eq <- 2*x^2 - x
eq
as.character(eq)
as_expr(eq)
tex(eq)
## -----------------------------------------------------------------------------
# Solve, differentiate, and substitute into the expression.
solve_sys(eq, x)
der(eq, x)
subs(eq, x, "y")
## -----------------------------------------------------------------------------
# Symbolic 2x2 matrix and its inverse.
A <- matrix(c("x", 2, 0, "2*x"), 2, 2)
B <- as_sym(A)
B
Binv <- inv(B) # or solve_lin(B)
Binv
tex(Binv)
## -----------------------------------------------------------------------------
# Eigenvalues and eigenvectors of the inverse.
eigenval(Binv)
eigenvec(Binv)
5935b77700590d20c95a2a39d4e8cfb30bd2ece2 | 619c0ba0282a4c2cb9a1b20a14536ef82dc46e8f | /man/read.nea.Rd | 575ddc6a3fb7b72805343102682bcb14d171ee7f | [] | no_license | SEELab/enaR | 796b51159ca43d2338ef441022e2077db516bc7f | 281a0c71f83fb4659c9300801e41d09729dbd261 | refs/heads/develop | 2023-04-26T01:58:20.788858 | 2023-04-22T20:24:54 | 2023-04-22T20:24:54 | 12,623,293 | 14 | 8 | null | 2018-05-17T22:34:51 | 2013-09-05T16:52:53 | R | UTF-8 | R | false | true | 792 | rd | read.nea.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/read.nea.R
\name{read.nea}
\alias{read.nea}
\title{Read an NEA formatted model into a network object}
\usage{
read.nea(file = "file name", sep = ",", warn = TRUE)
}
\arguments{
\item{file}{The name and path for the data file.}
\item{sep}{The separation character used to delimit data values.}
\item{warn}{LOGICAL: should pack warnings be reported?}
}
\value{
Returns the network object.
}
\description{
This function reads in and creates a network object from a NEA formatted
data file (Fath and Borrett 2006).
}
\references{
Fath, B. D., Borrett, S. R. 2006. A Matlab function for
Network Environ Analysis. Environ. Model. Softw. 21, 375-405.
}
\seealso{
\code{\link{write.nea}}
}
\author{
Stuart R. Borrett
}
|
a1fd381505105d3895ea178c9b8f50885be2100d | aebca85114388224fc24481fdfce04be048110db | /R/doClusterTableFCH_fromMatrix.R | 88436d5684bb662d29b889ccde34b77cd191dedf | [] | no_license | mssm-msf-2019/BiostatsALL | 4f79f2fbb823db8a0cbe60172b3dcd54eac58539 | 0623dd13db576b2501783b31d08ae43340f2080b | refs/heads/master | 2020-05-25T15:16:01.949307 | 2019-05-21T18:11:12 | 2019-05-21T18:11:12 | 187,864,190 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,550 | r | doClusterTableFCH_fromMatrix.R | #' A function that creates a heatmap with column indicators on top of each column.
#' @description This function creates a nice heatmap for publication. It puts on top of each column a color indicator of what sample type it is, does clustering, and adds a table in the right margin of the figure with logFoldChanges and respective significance indicators (stars).
#' @param mat numeric matrix to cluster. Usually th raw expression data.
#' @param coefs estimates to be drawn tin the tabl (eg lgFCHs)
#' @param fdrs p values or fdr indicating significance of the estimates; should be the same dim as coefs
#' @param colfc factor to add colored legend to the heatmap
#' @param facsepr factor to separate columns in the heatmaps
#' @param hmeth aglomeration strategy
#' @param dmeth aglomeration distance to cluster
#' @param ColD logical TRUE if cluster by rows and columns
#' @param sn edited names for the rows (if different than the rownames) eg symbols for probesets
#' @param gn gene name
#' @param scl how to scale the matrix (row, column, none, both)
#' @keywords heatmap , expression vizualisation
#' @examples
#' doClusterTableFCH_fromMatrix(mat,ps=rownames(mat),colfac,facsepr=NULL,hmeth='average',dmeth=cor.dist,coefs,fdrs,main="", ColD=FALSE, ss=ps, gn=NULL, breaks=NULL,cexg=1,margins=c(5,20) )
doClusterTableFCH_fromMatrix<-function (mat, ps = rownames(mat), colfac, facsepr = NULL, hmeth = "average",
dmeth = cor.dist, coefs, fdrs, main = "", ColD = FALSE, ss = ps,
gn = NULL, breaks = NULL, cexg = 1, margins = c(5, 20), scl = "row",
p.cuts.stars=c(0.01, 0.05, 0.1), p.symbols.stars= c("**", "*", "+")) {
#mat: numeric matrix to cluster
#coefs: estimates to be drawn tin the table (eg lgFCHs)
#fdrs: p values or fdr indicating significance of the estimates; should be the same dim as coefs
#colfc: factor to add colored legend to the heatmap
#facsepr: factor to separate columns in the heatmaps
#hmeth, dmeth: aglomeration strategy and distance to cluster
#ColD; logical TRUE if cluster by rows and columns
#sn edited names for the rows (if different than the rownames) eg symbols for probesets amed vector with probesets as names
# gn: gene name, named vector with probesets as names
require(stringr)
require(weights)
mat <- mat[ps, ]
ss <- ss[ps]
fdrs <- fdrs[ps, ]
coefs <- coefs[ps, ]
if (!is.null(gn))
gn <- gn[ps]
maxss <- max(sapply(ss, nchar))
adspace <- function(x, n) {
paste(x, substr(" ---------------------", 1, n - nchar(x)),
sep = "")
}
ss <- paste(sapply(ss, adspace, maxss + 3), ": ", sep = "")
ADDzero <- function(x) {
if (grepl(pattern = "\\.", x = x) == TRUE) {
a1 <- unlist(strsplit(x, "\\."))[1]
a2 <- unlist(strsplit(x, "\\."))[2]
if (is.na(a2) == T) {
a2 <- "0"
}
b1 <- paste(str_dup(" ", 4 - nchar(a1)), a1, sep = "")
b1 <- a1
b2 <- paste(a2, str_dup("0", 2 - nchar(a2)), sep = "")
out <- paste(b1, b2, sep = ".")
}
else {
out <- paste(paste(x, ".00", sep = ""), sep = "")
}
return(out)
}
adzero <- function(xv) {
out <- sapply(xv, ADDzero)
nmax <- max(sapply(out, nchar))
sapply(out, function(x, nmx) {
paste(str_dup(" ", nmx - nchar(x)), x, sep = "")
}, nmax)
}
reformatps <- function(p) {
as.numeric(format(p, digit = 3, drop0trailing = TRUE))
}
transformfch <- function(lgfch) {
fch <- sign(lgfch) * (2^abs(lgfch))
return(fch)
}
coef1 <- apply(coefs[ps, ], 2, function(x) {
adzero(as.character(signif(transformfch(x), 3)))
})
rownames(coef1) <- ps
fdrs1 <- apply(fdrs[ps, ], 2, function(x) { ifelse(x < 1e-04, signif(x, 1), round(x, 4)) }) ## MSf deleted this line because uits not needed
#fdrs2 <- apply(fdrs1, 2, starmaker, p.levels = c(0.01, 0.05, 0.1), symbols = c("**", "*", "+"))
fdrs2 <- apply(fdrs, 2, starmaker, p.levels =p.cuts.stars, symbols = p.symbols.stars)
rownames(fdrs2) <- ps
adSpace <- function(x) {
out <- paste(x, str_dup(" ", 4 - nchar(x)), sep = "")
}
fdrs2 <- apply(fdrs2, 2, adSpace)
coef2 <- coef1
print(tail(coef2))
print(tail(fdrs2))
a <- DoHeatmap(mat, colfac = colfac, symb = ss, dmeth = dmeth,
hmeth = hmeth, cex.genes = cexg, ColD = ColD, main = main,
margins = c(5, 10), breaks = breaks, scl = scl)
Tab <- data.frame(Symbol = ss[a$rowInd])
if (!is.null(gn))
Tab <- cbind(Tab, Desc = substr(gn[a$rowInd], 1, 40))
for (i in c(1:ncol(coef2))) {
Tab <- cbind(Tab, paste(coef2[a$rowInd, i], fdrs2[a$rowInd,
i], sep = ""))
}
Tab <- print.table(Tab)
ssTab <- apply(Tab[, ], 1, function(x) {
stackchar(x, sep = "")
})
mat2 <- mat[a$rowInd, ]
rownames(mat2) <- ssTab
ssTab_bold <- do.call(expression, sapply(as.character(ssTab),
function(.x) {
substitute(bold(.x), list(.x = .x))
}))
par(family = "mono")
a <- DoHeatmap(mat2, colfac = colfac, facsepr = facsepr,
symb = ssTab_bold, dmeth = dmeth, hmeth = hmeth, cex.genes = cexg,
ColD = ColD, main = main, margins = margins, breaks = breaks,
scl = scl)
}
|
e36b6dda86e4227e56102d4bbd6aa07542ba2143 | c547b016fb11aaac568a38200f9ea49f504098b9 | /code/Machiene leanring models/ordinal logistic regression.R | 618a92336669ac7e6b470ff07f877d6c967286ed | [] | no_license | john-m-burleson/NHANES-R-Programming | 5c1e656d8c925bb183927f98e85e8946d57a89e2 | 7729b654f7af091b47546bfe44b9614ee262f2fa | refs/heads/master | 2022-11-04T22:06:06.990900 | 2022-10-20T02:20:10 | 2022-10-20T02:20:10 | 121,905,018 | 19 | 3 | null | null | null | null | UTF-8 | R | false | false | 4,849 | r | ordinal logistic regression.R | #lets make sure there are no strangling datasets in our global enviroment
rm(list=ls(all=TRUE))
#lets read into R our dataset
final_data<-read.table(file.choose(),header = TRUE)
#lets do some final data manipulation on BMXBMI and RIDAgeyearn
#lets first start with our age variable lets start our by making our values ranging from 18-84
final_data$RIDAGEYR[which(final_data$RIDAGEYR<= 20)] = 20
final_data$RIDAGEYR[which(final_data$RIDAGEYR>= 84)] = 84
summary(final_data$RIDAGEYR);hist(final_data$RIDAGEYR)
#now lets bin our BMXBMI variable so that we are looking at something appropriate
final_data$BMXBMI[which(final_data$BMXBMI<= 13)] = 13
#looks all good lets proceed to modeling our data
summary(final_data)
#lets make our variables correct classes
final_data$PFQ061B<-as.ordered(final_data$PFQ061B);class(final_data$PFQ061B)
final_data$CDQ010<-as.factor(final_data$CDQ010);class(final_data$CDQ010)
final_data$BPQ020<-as.factor(final_data$BPQ020);class(final_data$BPQ020)
final_data$MCQ220<-as.factor(final_data$MCQ220);class(final_data$MCQ220)
final_data$MCQ160A<-as.factor(final_data$MCQ160A);class(final_data$MCQ160A)
final_data$MCQ010<-as.factor(final_data$MCQ010);class(final_data$MCQ010)
final_data$DMDEDUC2<-as.factor(final_data$DMDEDUC2);class(final_data$DMDEDUC2)
final_data$RIAGENDR<-as.factor(final_data$RIAGENDR);class(final_data$RIAGENDR)
final_data$DIQ010<-as.factor(final_data$DIQ010);class(final_data$DIQ010)
str(final_data)
#lets start out first by making a test/train dataset
#how we are going to set up the test/ train datasets simply in a couple of lines.
#we want to set a seed so that our results are reproducable.
set.seed(6475)
Partion_function<-sample(2,nrow(final_data),replace= TRUE,prob=c(.8,.2))
#setting a 80% weight of the observations on the training dataset
train<-final_data[Partion_function==1,]
#setting a 20% weight of our observations on our testing traninning dataset
test<-final_data[Partion_function==2, ]
#fitting regression model
#variables that we droppped from analysis: MCQ220
#as far as our link function goes since the distrubtion of our walking imparment variable is right skewed
# we are going to use a negative log log link function to meet our assumptions as well as better
#match our data
library(MASS)
ordinal<-polr(train$PFQ061B~train$CDQ010+train$BPQ020+train$MCQ160A+train$MCQ010+train$RIDAGEYR+train$DMDEDUC2+train$RIAGENDR+train$BMXBMI+train$DIQ010,train,Hess = TRUE,method =c("loglog"))
#calculting p-vlaues
(ctable<-coef(summary(ordinal)))
p<-pnorm(abs(ctable[,"t value"]),lower.tail = FALSE)*2
(ctable<-cbind(ctable,"p value"=p))
#lets create some 95% confidence intervals for our variables in our model and lets try to visualize our confidence intervals
wald_ci<-confint(ordinal)
#lets round to 3 decimal places
round(wald_ci,digits = 3)
#####################################################################
# Confusion matrrix #
#####################################################################
#predictions accroding to our ordinal logistic regression model
pred<-predict(ordinal,train)
print(pred,digits=3)
#calcaulating confusion matrix for our training dataset
(tab<-table(pred,train$PFQ061B))
#calculating classification error
1-sum(diag(tab))/sum(tab)
#calculating the confusion matrix for our traning dataset
#make sure to rerun the model on the test dataset
pred1<-predict(ordinal,test)
(tab1<-table(pred1,test$PFQ061B))
#calculating classification error
1-sum(diag(tab1))/sum(tab1)
#####################################################################
# assumption check to make sure our algorithim is ok #
#####################################################################
#lets check the assumption of our model
#checking multicolinearity by seeing if our categoirgal variables are related to one another
library(corrplot)
#lets make all of our variables numeric so that we can check corrlation
final_data$PFQ061B<-as.numeric(final_data$PFQ061B);class(final_data$PFQ061B)
final_data$CDQ010<-as.numeric(final_data$CDQ010);class(final_data$CDQ010)
final_data$BPQ020<-as.numeric(final_data$BPQ020);class(final_data$BPQ020)
final_data$MCQ220<-as.numeric(final_data$MCQ220);class(final_data$MCQ220)
final_data$MCQ160A<-as.numeric(final_data$MCQ160A);class(final_data$MCQ160A)
final_data$MCQ010<-as.numeric(final_data$MCQ010);class(final_data$MCQ010)
final_data$DMDEDUC2<-as.numeric(final_data$DMDEDUC2);class(final_data$DMDEDUC2)
final_data$RIAGENDR<-as.numeric(final_data$RIAGENDR);class(final_data$RIAGENDR)
final_data$DIQ010<-as.numeric(final_data$DIQ010);class(final_data$DIQ010)
str(final_data)
#plotting our correlation plot using the corplot
M <- cor(final_data)
corrplot(M, method= "shade")
|
f3d048b35980fedcafa46b1aba8770fbe3c273a2 | 68b0ae6f2f5576518f0285a4eea6a8deea4a5020 | /ui.R | ff84a539b3a07e05c6ffec988afaf939736f12fa | [] | no_license | yansonz/CBR_travelcase | d7c59de71860fe7519a08c0c08a28988e1497293 | caae0b94f874f6ecf12f1815625d8ae063c8d41b | refs/heads/master | 2020-04-17T22:20:17.501642 | 2016-09-11T23:25:33 | 2016-09-11T23:25:33 | 67,955,599 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,866 | r | ui.R | #
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
#system.time(source(paste(filePath, "main.R", sep = "")))
source("main.R")
numOfcases = nrow(travelCases)
numOfclusters = round(sqrt(numOfcases))
cl_min = round(numOfclusters * .8)
cl_max = round(numOfclusters * 1.2)
# palette(c("#E41A1C", "#377EB8", "#4DAF4A", "#984EA3",
# "#FF7F00", "#FFFF33", "#A65628", "#F781BF", "#999999"))
# Define UI for application that draws a histogram
shinyUI(navbarPage("Your Travel Plan!",
tabPanel("Recommendation",
sidebarLayout(
sidebarPanel(
selectInput("input_season", "Season", seasons),
selectInput("input_region", "Region", region),
#selectizeInput('input_region', 'Region', choices = region),
selectInput("input_holidayType", "Holiday Type", holidayType),
#numericInput("input_numOfPerson", "Number Of Persons", value = 1, min = 1, max = 12),
sliderInput("input_numOfPerson", "Number Of Persons", min=1, max=12, value=4),
#numericInput("input_duration", "Duration", value = 1, min = 1, max = 25),
sliderInput("input_duration", "Duration", min=1, max=25, value=5),
selectInput("input_transportation", "Transportation", transportation),
selectInput("input_price", "Price", price),
selectInput("input_accommodation", "Accommodation", accommodation),
h3("Submit"),
submitButton("Update Result")
),
mainPanel(
h4("TOP 5 Best Travel Cases for You"),
DT::dataTableOutput("result")
#verbatimTextOutput("result")
)
)
),
# #
# tabPanel("Similarity Table",
# sidebarLayout(
# sidebarPanel(
# selectInput("similarity", "Choose a similarity table:",
# choices = c("Total Similarity", names(travelCases)[3:10])),
# #choices = "All"),
# helpText("Note: while the data view will show only the specified",
# "number of observations, the summary will still be based",
# "on the full dataset."),
# submitButton("Update View")
# ),
#
# mainPanel(
# h4("Observations"),
# #tableOutput("similarityView")
# DT::dataTableOutput("similarityView")
# )
# )
# ),
#
tabPanel("Similarity Table",
h4("Observations"),
DT::dataTableOutput("similarityView")
),
tabPanel("K-means Clustering",
sidebarLayout(
sidebarPanel(
h3("Quick Search using Clustering Algorithm"),
helpText("I DON'T KNOW WHAT I WANT!"),
helpText("Simiply input the information when you want to leave,",
"and how much your budget is."),
selectInput("cluster_season", "Season", seasons, selected = "June"),
selectInput("cluster_price", "Price", price, selected = "> 1500 and <= 2000"),
sliderInput('numOfClusters', 'Cluster count', numOfclusters,
min = cl_min, max = cl_max),
h3("Submit"),
submitButton("Update Result")
),
mainPanel(
plotOutput('plot1'),
h4("Result"),
DT::dataTableOutput("result_kmean")
)
)
),
tabPanel("Travel Dataset",
DT::dataTableOutput("dataset"))
))
|
fc867504cc6ea76d250f133a4bd01f6a3e70602a | a16224a32558f9ec254688fb7c83de3c4c35fc25 | /R/list.extract.R | 4859cb8838a14bb01bf951189de7e22d08bc2f24 | [
"MIT"
] | permissive | renkun-ken/rlist | fb807f6f0162f52b08aa141104566f7c6e8c2dd6 | bfaa2c50f79c9e8cdb3bce481262829549ba8e7e | refs/heads/master | 2023-03-21T19:47:49.571870 | 2023-03-11T12:54:35 | 2023-03-11T12:54:35 | 20,375,257 | 187 | 31 | NOASSERTION | 2022-06-12T14:48:11 | 2014-06-01T10:33:11 | R | UTF-8 | R | false | false | 163 | r | list.extract.R | #' Extract an element from a list or vector
#' @export
#' @examples
#' x <- list(a=1, b=2, c=3)
#' list.extract(x, 1)
#' list.extract(x, 'a')
list.extract <- `[[`
|
e0309609c206918faf9ea955c121ebf58b03af4d | 9cdcd086bbe55a0e703e595ea6bfc8b41e17fb4d | /Vatsa_Supervised_titanic_svm.R | 8b144185cd525629ecd4f59799dd052c94367e08 | [] | no_license | vatsashah/DataScience | b7cc3357a8cf1d2f66cd1ef93baac180fbb3a11f | 730a06eb360f061508db4a561ab44ce9b37df873 | refs/heads/master | 2020-06-13T20:44:28.564225 | 2020-01-27T13:48:22 | 2020-01-27T13:48:22 | 194,782,650 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,971 | r | Vatsa_Supervised_titanic_svm.R | #Vatsa Shah
#Supervised learning with Support Vector Machine Algorithm
getwd()
rm(list = ls())
library(caret)
train=read.csv("C:/Users/Vatsa Shah/Documents/train.csv")
test=read.csv("C:/Users/Vatsa Shah/Documents/test.csv")
#creating new column to determine whether it is part of train data or test data
train$istrain = TRUE
test$istrain = FALSE
dim(train)
dim(test)
test$Survived = NA #creating new column (Survived) in test data.
fulldata=rbind(test,train) # merging test and train data
table(fulldata$istrain)
#assigning 'S' value to unknown values of embarked
fulldata[fulldata$Embarked=='',"Embarked"]='S'
table(fulldata$Embarked)
# finding median of age from fulldata set
medianage=median(fulldata$Age,na.rm = TRUE)
medianage
# assigning unknown values of age as median of age's from fulldata
fulldata[is.na(fulldata$Age),"Age"]=medianage
table(is.na(fulldata$Age))
# finding median of fare from fulldata set
medianfare=median(fulldata$Fare,na.rm = TRUE)
medianfare
# assigning unknown values of fare as median of fare's from fulldata
fulldata[is.na(fulldata$Fare),"Fare"]=medianfare
table(is.na(fulldata$Fare))
fulldata$Pclass=as.factor(fulldata$Pclass)
fulldata$Sex=as.factor(fulldata$Sex)
fulldata$Embarked=as.factor(fulldata$Embarked)
#split data set back into train and test
train=fulldata[fulldata$istrain==TRUE,]
test=fulldata[fulldata$istrain==FALSE,]
Actual_data=read.csv("C:/Users/Vatsa Shah/Documents/gender_submission.csv")
train$Survived=as.factor(train$Survived)
ModFit_SVM = train(Survived~ Pclass + Sex + Age + SibSp + Parch + Fare ,train,method="svmLinear",preProc=c("center","scale"))
predict_SVM = predict(ModFit_SVM,newdata=test)
predict_SVM
confusionMatrix(predict_SVM,factor(Actual_data$Survived))
ans.data=as.data.frame(test$PassengerId)
ans.data$Survived=predict_SVM
write.csv(ans.data,"Vatsa_Supervised_titanic_svm.csv", row.names = FALSE)
|
51df42095a0282ab09c011a65c577e934427ce6c | 1e75fb8258facec122f6afcb9e78a81edcc5e83e | /code/exploratory_data_analysis.R | 755243f9b1ebb603b1cb566061b0219f5ba3d385 | [
"MIT"
] | permissive | piinghel/home_bias | e0d941ddeb20dd11c1bd4ca8c7998135e9debbdc | c85f6291533156a757e3da891c499bca4f8ee6a7 | refs/heads/master | 2021-03-27T07:22:18.453809 | 2020-04-04T13:27:43 | 2020-04-04T13:27:43 | 247,801,144 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,950 | r | exploratory_data_analysis.R | # clear environment
rm(list = ls())
# load libraries
library(tidyverse)
library(readxl)
library(lubridate)
# plotting
library(patchwork)
library(viridis)
library(ggthemes)
library(RColorBrewer)
#---------------------------
# Global parameters
PALETTE <- RColorBrewer::brewer.pal(n = 8, name = "Dark2")
STYLE <- "steelblue"
#---------------------------
# TODO total number of firms per region is constant (ask professor)
# also I think that using counts is more informative than % since it gives
# you more information
df <- read_excel("data/data_rating.xlsx", sheet = "Long") %>%
# make everything lowercase
dplyr::rename(
date = Date,
pd = PD,
moodys = Moodys,
sp = SP,
region = Region,
sector = Sector
)
df <- df %>% mutate(
# compute the log of probability of default (pd)
pd_log = log(pd),
year = lubridate::year(date),
month = lubridate::month(date),
sp = ifelse(sp > 17, 17, sp),
moodys = ifelse(moodys > 17, 17, moodys)
)
#----------------------------------------
# 1) PROBABITLITY OF DEFAULT (PD)
#----------------------------------------
# overall distribution of PD
ggplot(df, aes(x = pd_log)) +
geom_density(alpha = .7, fill = PALETTE[1]) +
labs(x = "Probability of default (PD) Logarithmic", y = "Density")
ggsave("output/exploratory_data_analysis/figures/figure1.png", device = "png")
# condition by region
ggplot(df, aes(x = pd_log, fill = region)) +
geom_density(alpha = .7) +
labs(x = "Probability of Default (PD) Logarithmic", y = "Density") +
scale_fill_brewer(palette = "Dark2")
ggsave("output/exploratory_data_analysis/figures/figure2.png", device = "png")
# condition by sector
ggplot(df, aes(x = pd_log, fill = sector)) +
geom_density(alpha = .7) +
labs(x = "Probability of Default (PD) Logarithmic", y ="Density") +
scale_fill_brewer(palette = "Dark2")
ggsave("output/exploratory_data_analysis/figures/figure3.png", device = "png")
# condition by sector and region
ggplot(df, aes(x = pd_log, fill = region)) +
geom_density(alpha = .7) +
labs(x = "Probability of Default (PD) Logarithmic", y ="Density") +
scale_fill_brewer(palette = "Dark2") +
facet_grid(sector ~ ., scales = "free_y")
ggsave("output/exploratory_data_analysis/figures/figure4.png", device = "png")
# visualize over time by sector
df %>% dplyr::group_by(date, sector, region) %>%
dplyr::summarise(central_measure_pd = median(pd_log, na.rm = TRUE)) %>%
# plot
ggplot(., aes(x = date, y = central_measure_pd, color = sector)) +
geom_line() + geom_point() +
scale_color_brewer(palette = "Dark2") +
geom_vline(xintercept=as.POSIXct("2009-03-01"), size=1, color="red", linetype="dashed") +
annotate("text", x = as.POSIXct("2014-07-01"), y = -4,
label = "March 6, 2009: The Dow Jones hit its lowest level") +
#scale_y_continuous(trans = "log10") +
facet_grid(region ~ ., scales = "free_y") +
labs(x = "Date", y = "Median Probability of Default (MPD) Logarithmic ", fill = "Sector")
ggsave("output/exploratory_data_analysis/figures/figure5.png", device = "png",
units = "cm", height = 18, width = 24)
#----------------------------------------
# 2) Standards and Poor (sp) and Moodys
#----------------------------------------
#-----------------------------------------------------------
# 2.1) Statitic visualisation (don't take time into account)
#-----------------------------------------------------------
# include missing values (NA)
ggplot(df, aes(as.factor(sp))) +
geom_bar(fill = "steelblue") + labs(x = "Rating category", y = "Count") +
ggtitle("Standard & Poor's (S&P) (missing values (NA) included)")
ggsave("output/exploratory_data_analysis/figures/figure6.png", device = "png")
ggplot(df, aes(as.factor(moodys))) +
geom_bar(fill = "steelblue") + labs(x = "Rating category", y = "Count") +
ggtitle("Moodys (missing values (NA) included)")
ggsave("output/exploratory_data_analysis/figures/figure7.png", device = "png")
# ignore missing values (NA)
df %>% group_by(sp) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = sp, y = n)) + geom_bar(stat = "identity", fill = PALETTE[2]) +
labs(x = "Rating category", y = "Count") + ggtitle("Standard & Poor's (S&P)") +
df %>% group_by(moodys) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = moodys, y = n)) + geom_bar(stat = "identity", fill = PALETTE[1]) +
labs(x = "Rating category", y = "") + ggtitle("Moodys")
ggsave("output/exploratory_data_analysis/figures/figure8.png", device = "png")
# condition on region
df %>% group_by(sp, region) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = sp, y = n, fill = region)) +
geom_bar(stat = "identity", position = position_dodge()) +
scale_fill_brewer(palette = "Dark2") +
labs(x = "Rating category", y = "Count") + ggtitle("Standard & Poor's (S&P)") +
df %>% group_by(moodys, region) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = moodys, y = n, fill = region)) +
geom_bar(stat = "identity", position = position_dodge()) +
scale_fill_brewer(palette = "Dark2") +
labs(x = "Rating category", y = "") + ggtitle("Moodys") +
plot_layout(guides = 'collect')
ggsave("output/exploratory_data_analysis/figures/figure9.png", device = "png",
units = "cm", height = 14, width = 20)
# condition on sector
df %>% group_by(sp, sector) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = sp, y = n, fill = sector)) +
geom_bar(stat = "identity", position = position_dodge()) +
scale_fill_brewer(palette = "Dark2") +
labs(x = "Rating category", y = "Count") +
ggtitle("Standard & Poor's (S&P)") +
df %>% group_by(moodys, sector) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = moodys, y = n, fill = sector)) +
geom_bar(stat = "identity", position = position_dodge()) +
scale_fill_brewer(palette = "Dark2") +
labs(x = "Rating category", y = "") +
ggtitle("Moodys") +
plot_layout(guides = 'collect')
ggsave("output/exploratory_data_analysis/figures/figure10.png", device = "png",
units = "cm", height = 14, width = 20)
# condition both on region and sector
# 1) Standard & Poor's (S&P)
df %>% group_by(sp, sector, region) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = sp, y = n)) +
geom_bar(stat = "identity",
position = position_dodge(),
fill = "steelblue") +
labs(x = "Rating category", y = "Count") +
facet_grid(sector ~ region, scales = "free_y") +
ggtitle("Standard & Poor's (S&P)")
ggsave("output/exploratory_data_analysis/figures/figure11.png", device = "png")
# 2) Moodys
df %>% group_by(moodys, sector, region) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = moodys, y = n)) +
geom_bar(stat = "identity",
position = position_dodge(),
fill = "steelblue") +
labs(x = "Rating category", y = "Count") +
facet_grid(sector ~ region, scales = "free_y") +
ggtitle("Moodys")
ggsave("output/exploratory_data_analysis/figures/figure12.png", device = "png")
#-----------------------------------------------------------
# 2.1) Dynamic visualisation (take time into account)
#-----------------------------------------------------------
# check the number of observations over time
p1 <-
df %>% select(id, sp, region, year) %>% distinct(id, year, region) %>%
dplyr::group_by(year, region) %>%
tally() %>% ggplot(., aes(x = year, y = n, color = region)) +
geom_point() + geom_line() + labs(x = "", y = "Unique firms") +
scale_color_brewer(palette = "Dark2") +
ggtitle("S&P")
p2 <-
df %>% select(id, moodys, region, year) %>% distinct(id, year, region) %>%
dplyr::group_by(year, region) %>%
tally() %>% ggplot(., aes(x = year, y = n, color = region)) +
geom_point() + geom_line() + labs(x = "", y = "") +
scale_color_brewer(palette = "Dark2") +
ggtitle("Moody's")
# number of observations per year
p3 <-
df %>% select(id, sp, region, year) %>% dplyr::group_by(year, region) %>%
tally() %>% ggplot(., aes(x = year, y = n, color = region)) +
geom_point() + geom_line() + labs(x = "Year", y = "Observations") +
scale_color_brewer(palette = "Dark2") +
ggtitle("S&P")
# number of observations per year
p4 <-
df %>% select(id, moodys, region, year) %>% dplyr::group_by(year, region) %>%
tally() %>% ggplot(., aes(x = year, y = n, color = region)) +
geom_point() + geom_line() + labs(x = "Year", y = "") +
scale_color_brewer(palette = "Dark2") +
ggtitle("Moody's")
# group together in one plot
(p1 + p2) / (p3 + p4) + plot_layout(guides = "collect")
ggsave("output/exploratory_data_analysis/figures/figure13.png", device = "png")
# Look at the number of categories of rating over time: Standard & Poor's (S&P)
p5 <- df %>% select(id, sp, year) %>% distinct(id, sp, year) %>%
group_by(sp, year) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = year, y = sp)) +
geom_tile(aes(fill = n)) +
scale_fill_viridis(option = "magma") + theme_tufte(base_family = "Helvetica") +
labs(x = "Year", y = "Rating category", fill = "Observations") + ggtitle("Standard & Poor's (S&P)")
# Look at the number of categories of rating over time: Moodys
p6 <-
df %>% select(id, moodys, year) %>% distinct(id, moodys, year) %>%
group_by(moodys, year) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = year, y = moodys)) +
geom_tile(aes(fill = n)) +
scale_fill_viridis(option = "magma") + theme_tufte(base_family = "Helvetica") +
labs(x = "Year", y = "", fill = "Observations") + ggtitle("Moodys")
# group together in one figure
p5 + p6
ggsave("output/exploratory_data_analysis/figures/figure14.png", device = "png",
units = "cm", height = 12, width = 24)
# SP: Europe and US
df %>% select(id, sp, year, region) %>% distinct(id, sp, year, region) %>%
group_by(sp, region, year) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = year, y = sp)) +
geom_tile(aes(fill = n)) +
scale_fill_viridis(option = "magma") + theme_tufte(base_family = "Helvetica") +
facet_grid(. ~ region) +
labs(x = "Year", y = "", fill = "Observations") + ggtitle("Standard & Poor's (S&P)")
ggsave("output/exploratory_data_analysis/figures/figure15.png", device = "png",
units = "cm", height = 12, width = 24)
# Moodys: Europe and US
df %>% select(id, moodys, year, region) %>% distinct(id, moodys, year, region) %>%
group_by(moodys, region, year) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = year, y = moodys)) +
geom_tile(aes(fill = n)) +
scale_fill_viridis(option = "magma") + theme_tufte(base_family = "Helvetica") +
facet_grid(. ~ region) +
labs(x = "Year", y = "Rating category", fill = "Observations") + ggtitle("Moodys")
ggsave("output/exploratory_data_analysis/figures/figure16.png", device = "png",
units = "cm", height = 12, width = 24)
# SP: Sector
df %>% select(id, sp, year, sector) %>% distinct(id, sp, year, sector) %>%
group_by(sp, sector, year) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = year, y = sp)) +
geom_tile(aes(fill = n)) +
scale_fill_viridis(option = "magma") + theme_tufte(base_family = "Helvetica") +
facet_grid(. ~ sector) +
labs(x = "Year", y = "Rating category", fill = "Observations") + ggtitle("Standard & Poor's (S&P)")
ggsave("output/exploratory_data_analysis/figures/figure17.png", device = "png",
units = "cm", height = 12, width = 24)
# Moodys: Europe and US
df %>% select(id, moodys, year, sector) %>% distinct(id, moodys, year, sector) %>%
group_by(moodys, sector, year) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = year, y = moodys)) +
geom_tile(aes(fill = n)) +
scale_fill_viridis(option = "magma") + theme_tufte(base_family = "Helvetica") +
facet_grid(. ~ sector) +
labs(x = "Year", y = "Rating category", fill = "Observations") + ggtitle("Moodys")
ggsave("output/exploratory_data_analysis/figures/figure18.png", device = "png",
units = "cm", height = 12, width = 24)
# SP: Sector and Region
df %>% select(id, sp, year, sector, region) %>% distinct(id, sp, year, sector, region) %>%
group_by(sp, sector, region, year) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = year, y = sp)) +
geom_tile(aes(fill = n)) +
scale_fill_viridis(option = "magma") + theme_tufte(base_family = "Helvetica") +
facet_grid(sector ~ region) +
labs(x = "Year", y = "Rating category", fill = "Observations") +
ggtitle("Standard & Poor's (S&P)")
ggsave("output/exploratory_data_analysis/figures/figure19.png", device = "png",
units = "cm", height = 12, width = 24)
# Moodys: Sector and Region
df %>% select(id, moodys, year, sector, region) %>% distinct(id, moodys, year, sector, region) %>%
group_by(moodys, sector, region, year) %>% tally() %>% na.omit() %>%
ggplot(., aes(x = year, y = moodys)) +
geom_tile(aes(fill = n)) +
scale_fill_viridis(option = "magma") + theme_tufte(base_family = "Helvetica") +
facet_grid(sector ~ region) +
labs(x = "Year", y = "Rating category", fill = "Observations") + ggtitle("Moodys")
ggsave("output/exploratory_data_analysis/figures/figure20.png", device = "png",
units = "cm", height = 12, width = 24)
|
3f14e7922fe2edb25c32560a4738ef40a2a09686 | d87af6218a5edcdcd5857129d2f88a8333a6e5b1 | /04_rfmix/including_mais_samples/seven_way/03.2_rfmixQ-proportion-plot.R | 1a2268cee468602767d5d5c06ddf13836ee8c96c | [
"MIT"
] | permissive | mcps-analysts/mcps-genetic-cohort-profile | f578b5e8c59a9a378e60274a7ff6bf3605687bc8 | fab2ea2d726bffe8d1b37c41daf3f00c3429f153 | refs/heads/main | 2023-04-14T21:41:17.027004 | 2023-02-13T18:08:57 | 2023-02-13T18:08:57 | 598,123,861 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,492 | r | 03.2_rfmixQ-proportion-plot.R |
"%&%" <- function(a, b) paste0(a, b)
library("data.table")
library("tidyverse")
serv.dir <- "./"
work.dir.serv <- serv.dir %&%
"popgen/04_rfmix/" %&%
"including_mais_samples/seven_way/"
input.dir <- work.dir.serv %&% "input_files/"
output.dir <- work.dir.serv %&% "output_files/"
pop.file <- serv.dir %&% "shared/reference_datasets/" %&%
"mais_information/reference-population-labels.txt"
mais.ref.df <- fread(serv.dir %&% "shared/reference_datasets/" %&%
"mais_information/mais-population-info_NJtree-regions.txt")
rgnkey.dir <- serv.dir %&%
"projects/freeze_145k/03_rgc-qc-steps/input_files/"
rgnkey.file = rgnkey.dir %&% "freeze_145k_id-key.csv"
rgn.link.df <- fread(rgnkey.file)[, c(1, 3)]
names(rgn.link.df) <- c("MCPS.ID", "IID")
rgn.link.df$IID <- as.character(rgn.link.df$IID)
rfmix.q.df <- fread(work.dir.serv %&% "output_files/"%&%
"global-ancestry-estimates.txt")
join.df <- inner_join(rfmix.q.df, rgn.link.df, by="IID")
mcps.sub.df <- filter(rfmix.q.df, IID %in% join.df$IID)
mcps.sub.df <- inner_join(mcps.sub.df, rgn.link.df, by="IID")
mcps.sub.df <- dplyr::select(mcps.sub.df, one_of("MCPS.ID", "AFRICA", "EUROPE",
"MEXICO_C", "MEXICO_N", "MEXICO_NW", "MEXICO_S", "MEXICO_SE"))
names(mcps.sub.df)[1] <- "IID"
ref.sub.df <- filter(rfmix.q.df, !(IID %in% join.df$IID))
rfmix.q.df <- rbind(mcps.sub.df, ref.sub.df)
ref.sub.df <- filter(rfmix.q.df, !(grepl("MCPS", IID)))
mcps.sub.df <- filter(rfmix.q.df, (grepl("MCPS", IID)))
ref.id.vec <- purrr::map(ref.sub.df$IID, function(s){
li <- strsplit(s, split="-")[[1]]
paste0(li[2:length(li)], collapse="-")
}) %>% as.character(.)
ref.sub.df$IID <- ref.id.vec
mcps.sub.df <- mcps.sub.df#[c(1:1000), ]## Full Set
rfmix.q.df <- rbind(mcps.sub.df, ref.sub.df)
write_rds(x=rfmix.q.df, path=work.dir.serv%&%"output_files/rfmix.q.df.RDS")
pop.df <- fread(pop.file)
ref.df <- c()
pb <- txtProgressBar(min=0, max=dim(rfmix.q.df)[1], style=3)
for (i in 1:dim(rfmix.q.df)[1]){
setTxtProgressBar(pb, i)
samp <- rfmix.q.df$IID[i]
sub.df <- filter(pop.df, sample==samp)
if (dim(sub.df)[1]==0){
sub.df <- data.table("sample"=samp, "population"="MCPS",
"region"="AMERICA", stringsAsFactors=F)
}
ref.df <- rbind(ref.df, sub.df)
}
names(ref.df)[1] <- "IID"
write_rds(x=ref.df, path=work.dir.serv%&%"output_files/ref.df.RDS")
library("viridis")
reformat_df <- function(df, k=3){
out.df <- c()
pb <- txtProgressBar(min=0, max=dim(df)[1], style=3)
for (i in 1:dim(df)[1]){
setTxtProgressBar(pb, i)
row.df <- df[i, ]
prop.vec <- row.df[, (dim(row.df)[2]-k+1):dim(row.df)[2]] %>% as.numeric(.)
grp.names <- row.df[, (dim(row.df)[2]-k+1):dim(row.df)[2]] %>% names(.)
build.df <- data.frame("IID"=row.df$IID,
"Proportion"=prop.vec, "Ancestry"=grp.names,
stringsAsFactors = F)
out.df <- rbind(out.df, build.df)
}
return(out.df)
}
pop_plot <- function(sub.df, col.vec, hide.text=TRUE, hide.legend=FALSE){
plt <- ggplot(data=sub.df, aes(x=IID, y=Proportion)) +
geom_bar(stat="identity",
aes(fill=Ancestry, col=Ancestry)) +
scale_y_continuous(breaks=seq(0, 1, 0.1)) +
scale_fill_manual(values=col.vec) +
scale_color_manual(values=col.vec) +
facet_wrap(~population, scales="free_x",
strip.position="bottom",
nrow=1) +
theme(axis.text.x=element_blank(),
axis.title.x = element_blank(),
axis.ticks.x=element_blank(),
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.placement = "outside",
strip.background = element_rect(fill="white"),
strip.text = element_text(size=6))
if (hide.text==TRUE){
plt <- plt + theme(axis.text=element_blank(),
axis.title=element_blank(),
axis.ticks=element_blank())
}
if (hide.legend==TRUE){
plt <- plt + theme(legend.position ="none")
}
return(plt)
}
region_plot <- function(sub.df, col.vec, hide.legend=FALSE){
plt <- ggplot(data=sub.df, aes(x=IID, y=Proportion)) +
geom_bar(stat="identity",
aes(fill=Ancestry, col=Ancestry)) +
scale_y_continuous(breaks=seq(0, 1, 0.1)) +
scale_fill_manual(values=col.vec) +
scale_color_manual(values=col.vec) +
facet_wrap(~region, scales="free_x",
strip.position="bottom",
nrow=1) +
theme(axis.text.x=element_blank(),
axis.title.x = element_blank(),
axis.ticks.x=element_blank(),
panel.background = element_blank(),
panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.placement = "outside",
strip.background = element_rect(fill="white"),
strip.text = element_text(size=6))
if (hide.legend==TRUE){
plt <- plt + theme(legend.position ="none")
}
return(plt)
}
rfmixQ.plt.df <- reformat_df(rfmix.q.df, k=7)
write_rds(x=rfmixQ.plt.df, path=work.dir.serv%&%
"output_files/rfmixQ.plt.df.RDS")
lev.vec <- (filter(rfmixQ.plt.df, Ancestry=="EUROPE") %>%
arrange(desc(Proportion)))$IID %>% unique(.)
plt.df$IID <- factor(plt.df$IID, levels=lev.vec)
plt.df$Ancestry <- factor(plt.df$Ancestry,
levels=c("AFRICA", "EUROPE",
"MEXICO_C", "MEXICO_S", "MEXICO_SE", "MEXICO_NW", "MEXICO_N"))
library("cowplot")
vir.vec <- viridis(20)
cvec <- c("#FDE725FF", "#2D718EFF", "#FDBF6F",
"#FB9A99", "#B3367AFF", "#FF7F00", "#E31A1C")
plt1a <- region_plot(filter(plt.df, population!="MCPS"),
col.vec=cvec, hide.legend=T)
plt1b <- pop_plot(filter(plt.df, population=="MCPS"), col.vec=cvec, hide.text=F,
hide.legend = F)
plt1.full <- cowplot::plot_grid(plt1a, plt1b, nrow=1, rel_widths = c(1, 5))
plt1.mcps <- cowplot::plot_grid(plt1b, nrow=1)
#AMR-MAIS-North "#E31A1C"
#AMR-MAIS-Northwest "#FF7F00"
#AMR-MAIS-Central "#FDBF6F"
#AMR-MAIS-South "#FB9A99"
#AMR-MAIS-Southeast "#B3367AFF"
local.dir <- "popgen/04_rfmix/including_mais_samples/seven_way/"
ggsave(plot=plt1.mcps, filename=work.dir.serv%&%
"plots/rfmix.admixture-plot-freeze150k.png",
height=2.5, width =12, type = "cairo")
ggsave(plot=plt1.full, filename=work.dir.serv%&%
"plots/rfmix.admixture-plot-freeze150k-with-references.png",
height=2.5, width =15, type = "cairo")
|
76e3fde74430d76ef35ce509cd03c977038c3703 | 81d2fb20fce5a2b96ddba7a03202719f988eb2be | /Scripts/Rx_fri_mapoutput_TCSI.R | 1da6e4330ec65e5310df333ae394344b3684a031 | [] | no_license | LANDIS-II-Foundation/Project-Tahoe-Central-Sierra-2019 | 2fe775047783f10d065c363516554435fc579346 | 7111854053550a87f818ffd43b758989df90726c | refs/heads/master | 2021-07-25T04:02:27.382377 | 2021-07-12T15:37:10 | 2021-07-12T15:37:10 | 185,416,263 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 5,157 | r | Rx_fri_mapoutput_TCSI.R | ####Call necessary packages###
library(raster)
library(rgdal)
library(ggplot2)
library(dplyr)
library(spatialEco)
library(Hmisc)
######Set working directory####
p <- "I:/TCSI/Round2_outputs/"
setwd(p)
clim_dir <- c("CANESM85", "CNRM85", "GFDL85", "HADGEM85", "MIROC5_85")
scen_dir <- c("Scenario1_1", "Scenario2_1", "Scenario3_1", "Scenario4_1", "Scenario5_1", "Scenario6_1")
timesteps <- 1:80
TOT_TCSI <- raster("F:/TCSI/Round2_outputs/CANESM85/SSP2_Scenario1_1/rx_equal1.tif")
TOT_TCSI[TOT_TCSI == 0] <- NA
plot(TOT_TCSI)
######################PROJECT RASTER SPATIALLY#########################################
rasterNoProj <- raster(nrow = 800, ncol = 650)
xMin = -1810455
yMin = -499545
res <- 180
xMax <- xMin + (rasterNoProj@ncols * res)
yMax <- yMin + (rasterNoProj@nrows * res)
rasExt <- extent(xMin,xMax,yMin,yMax)
rasterNoProj@extent <- rasExt
crs(rasterNoProj) <- "EPSG:2163"
rasterNoProj@crs
rasterNoProj@extent
rasterNoProj
#####################################################################################
scen_data <- NULL
all_data <- NULL
i <- clim_dir[1]
j <- lf[1]
k <- scen_rep[1]
timesteps <-1:80
lf_existing <- list.files("I:/TCSI/Round2_outputs/rx_fire_maps/")
r1 <- raster("I:/TCSI/Round2_outputs/CANESM85/SSP2_Scenario1_1/scrapple-fire/ignition-type-10.img")
plot(r1)
freq(r1)
for(i in clim_dir){
path <- paste0(p,i,"/")
lf <- list.files(path = path, full.names = F, include.dirs = T)
for(j in lf){
path <- paste0(p,i,"/",j,"/scrapple-fire/")
setwd(path)
print(path)
lf <- list.files(pattern = "ignition.*.img")
timesteps <- length(lf)
timesteps <- c(1:timesteps)
rnameout <- paste0("I:/TCSI/Round2_outputs/rx_fire_maps/fire_totfire_",i,"_",j,".tif")
#existing_name <- paste0("HS_fire_gt100ha_",i,"_",j,".tif")
#skipval <- grep(existing_name, lf_existing)
#if( length(skipval) == 0){
binary.burn.map <- lapply(timesteps, function (timesteps){
r <- raster (lf[timesteps])
r4 <- r == 4
# r3 <- r == 3
# r4 <- r2 + r3
#r1 <- clump(r)
#fr1 <- freq(r1)
#fr_value <- which(fr1[,2]>30)
#r2 <- r1
#r2[!(r1[] %in% fr_value)] <- 0
#r2[r2[]>0]<-1
return(r4)
})
binary.burn.map2 <- Reduce (stack, binary.burn.map)
reburn.times.map <- sum (binary.burn.map2)
reburn.times.map@crs <- rasterNoProj@crs
reburn.times.map@extent <- rasterNoProj@extent
print(rnameout)
writeRaster(reburn.times.map, rnameout, overwrite=T)
}
}
#}
####################average across all replicates##########################
wd <- "I:/TCSI/Round2_outputs/fire_maps/"
setwd(wd)
lf <- list.files()
head(lf)
pattern <- "MIROC5"
miroc_only <- grep(pattern = pattern, lf, value = T)
no_miroc <- lf[lf %nin% miroc_only]
scen_dir <- c("Scenario1", "Scenario2", "Scenario3", "Scenario4", "Scenario5", "Scenario6")
i <- scen_dir[1]
for(i in scen_dir){
lf1 <- grep( paste0(i), no_miroc, value = T)
print(lf1)
scen_tot <- sum(stack(lf1))
scen_tot <- raster::mask(scen_tot, TOT_TCSI)
scen_tot@crs <- rasterNoProj@crs
scen_tot@extent <- rasterNoProj@extent
rnameouta <- paste0("no_miroc_fire_avg_", i, ".tif")
scen_avg <- scen_tot / length(lf1)
plot(scen_avg)
writeRaster(scen_avg, rnameouta, overwrite = T)
rnameoutp <- paste0("no_miroc_fire_pct_", i, ".tif")
scen_pct <- (scen_avg / 80) * 100
plot(scen_pct)
writeRaster(scen_pct, rnameoutp, overwrite = T)
rnameoutf <- paste0("no_miroc_fire_fri_", i, ".tif")
scen_fri <- 80 / (scen_avg + 1)
plot(scen_fri)
writeRaster(scen_fri, rnameoutf, overwrite = T)
}
scen6 <- raster("fire_fri_Scenario6.tif")
scen5 <- raster("fire_fri_Scenario5.tif")
scen4 <- raster("fire_fri_Scenario4.tif")
scen3 <- raster("fire_fri_Scenario3.tif")
scen2 <- raster("fire_fri_Scenario2.tif")
scen1 <- raster("fire_fri_Scenario1.tif")
scen6 <- raster("fire_pct_Scenario6.tif")
scen5 <- raster("fire_pct_Scenario5.tif")
scen4 <- raster("fire_pct_Scenario4.tif")
scen3 <- raster("fire_pct_Scenario3.tif")
scen2 <- raster("fire_pct_Scenario2.tif")
scen1 <- raster("fire_pct_Scenario1.tif")
sta1 <- stack(scen1, scen2, scen3, scen4, scen5, scen6)
plot(sta1)
scen6 <- raster("H:/TCSI/hs_fire_maps/hs40_avg_Scenario6_1.tif")
scen5 <- raster("H:/TCSI/hs_fire_maps/hs40_avg_Scenario5_1.tif")
scen4 <- raster("H:/TCSI/hs_fire_maps/hs40_avg_Scenario4_1.tif")
scen3 <- raster("H:/TCSI/hs_fire_maps/hs40_avg_Scenario3_1.tif")
scen2 <- raster("H:/TCSI/hs_fire_maps/hs40_avg_Scenario2_1.tif")
scen1 <- raster("H:/TCSI/hs_fire_maps/hs40_avg_Scenario1_1.tif")
stacltw <- stack(scen1, scen2, scen3, scen4, scen5, scen6)
plot(stacltw)
freq(stacltw)
r1 <- raster("E:/SNPLMA3/hs_fire_maps/HS_fire_gt40ac_Scenario5_MIROC5_8.5_1.tif")
r2 <- raster("E:/SNPLMA3/hs_fire_maps/HS_fire_gt40ac_Scenario5_MIROC5_8.5_2.tif")
r3 <- raster("E:/SNPLMA3/hs_fire_maps/HS_fire_gt40ac_Scenario5_MIROC5_8.5_3.tif")
plot(r1)
plot(r2)
plot(r3)
|
8b635bd41782ac4c0ecd8678eaf866db0b6bebbb | b5964f2dd7c8966c658b5095894c7e0ba242791e | /pair_test_2.r | 5082c9d2fca443a8f45a15973a5e74a958cdbe59 | [] | no_license | spreadstock/model1 | 20c7f71c710a1bb11b9e70991916566a910c3187 | 7127bdf4361b0ee7a22667fa4b70551073b6fb0a | refs/heads/master | 2020-04-16T17:43:09.381493 | 2017-04-19T00:44:02 | 2017-04-19T00:45:29 | 61,471,612 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,058 | r | pair_test_2.r | #setup
# Basic session setup: instrument currency and a fixed timezone so xts
# timestamps are reproducible across machines.
currency("USD")
Sys.setenv(TZ = "UTC")

# Clean up: make sure the blotter/strategy environments exist, then empty
# them so re-running the script starts from a blank slate.
if (!exists(".blotter")) {
  .blotter <- new.env()
}
if (!exists(".strategy")) {
  .strategy <- new.env()
}
for (env in list(.blotter, .strategy)) {
  suppressWarnings(rm(list = ls(envir = env), envir = env))
}
# Backtest configuration --------------------------------------------------
initDate <- "2012-01-01"   # initialization date for portfolio/account objects
startDate <- "2013-01-01"  # first day of data kept for the backtest
endDate <- "2014-12-31"    # last day of data kept for the backtest
initEq <- 1e5              # starting account equity (USD)

# Max position in stock A; max position in stock B will be max * ratio,
# i.e. no hard position limit in Stock B.
MaxPos <- 35000
# How many times to fade; each order's qty will = MaxPos / lvls.
lvls <- 3

# Folder holding the daily bar files for each symbol.
stock.folder.daily <- "C:/important/ideas/stock/projects/model1/StockDatas/2016-08-09-Former_Rehabilitation_leaned/"
#symbList = c("SH601169" ,"SH601328")
symbList = c("SH600353" ,"SZ002123")
for(symbol in symbList)
{
a <- subsetByDateRange(loadStock(stock.folder.daily, symbol, operation.name="all"),startDate, endDate)
assign(symbol,a)
rm(a)
}
stock_daily <- get(symbList[1])
stock_daily <-cbind(stock_daily, get(symbList[2]))
for(symbol in symbList)
{
stock(symbol, currency = "USD", multiplier = 1)
}
qs.strategy <- "qsModel1"
initPortf(qs.strategy, symbols=symbList, initDate = initDate)
initAcct(
qs.strategy,
portfolios = qs.strategy,
initDate = initDate,
currency = "USD",
initEq = initEq
)
initOrders(portfolio = qs.strategy, initDate = initDate)
# osFUN will need to know which symbol is leg 1 and which is leg 2 as well as
# what the values are for MaxPos and lvls. So, create a slot in portfolio to
# hold this info.
pair <- c(1,2 , MaxPos, lvls,0,0,0)
names(pair) <- c(symbList[1], symbList[2], "MaxPos", "lvls","transA","transB","transBInit")
.blotter[[paste('portfolio', qs.strategy, sep='.')]]$pair <- pair
# Create initial position limits and levels by symbol
# allow 3 entries for long and short if lvls=3.
# addPosLimit(portfolio=qs.strategy, timestamp=initDate, symbol=symbList[1],
# maxpos=MaxPos, longlevels=lvls, minpos=-0, shortlevels=lvls)
# addPosLimit(portfolio=qs.strategy, timestamp=initDate, symbol=symbList[2],
# maxpos=MaxPos * 2, longlevels=lvls, minpos=0, shortlevels=lvls)
strategy(qs.strategy, store = TRUE)
#Signal set
#build singal set
#SH601169.Open SH601169.High SH601169.Low SH601169.Close SH601169.Volume
#SH601169.Adjusted SH601328.Open SH601328.High SH601328.Low SH601328.Close
#SH601328.Volume SH601328.Adjusted SMA.SMA30D_1 SMA.SMA30D_2 StockMonth.1.SMA
#StockMonthSMA10.1.SMA StockMonth.2.SMA StockMonthSMA10.2.SMA Spread.SPREAD
#Beta.SPREAD Upper.SPREAD Lower.SPREAD Mean.SPREAD
add.indicator(
strategy = qs.strategy,
name = "SMA",
arguments = list(
x = quote(Cl(mktdata)),
n=15),
label = "SMA30D"
)
add.indicator(
strategy = qs.strategy,
name = "get.montlySMA",
arguments = list(
mktdata = quote(Cl(mktdata)),
n=5),
label = "SMA"
)
add.indicator(
strategy = qs.strategy,
name = "calculate_beta",
arguments = list(
x = quote(Cl(stock_daily))
),
label = "SPREAD"
)
add.signal(
qs.strategy,
name = "sigCrossover",
arguments = list(
columns = c("Close", "StockMonthSMA10.SMA"),
relationship = "gt"
),
label = "StockMCl.gt.SMA"
)
add.signal(
qs.strategy,
name = "sigCrossover",
arguments = list(
columns = c("Close", "StockMonthSMA10.SMA"),
relationship = "lt"
),
label = "StockMCl.lt.SMA"
)
add.signal(
qs.strategy,
name = "sigCrossover",
arguments = list(columns = c("Close", "SMA.SMA30D"), relationship = "gt"),
label = "StockCl.gt.SMA"
)
add.signal(
qs.strategy,
name = "sigCrossover",
arguments = list(columns = c("Close", "SMA.SMA30D"), relationship = "lt"),
label = "StockCl.lt.SMA"
)
add.signal(
qs.strategy,
name = "sigCrossover",
arguments = list(columns = c("BetaTotal.SPREAD", "Upper.SPREAD"), relationship = "gt"),
label = "Spread.cross.upper"
)
add.signal(
qs.strategy,
name = "sigCrossover",
arguments = list(columns = c("BetaTotal.SPREAD", "Lower.SPREAD"), relationship = "lt"),
label = "Spread.cross.lower"
)
add.signal(
qs.strategy,
name = "sigFormula",
arguments = list(
columns = c(
"StockCl.gt.SMA"
),
formula = "(StockCl.gt.SMA == 1)",
cross = FALSE
),
label = "Stock.longEnter"
)
add.signal(
qs.strategy,
name = "sigFormula",
arguments = list(
columns = c(
"Spread.cross.upper"
),
formula = "(Spread.cross.upper == 1)",
cross = FALSE
),
label = "Stock.upperAdj"
)
add.signal(
qs.strategy,
name = "sigFormula",
arguments = list(
columns = c(
"Spread.cross.lower"
),
formula = "(Spread.cross.lower == 1)",
cross = FALSE
),
label = "Stock.lowerAdj"
)
add.signal(
qs.strategy,
name = "sigFormula",
arguments = list(
columns = c(
"StockMCl.lt.SMA"
),
formula = "(StockMCl.lt.SMA == 1)",
cross = FALSE
),
label = "Stock.longExit"
)
# #add rules
add.rule(
qs.strategy,
name = 'ruleSignal',
arguments = list(
sigcol = "Stock.longEnter",
sigval = TRUE,
ordertype = 'market',
orderside = 'long',
replace = FALSE,
prefer = 'Open',
TxnFees="takeTranxFee",
orderset="pairForTrend",
osFUN = 'osSpreadMaxDollar',
tradeSize = floor(MaxPos / 2 / lvls),
maxSize = floor(MaxPos)
),
type = 'enter',
label='longRule'
)
# add.rule(
# qs.strategy,
# name = 'ruleSignal',
# arguments = list(
# sigcol = "Stock.longExit",
# sigval = TRUE,
# orderqty = 'all',
# ordertype = 'market',
# orderside = NULL
# ),
# type = 'exit'
# )
add.rule(
qs.strategy,
name = 'ruleSignal',
arguments = list(
sigcol = "Stock.upperAdj",
sigval = TRUE,
ordertype = 'market',
orderside = 'long',
replace = FALSE,
prefer = 'Open',
TxnFees="takeTranxFee",
orderset="pairForTrend",
osFUN = 'osSpreadSize',
ordersidetype = 'upperAdj'
),
type = 'enter',
label='UpperAdjRule'
)
add.rule(
qs.strategy,
name = 'ruleSignal',
arguments = list(
sigcol = "Stock.lowerAdj",
sigval = TRUE,
ordertype = 'market',
orderside = 'long',
replace = FALSE,
prefer = 'Open',
TxnFees="takeTranxFee",
orderset="pairForTrend",
osFUN = 'osSpreadSize',
ordersidetype = 'lowerAdj'
),
type = 'enter',
label='LowerAdjRule'
)
add.rule(qs.strategy, 'ruleReblance',
arguments=list(rebalance_on='days'),
type='rebalance',
label='rebalance'
)
stopLossPercent <- 0.1
add.rule(
qs.strategy,
name='ruleSignal',
arguments = list(sigcol="Stock.longEnter", sigval=TRUE,
replace=FALSE,
orderside='long',
ordertype='stoptrailing',
tmult=TRUE,
threshold=quote(stopLossPercent),
orderqty='all',
prefer = 'Open',
TxnFees="takeTranxFee",
orderset='pairForTrend'),
type='chain', parent="longRule",
label='StopLossLong',
enabled=FALSE
)
add.rule(
qs.strategy,
name='ruleSignal',
arguments = list(sigcol="Stock.lowerAdj", sigval=TRUE,
replace=FALSE,
orderside='long',
ordertype='stoptrailing',
tmult=TRUE,
threshold=quote(stopLossPercent),
orderqty='all',
prefer = 'Open',
TxnFees="takeTranxFee",
orderset='pairForTrend'),
type='chain', parent="LowerAdjRule",
label='StopLossLower',
enabled=FALSE
)
add.rule(
qs.strategy,
name='ruleSignal',
arguments = list(sigcol="Stock.upperAdj", sigval=TRUE,
replace=FALSE,
orderside='long',
ordertype='stoptrailing',
tmult=TRUE,
threshold=quote(stopLossPercent),
orderqty='all',
prefer = 'Open',
TxnFees="takeTranxFee",
orderset='pairForTrend'),
type='chain', parent="UpperAdjRule",
label='StopLossUpper',
enabled=FALSE
)
#
# ################################################################################
|
d1ea16f93c1910ce9790196395bf9bab28f6f9f1 | 0bcd8e58c426429f5e892acb50841793924f4f14 | /codigos/e6_prac1.R | 6d7833eacda68e944e7c19d763890a0d3ee17799 | [] | no_license | ealaurel/clase_EST_383 | 66a0285567fa013080d37b014f00ca99bdfcdc4e | efb78b4b4f99ba10ea62cffa44109062c5572bd1 | refs/heads/master | 2022-11-19T00:39:27.239480 | 2020-07-15T04:41:04 | 2020-07-15T04:41:04 | 255,178,370 | 0 | 0 | null | null | null | null | ISO-8859-10 | R | false | false | 619 | r | e6_prac1.R | rm(list=ls())
#6 emplenado la ENDSA muestre por aņo y departamneto
#el porcentraje de persona que fuman
bd<-load(url("https://github.com/AlvaroLimber/EST-384/blob/master/data/endsa.RData?raw=true"))
#names(endsa)
#View(endsa)
#str(endsa)
#str(endsa[,4])
#eriquetas de cada variable (preguntas)
attributes(endsa)$var.labels
#porcentaje de personas que fuman por aņo
t1<-table(endsa[,4],endsa[,15])
aux<-addmargins(t1,2)
totaņo<-aux[,3]
totaņo
t1<-t1/totaņo
t1
#porcentaje de personas que fuman por departamento
t2<-table(endsa[,2],endsa[,15])
aux2<-addmargins(t2,2)
totdept<-aux2[,3]
totdept
t2<-t2/totdept
t2
|
2a9b1fbfcfcf2f284c14d3365af78bd959619c3d | 58b690a4301f55fa3b1a1a2a675864d0b2c19620 | /scripts/ChinaHW_cluster/main.R | 3380a28d951509bbd1805e2797a1dff285e6559c | [
"MIT"
] | permissive | rpkgs/RHtestsHelper | 0313a8a9022649248d0490280057310fea6673bd | 23036d69aa77da00f3c1a302aeabe67f2c1931a7 | refs/heads/master | 2023-04-13T18:25:39.188408 | 2023-03-28T07:17:16 | 2023-03-29T04:41:17 | 289,939,439 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,364 | r | main.R | library(Ipaper)
library(lubridate)
library(RHtests)
library(tidymet)
library(tidyfst)
ErrorMSG = ""
devtools::load_all()
devtools::load_all("../RHtests.R")
# 有5个站点出现错误。
# # [data.table]:
# # A data frame: 6 × 14
# site date RH_avg RH_min Tair_avg Tair_max Tair_min Pa_avg Pa_max Pa_min q_mean RH_min2
# <int> <date> <dbl> <int> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl> <dbl>
# 1 50548 2022-09-14 41738 35 41683. 999998 8.6 98.5 98.8 98.4 -1.65 38006.
# 2 54416 2022-12-01 43518. 12 43471. 999998 -12.7 4446. 100000. 102. -1.65 39786.
# 3 54916 2022-08-04 43549. 49 43509. 999998 29.5 99.9 100. 99.8 -1.65 39818.
# 4 58942 2022-10-06 41750. 76 41689. 999998 22 101. 101. 101. -1.65 38018.
# 5 59265 2020-05-13 47699. 65 47644. 999998 23.2 4857. 100000. 99.5 -1.65 43966.
# 6 59265 2020-09-21 41752. 64 41693. 999998 24.9 4262. 100000. 99.4 -1.65 38020.
fix_badValues <- function(df) {
inds_bad <- df[, which(RH_avg >= 200)]
# df[inds_bad, ]
df[inds_bad, `:=`(
RH_avg = NA_real_,
Tair_avg = NA_real_, Tair_max = NA_real_,
Pa_avg = NA_real_, Pa_max = NA_real_,
q_mean = NA_real_, RH_min2 = NA_real_,
HI_max = NA_real_, HI_max_e = NA_real_
)]
invisible()
}
date <- gsub("-", "", format(Sys.Date()))
version <- glue("RHtests_v{date}")
# version <- "RHtests_v20230228"
## Input data
main_RHtests_met2481 <- function(
varname = "RH_avg",
version = "v20230328")
{
sites <- df[, .N, .(site)]$site
st = st_met2481[site %in% sites]
# varname <- "RH_avg"
lst = select(df, all_of(c("site", "date", varname))) %>% split_site()
if (!isTRUE(all.equal(as.character(sites), names(lst)))) {
stop("site order error")
}
# 这个是月尺度的结果
f_stRef <- glue("OUTPUT/ChinaHI/RHtests_{version}_{varname}_st_refer.rda")
f_noRef_mon <- glue("OUTPUT/ChinaHI/RHtests_{version}_{varname}_noRef_monthly.RDS")
f_noRef_day <- glue("OUTPUT/ChinaHI/RHtests_{version}_{varname}_noRef_daily.RDS")
f_Ref_day <- glue("OUTPUT/ChinaHI/RHtests_{version}_{varname}_withRef_daily.RDS")
f_final <- glue("OUTPUT/ChinaHI/OUTPUT_mete2481_1961-2022_RHtests_{version}_{varname}.csv")
# fs = c(f_Ref, f_noRef_mon, f_noRef, f_withRef, f_final)
# file.exists(fs)
if (!file.exists(f_noRef_mon)) {
# sink("log.txt")
res <- homogenize_monthly(df, st_moveInfo, sites, varname, .parallel = TRUE)
res_noRefMon <- RHtests_rm_empty(res)
saveRDS(res_noRefMon, f_noRef_mon)
# sink(NULL)
} else {
res_noRefMon <- readRDS(f_noRef_mon)
}
### withRef
ok("Merging TPs of yearly and monthly input ...")
info <- TP_mergeYM_sites(res_noRefMon)
info2 <- info[abs(year(date) - year(date_year)) <= 1, ][Idc != "No ", ]
sites_adj = info2[, .N, .(site)][, site]
### 2.1. 挑选参考站
if (!file.exists(f_stRef)) {
mat_mon = convert_day2mon(df, varname)
if (!isTRUE(all.equal(colnames(mat_mon), as.character(st$site)))) {
stop("check site names order first!")
}
ok("Finding Reference sites ...")
st_refs <- st_refer(st, mat_mon, nsite = NULL, .parallel = TRUE)
st_refs_opt <- st_refer_opt(st_refs, sites_adj)
d_refs <- melt_list(st_refs_opt, "target")
sites_miss <- setdiff(sites, d_refs$target) %>% as.character()
# length(sites_miss)
save(st_refs, st_refs_opt, d_refs, sites_miss, file = f_stRef)
} else {
load(f_stRef)
}
### 2.2. 带有参考站的(withRef)均一化检测
# ? 如果WithRef未检测到TP,withRef是否有可能检测到?
if (!file.exists(f_Ref_day)) {
inds <- d_refs$target %>% set_names(seq_along(.), .)
m <- nrow(d_refs)
ok("Homogenization withRef ...")
res_ref <- foreach(i = inds, icount()) %dopar% {
runningId(i)
# if (i == 2) break()
site_target <- d_refs$target[i]
site_refer <- d_refs$site[i]
i_t <- match(site_target, sites)
i_r <- match(site_refer, sites)
d_target <- lst[[i_t]]
d_refer <- lst[[i_r]]
d <- merge(d_target, d_refer %>% set_names(c("date", "ref")), all.x = TRUE)
metadata <- get_metadata(d, site_target)
tryCatch({
r <- homogenize.wRef(d, metadata)
}, error = function(e) {
message(sprintf("[%d] %s", i, e$message))
})
}
saveRDS(res_ref, file = f_Ref_day)
} else {
res_ref <- readRDS(f_Ref_day)
}
## 3. 数据清洗
### 3.1. with refer, 含有TP的部分
TPs <- map(res_ref, ~ .$day$TP)
inds_fixed <- which.notnull(TPs)
# > TPs 不为空的站点,采用`homogenize.wRef`修正;其余的采用no-ref进行修正
d_ref <- map(res_ref[inds_fixed], ~ .$day$data[, .(date, QM_adjusted)]) %>%
melt_list("site")
### 3.2. without refer, 含有TP的部分
res_noRef = readRDS(f_noRef_day)
d_noref <- res_noRef[sites_miss] %>%
map(~ .$data[, .(date, QM_adjusted)]) %>%
rm_empty() %>%
melt_list("site")
df_fixed <- rbind(d_ref, d_noref) %>% set_colnames(c("site", "date", varname))
## merge the unfixed and fixed
sites_fixed <- df_fixed$site %>% unique()
df_org = df[!(site %in% sites_fixed), ] %>% select(all_of(c("site", "date", varname)))
df_final <- rbind(df_fixed, df_org)
fwrite(df_final, f_final)
df_final
}
|
f81a3d3acfdba5afec5f46dec17dc7fd106cf00c | a5205e6eb618540fe8a3686f709369bd2e160568 | /author_year_plot.R | 4f272cce4b310ad81d5a935777913fd3a6e2df24 | [] | no_license | millerlp/Misc_R_scripts | 48bfdb49542e30460dbd2bcafbf44ee30682b239 | 01d78c6b94c75b79135ea2b03f458b0100495fbe | refs/heads/main | 2023-07-19T07:30:12.817825 | 2023-07-13T16:25:25 | 2023-07-13T16:25:25 | 7,250,046 | 12 | 19 | null | null | null | null | UTF-8 | R | false | false | 9,075 | r | author_year_plot.R | # author_year_plot.R
#
# Author: Luke Miller 2015-04-22
###############################################################################
###############################################################################
# Export a text file from Endnote that only lists Year and Authors, all
# separated by commas. To do this, create an Output Style
# that lists the year followed by a comma and then each author separated by
# a comma. Select all references, then go to File>Export. In the window that
# opens, you'll see a menu for output style, choose your author-only version
# there and save the output file as text file.
f1 = 'authors_list_20150422.txt'
#
## Scan input file, divide each line into a separate entry in a character vector
authors = scan(file = f1, what = character(), sep = '\n')
#
yr = character()
# Extract year from each record.
for (i in 1:length(authors)){
yr[i] = substr(authors[i],regexpr('[1-2]',authors[i])[[1]],
regexpr(',',authors[i])[[1]] - 1)
}
yr = as.numeric(yr) # Convert to numbers
# Entries with missing or ambiguous years (anything with multiple years listed
# like 1997-2013) will end up as NA's in the yr vector, and will generate a
# warning.
cnt = numeric(length(yr)) # Create empty vector
# To count the number of authors on a paper, simply count the number of
# commas in each line of the authors vector. There is always one comma after
# the year, denoting at least one author, and every additional comma means there
# is another author.
for (i in 1:length(authors)){
cnt[i] = length(gregexpr(',',authors[i])[[1]])
}
# Pick out rows that don't have a useful year value
bad.entries = which(is.na(yr))
# Remove the offending rows from the yr and cnt vectors
yr = yr[-(bad.entries)]
cnt = cnt[-(bad.entries)]
# Make a data frame out of the yr and cnt vectors
df = data.frame(Year = yr, Count = cnt)
# Make a new dataframe that holds each combination of Year and Count
newdf = expand.grid(Years = unique(yr), Count = unique(cnt))
# Make a new column to hold a tally of the number of papers for each Year and
# author Count combination.
newdf$TotalPapers = NA
# Go through the combinations of years and counts to tally the number of papers
# that match that combo in the 'df' dataframe
for (i in 1:nrow(newdf)){
# Put the tally of number of papers matching each Year & Count combo in the
# TotalPapers column
newdf$TotalPapers[i] = nrow(df[df$Year == newdf$Year[i] &
df$Count == newdf$Count[i],])
}
# Drop any combinations where the TotalPapers was 0
newdf = newdf[-(which(newdf$TotalPapers == 0)),]
#########################################################
#########################################################
# Create a function to plot a color scale bar on the existing plot using the
# vector of colors that will be generated later by the colorRampPalette function
color.bar <- function(lut, min, max=-min, nticks=11,
x1 = 1, x2 = 2, y1 = 1, y2 = 2,
ticks=seq(min,max, length=nticks), round = TRUE, title = '',
cex.title = 1, text.col = 'black', horiz = FALSE){
# lut = a vector of color values, in hex format
# min = minimum value represented by the first color
# max = maximum value represented by the last color
# nticks = number of tick marks on the colorbar
# x1 = location of left edge of colorbar, in plot's x-units
# x2 = location of right edge of colorbar, in plot's x-units
# y1 = location of bottom edge of color bar, in plot's y-units
# y2 = location of top edge of color bar, in plot's y-units
# ticks = a sequence of tick mark value to be added to colorbar
# round = TRUE or FALSE, round off tick values to 0 decimal place.
# title = Title for colorbar
# cex.title = size for title
# text.col = color of tick marks, title, and border of colorbar
# horiz = TRUE or FALSE, lay out color bar vertically or horizontally
# Calculate a scaling factor based on the number of entries in the
# look-up-table and the absolute distance between y2 and y1 on the plot
if (horiz == FALSE){
scale = (length(lut)-1)/(y2-y1)
} else if (horiz == TRUE){
# For horizontal bars, use the distance between x2 and x1 instead
scale = (length(lut)-1)/(x2-x1)
}
# Round off the tick marks if desired
if (round) { ticks = round(ticks,0) }
# Draw little thin rectangles for each color in the look up table. The
# rectangles will span the distance between x1 and x2 on the plot's
# coordinates, and have a y-axis height scaled to fit all of the colors
# between y1 and y2 on the plot's coordinates. Each color will only be a
# small fraction of that overall height, using the scale factor. For a
# horizontal-oriented bar the thin rectangles will run between y1 and y2,
# scaled to fit all of the colors between x1 and x2.
for (i in 1:(length(lut)-1)) {
if (horiz == FALSE) {
# Calculate myy, the lower y-location of a rectangle
myy = (i-1)/scale + y1
# Calculate the upper y value as y+(1/scale), and draw the rectangle
rect(x1,myy,x2,myy+(1/scale), col=lut[i], border=NA)
} else if (horiz == TRUE) {
# Calculate x, the left x-location of a rectangle
myx = (i-1)/scale + x1
# Calculate the right x value as x+(1/scale), and draw the rectangle
rect(myx,y1,myx+(1/scale),y2, col=lut[i], border=NA)
}
}
# Draw a border around the color bar
rect(x1,y1,x2,y2, col = NULL, border = text.col)
# Draw tick marks and tick labels
for (i in 1:length(ticks)){
if (horiz == FALSE) {
myy = (ticks[i]-1)/scale + y1
# This is an attempt to set the tick mark and labels just off to the
# right side of the color bar without having them take up too much
# of the plot area. The x locations are calculated as x2 plus a
# fraction of the width of the rectangle.
myx2 = x2 + ((x2-x1)*0.1)
myx3 = x2 + ((x2-x1)*0.13)
# Draw little tick marks
lines(x = c(x2,myx2), y = c(myy,myy), col = text.col)
# Draw tick labels
text(x = myx3, y = myy, labels = ticks[i], adj = c(0,0.3),
col = text.col)
} else if (horiz == TRUE) {
# For a horizontal scale bar
myx = (ticks[i]-1)/scale + x1
# This is an attempt to set the tick mark and labels just below the
# bottom of the color bar without having them take up too much of
# the plot area. The y locations are calculated as y1 minus a
# fraction of the height of the rectangle
myy2 = y1 - ((y2-y1)*0.1)
myy3 = y1 - ((y2-y1)*0.13)
# Draw little tick marks
lines(x = c(myx,myx), y = c(y1,myy2), col = text.col)
# Draw tick labels
text(x = myx, y = myy3, labels = ticks[i], adj = c(0.5,1),
col = text.col)
}
}
# Draw a title for the color bar
text(x = ((x1+x2)/2), y = y2, labels = title, adj = c(0.5,-0.35),
cex = cex.title, col = text.col)
}
####################################################
####################################################
# Define a color ramp function from white to blue
# From ColorBrewer 9-class Blues (single-hue). ColorBrewer recommends the
# following set of 9 color values, expressed in hex format. I reverse them so
# that the highest value will be the lightest color.
colfun = colorRampPalette(rev(c("#f7fbff","#deebf7","#c6dbef","#9ecae1",
"#6baed6","#4292c6","#2171b5","#08519c","#08306b")),
space = 'Lab')
# Define a set of colors from blue to white using that function, covering the
# entire range of possible values for newdf$TotalPapers
cols = colfun(max(newdf$TotalPapers))
# Assign a color to each entry in the newdf data frame based on its TotalPapers
# value.
newdf$col = ""
for (i in 1:nrow(newdf)){
newdf$col[i] = cols[newdf$TotalPapers[i]]
}
##############################
# Create an output file in svg format
svg(filename = "author-year-count.svg", width = 9, height = 4.8)
par(mar =c(5,6,1,2)) # Change the figure margins slightly
plot(Count~Years, data = newdf, type = 'n',
ylim = c(0,45), las = 1,
cex.lab = 1.6,
cex.axis = 1.3,
ylab = 'Number of coauthors',
xlab = 'Publication Year',
yaxt = 'n')
# Color the background of the plot using a rectangle, and determine its
# dimensions on the fly by calling the par()$usr function to get the coordinates
# of the plot edges.
rect(par()$usr[1],par()$usr[3],par()$usr[2],par()$usr[4], col = "#BBBBBB")
# Draw some grid lines at useful locations
abline(h = c(1,2,3,4,5,10,15,20,25,30,35,40), col = "#CCCCCC")
abline(v = seq(1875,2015, by = 5), col = "#CCCCCC")
# Redraw the plot's bounding box to cover where the horizontal lines overwrite
# it.
box()
# Redraw the point data over the newly drawn background and horizontal lines
points(Count~Years, data = newdf, col = newdf$col, pch = 20, cex = 0.9)
# Call the color.bar function created earlier to create a color scale.
color.bar(lut = cols, nticks = 8, horiz = TRUE,
min = 1, max = max(newdf$TotalPapers),
x1 = 1880, x2 = 1920, y1 = 42, y2 = 44,
title = 'Number of papers', cex.title = 1.1, text.col = 'black')
# Draw the y-axis labels at the appropriate spots
axis(2, at = c(1,2,3,4,5,10,15,20,25,30,35,40),
labels = c('1','','3','','5','10','15','20','25','30','35','40'),
las = 1, cex.axis = 1.1)
dev.off()
|
5c86d4b753ead590e7fc91d90682388c05898942 | e5b1416f3d7434fc19fee3a51474069cb2478e29 | /R/train.R | 7573e8432af31166a770eb7e8a81a511324769cb | [] | no_license | anilgunduz/deepG | 16c13a8e0d2d372913506ab626ad31d4af76c428 | e47c415f04da15e363b46c39027c30255e0b698e | refs/heads/master | 2023-06-26T23:38:27.845094 | 2021-07-30T09:51:12 | 2021-07-30T09:51:12 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 34,759 | r | train.R | #' @title Trains a neural network on genomic data. Designed for developing genome based language models (GenomeNet)
#'
#' @description
#' Depth and number of neurons per layer of the netwok can be specified.
#' If a path to a folder where FASTA files are located is provided, batches will be generated using an external generator which
#' is recommended for big training sets. Alternative, a dataset can be supplied that holds the preprocessed batches (generated by \code{preprocessSemiRedundant()})
#' and keeps them in RAM.
#' @inheritParams fastaFileGenerator
#' @inheritParams labelByFolderGenerator
#' @inheritParams fastaLabelGenerator
#' @param train_type Either "lm" for language model, "label_header", "label_folder" or "label_csv". Language model is trained to predict character in sequence.
#' "label_header"/"label_folder" are trained to predict a corresponding class, given a sequence as input. If "label_header", class will be read from fasta headers.
#' If "label_folder", class will be read from folder, i.e. all files in one folder must belong to the same class.
#' If "label_csv", targets are read from a csv file. This file should have one column names "file". The targets then correspond to entries in that row (except "file"
#' column). Example: if we are currently working with a file called "a.fasta", there should be a row in our csv file
#' file | label_1 | label_2
#' "a.fasta" 1 8
#' @param model A keras model.
#' @param built_model Call to a function that creates a model. \code{create_model_function} can be either "create_model_lstm_cnn", "create_model_wavenet"
#' or "create_model_lstm_cnn_target_middle".
#' In \code{function_args} arguments of the corresponding can be specified, if no argument is given default values will be used.
#' Example: \code{built_model = list(create_model_function = "create_model_lstm_cnn", function_args = list(maxlen = 50, lstm_layer_size = 32, layers.lstm = 1)}
#' @param path Path to folder where individual or multiple FASTA or FASTQ files are located for training. If \code{train_type} is \code{label_folder}, should be a vector
#' containing a path for each class. If \code{train_type} is not \code{label_folder}, can be a list of directories.
#' @param path.val Path to folder where individual or multiple FASTA or FASTQ files are located for validation. If \code{train_type} is \code{label_folder}, should be a vector
#' containing a path for each class. If \code{train_type} is not \code{label_folder}, can be a list of directories.
#' @param dataset Dataframe holding training samples in RAM instead of using generator.
#' @param checkpoint_path Path to checkpoints folder.
#' @param validation.split Defines the fraction of the batches that will be used for validation (compared to size of training data), i.e. one validation iteration
#' processes \code{batch.size} * \code{steps.per.epoch} * \code{validation.split} samples.
#' @param run.name Name of the run (without file ending). Name will be used to identify output from callbacks.
#' @param batch.size Number of samples that are used for one network update.
#' @param epochs Number of iterations.
#' @param max.queue.size Queue on fit_generator().
#' @param reduce_lr_on_plateau Whether to use learning rate scheduler.
#' @param lr.plateau.factor Factor of decreasing learning rate when plateau is reached.
#' @param patience Number of epochs waiting for decrease in loss before reducing learning rate.
#' @param cooldown Number of epochs without changing learning rate.
#' @param steps.per.epoch Number of batches per epoch.
#' @param step Frequency of sampling steps.
#' @param randomFiles Boolean, whether to go through files sequentially or shuffle beforehand.
#' @param vocabulary Vector of allowed characters. Character outside vocabulary get encoded as specified in \code{ambiguous_nuc}.
#' @param initial_epoch Epoch at which to start training. Note that network
#' will run for (\code{epochs} - \code{initial_epoch}) rounds and not \code{epochs} rounds.
#' @param tensorboard.log Path to tensorboard log directory.
#' @param save_best_only Only save model that improved on best val_loss score.
#' @param save_weights_only Whether to save weights only.
#' @param seed Sets seed for set.seed function, for reproducible results when using \code{randomFiles} or \code{shuffleFastaEntries}
#' @param shuffleFastaEntries Logical, shuffle entries in file.
#' @param output List of optional outputs, no output if none is TRUE.
#' @param tb_images Boolean, whether to show plots in tensorboard. Note this doubles the time needed for validation step.
#' @param format File format, "fasta" or "fastq".
#' @param fileLog Write name of files used for training to csv file if path is specified.
#' @param labelVocabulary Character vector of possible targets. Targets outside \code{labelVocabulary} will get discarded if
#' \code{train_type = "label_header"}.
#' @param numberOfFiles Use only specified number of files, ignored if greater than number of files in \code{path}.
#' @param reverseComplementEncoding Logical, use both original sequence and reverse.complement as two input sequences.
#' @param output_format Determines shape of output tensor for language model (if \code{train_type = "lm"}).
#' Either "target_right", "target_middle_lstm", "target_middle_cnn" or "wavenet".
#' Assume a sequence "AACCGTA". Output correspond as follows
#' "target_right": X = "AACCGT", Y = "A"
#' "target_middle_lstm": X = (X_1 = "AAC", X_2 = "ATG"), Y = "C" (note reversed order of X_2)
#' "target_middle_cnn": X = "AACGTA", Y = "C" (nucleotide in middle encoded as 0-vector)
#' "wavenet": X = "AACCGT", Y = "ACCGTA"
#' "dummy_gen": generator creates random data
#' @param reset_states Boolean, whether to reset hidden states of RNN layer at every new input file.
#' @param proportion_per_file Numerical value between 0 and 1. Proportion of possible samples to take from one file. Takes samples from random subsequence.
#' @param read_data If true the first element of output is a list of length 2, each containing one part of paired read. Maxlen should be 2*length of one read.
#' @param use_quality_score Whether to use fastq quality scores. If TRUE, input is not one-hot encoded but corresponds to probabilities.
#' For example (0.97, 0.01, 0.01, 0.01) instead of (1, 0, 0, 0).
#' @param padding Whether to pad sequences too short for one sample with zeros.
#' @param early_stopping_time Time in seconds after which to stop training.
#' @param validation_only_after_training Boolean, whether to skip validation during training and only do one validation after training.
#' @param skip_amb_nuc Threshold of ambiguous nucleotides to accept in fasta entry. Complete entry will get discarded otherwise.
#' @param class_weight Vector with number of samples for each class in training data. Order should correspond to \code{labelVocabulary}.
#' You can use \code{get_class_weight} function to estimates class weights: class_weights <- get_class_weights(path = path, train_type = train_type)
#' If train_type = "label_csv" you need to add path to csv file:
#' class_weights <- get_class_weights(path = path, train_type = train_type, csv_path = target_from_csv)
#' @param print_scores Whether to print train/validation scores during training.
#' @param train_val_split_csv A csv file specifying train/validation split. csv file should contain one column named "file" and one column named
#' "type". The "file" column contains names of fasta/fastq files and "type" column specifies if file is used for training or validation.
#' Entries in "type" must be named "train" or "val", otherwise file will not be used for either. path and path.val arguments should be the same.
#' Not implemented for train_type = "label_folder".
#' @export
trainNetwork <- function(train_type = "lm",
built_model = list(create_model_function = NULL, function_args = list()),
model = NULL,
path = NULL,
path.val = NULL,
dataset = NULL,
checkpoint_path = NULL,
validation.split = 0.2,
run.name = "run",
batch.size = 64,
epochs = 10,
max.queue.size = 100,
reduce_lr_on_plateau = TRUE,
lr.plateau.factor = 0.9,
patience = 20,
cooldown = 1,
steps.per.epoch = 1000,
step = 1,
randomFiles = TRUE,
initial_epoch = 0,
vocabulary = c("a", "c", "g", "t"),
tensorboard.log = NULL,
save_best_only = TRUE,
save_weights_only = FALSE,
seed = c(1234, 4321),
shuffleFastaEntries = TRUE,
output = list(none = FALSE,
checkpoints = FALSE,
tensorboard = FALSE,
log = FALSE,
serialize_model = FALSE,
full_model = FALSE
),
tb_images = TRUE,
format = "fasta",
fileLog = NULL,
labelVocabulary = NULL,
numberOfFiles = NULL,
reverseComplements = FALSE,
reverseComplementEncoding = FALSE,
output_format = "target_right",
reset_states = FALSE,
ambiguous_nuc = "equal",
proportion_per_file = NULL,
read_data = FALSE,
use_quality_score = FALSE,
padding = FALSE,
early_stopping_time = NULL,
added_label_path = NULL,
add_input_as_seq = NULL,
target_from_csv = NULL,
target_split = NULL,
validation_only_after_training = FALSE,
skip_amb_nuc = NULL,
max_samples = NULL,
split_seq = FALSE,
class_weight = NULL,
concat_seq = NULL,
target_len = 1,
print_scores = TRUE,
train_val_split_csv = NULL) {
tensorflow::tf$random$set_seed(seed[1])
stopifnot(train_type %in% c("lm", "label_header", "label_folder", "label_csv"))
stopifnot(ambiguous_nuc %in% c("zero", "equal", "discard", "empirical"))
if (train_type == "label_csv") {
train_type <- "label_header"
if (is.null(target_from_csv)) {
stop('You need to add a path to csv file for target_from_csv when using train_type = "label_csv"')
}
if (!is.null(labelVocabulary)) {
message("Reading labelVocabulary from csv header")
output_label_csv <- read.csv2(target_from_csv, header = TRUE, stringsAsFactors = FALSE)
if (dim(output_label_csv)[2] == 1) {
output_label_csv <- read.csv(target_from_csv, header = TRUE, stringsAsFactors = FALSE)
}
labelVocabulary <- names(output_label_csv)
labelVocabulary <- labelVocabulary[labelVocabulary != "file"]
}
}
if (!is.null(skip_amb_nuc)) {
if((skip_amb_nuc > 1) | (skip_amb_nuc <0)) {
stop("skip_amb_nuc should be between 0 and 1 or NULL")
}
}
if (!is.null(proportion_per_file)) {
if(any(proportion_per_file > 1) | any(proportion_per_file < 0)) {
stop("proportion_per_file should be between 0 and 1 or NULL")
}
}
if (!is.null(class_weight) && (length(class_weight) != length(labelVocabulary))) {
stop("class_weight and labelVocabulary must have same length")
}
# train validation split via csv file
if (!is.null(train_val_split_csv)) {
if (train_type == "label_folder") {
stop('train_val_split_csv not implemented for train_type = "label_folder"')
}
if (is.null(path.val)) {
path.val <- path
} else {
if (!all(unlist(path.val) %in% unlist(path))) {
warning("Train/validation split split done via file in train_val_split_csv. Only using files from path argument.")
}
path.val <- path
}
train_val_file <- read.csv2(train_val_split_csv, header = TRUE, stringsAsFactors = FALSE)
if (dim(train_val_file)[2] == 1) {
train_val_file <- read.csv(train_val_split_csv, header = TRUE, stringsAsFactors = FALSE)
}
train_val_file <- dplyr::distinct(train_val_file)
if (!all(c("file", "type") %in% names(train_val_file))) {
stop("Column names of train_val_split_csv file must be 'file' and 'type'")
}
if (length(train_val_file$file) != length(unique(train_val_file$file))) {
stop("In train_val_split_csv all entires in 'file' column must be unique")
}
train_files <- train_val_file %>% dplyr::filter(type == "train")
train_files <- as.character(train_files$file)
val_files <- train_val_file %>% dplyr::filter(type == "val")
val_files <- as.character(val_files$file)
} else {
train_files <- NULL
val_files <- NULL
}
wavenet_format <- FALSE ; target_middle <- FALSE ; cnn_format <- FALSE
if (train_type == "lm") {
stopifnot(output_format %in% c("target_right", "target_middle_lstm", "target_middle_cnn", "wavenet", "dummy_gen"))
if (output_format == "target_middle_lstm") target_middle <- TRUE
if (output_format == "target_middle_cnn") cnn_format <- TRUE
if (output_format == "wavenet") wavenet_format <- TRUE
}
if (is.null(built_model$create_model_function) + is.null(model) == 0) {
stop("Two models were specified. Set either model or built_model$create_model_function argument to NULL.")
}
if (train_type == "lm") {
labelGen <- FALSE
labelByFolder <- FALSE
}
if (train_type == "label_header") {
labelGen <- TRUE
labelByFolder <- FALSE
if (is.null(target_from_csv)) stopifnot(!is.null(labelVocabulary))
}
if (train_type == "label_folder") {
labelGen <- TRUE
labelByFolder <- TRUE
stopifnot(!is.null(labelVocabulary))
stopifnot(length(path) == length(labelVocabulary))
}
if (output$none) {
output$checkpoints <- FALSE
output$tensorboard <- FALSE
output$log <- FALSE
output$serialize_model <- FALSE
output$full_model <- FALSE
}
# set model arguments
if (!is.null(built_model[[1]])) {
if (built_model[[1]] == "create_model_lstm_cnn_target_middle") {
if (!read_data){
# target_middle <- TRUE
# wavenet_format <- FALSE
}
}
if (built_model[[1]] == "create_model_lstm_cnn") {
#target_middle <- FALSE
#wavenet_format <- FALSE
}
if (built_model[[1]] == "create_model_wavenet") {
#target_middle <- TRUE
#wavenet_format <- TRUE
}
new_arguments <- names(built_model[[2]])
default_arguments <- formals(built_model[[1]])
# overwrite default arguments
for (arg in new_arguments) {
default_arguments[arg] <- built_model[[2]][arg]
}
# create model
if (built_model[[1]] == "create_model_lstm_cnn") {
formals(create_model_lstm_cnn) <- default_arguments
model <- create_model_lstm_cnn()
}
if (built_model[[1]] == "create_model_lstm_cnn_target_middle") {
formals(create_model_lstm_cnn_target_middle) <- default_arguments
model <- create_model_lstm_cnn_target_middle()
}
if (built_model[[1]] == "create_model_wavenet") {
if (!wavenet_format) {
warning("Argument wavenet_format should be TRUE when using wavenet architecture.")
}
formals(create_model_wavenet) <- default_arguments
model <- create_model_wavenet()
}
}
model_weights <- model$get_weights()
# function arguments
argumentList <- as.list(match.call(expand.dots=FALSE))
label.vocabulary.size <- length(labelVocabulary)
vocabulary.size <- length(vocabulary)
# extract maxlen from model
num_in_layers <- length(model$inputs)
if (num_in_layers == 1) {
maxlen <- model$input$shape[[2]]
} else {
if (!target_middle & !read_data & !split_seq) {
maxlen <- model$input[[num_in_layers]]$shape[[2]]
} else {
maxlen <- model$inputs[[num_in_layers - 1]]$shape[[2]] + model$inputs[[num_in_layers]]$shape[[2]]
}
}
# get solver and learning rate
solver <- stringr::str_to_lower(model$optimizer$get_config()["name"])
learning.rate <- keras::k_eval(model$optimizer$lr)
if (solver == "adam") {
optimizer <- keras::optimizer_adam(lr = learning.rate)
}
if (solver == "adagrad") {
optimizer <- keras::optimizer_adagrad(lr = learning.rate)
}
if (solver == "rmsprop") {
optimizer <- keras::optimizer_rmsprop(lr = learning.rate)
}
if (solver == "sgd") {
optimizer <- keras::optimizer_sgd(lr = learning.rate)
}
if (labelByFolder) {
if (length(path) == 1) warning("Training with just one label")
}
if (output$checkpoints) {
# create folder for checkpoints using run.name
# filenames contain epoch, validation loss and validation accuracy
checkpoint_dir <- paste0(checkpoint_path, "/", run.name, "_checkpoints")
dir.create(checkpoint_dir, showWarnings = FALSE)
if (!is.list(model$output)) {
filepath_checkpoints <- file.path(checkpoint_dir, "Ep.{epoch:03d}-val_loss{val_loss:.2f}-val_acc{val_acc:.3f}.hdf5")
} else {
filepath_checkpoints <- file.path(checkpoint_dir, "Ep.{epoch:03d}.hdf5")
if (save_best_only) {
warning("save_best_only not implemented for multi target. Setting save_best_only to FALSE")
save_best_only <- FALSE
}
}
}
# Check if fileLog is unique
if (!is.null(fileLog) && dir.exists(fileLog)) {
stop(paste0("fileLog entry is already present. Please give this file a unique name."))
}
# Check if run.name is unique
if (output$tensorboard && dir.exists(file.path(tensorboard.log, run.name))) {
stop(paste0("Tensorboard entry '", run.name , "' is already present. Please give your run a unique name."))
}
# add empty hparam dict if non exists
if (!reticulate::py_has_attr(model, "hparam")) {
model$hparam <- reticulate::dict()
}
  # temporary file used to log training data
removeLog <- FALSE
if (is.null(fileLog)) {
removeLog <- TRUE
fileLog <- tempfile(pattern = "", fileext = ".csv")
} else {
if (!endsWith(fileLog, ".csv")) fileLog <- paste0(fileLog, ".csv")
}
if (reset_states) {
fileLogVal <- tempfile(pattern = "", fileext = ".csv")
} else {
fileLogVal <- NULL
}
# if no dataset is supplied, external fasta generator will generate batches
if (is.null(dataset)) {
message("Starting fasta generator...")
if (output_format == "dummy_gen") {
gen <- dummy_gen(model, batch.size)
gen.val <- dummy_gen(model, batch.size)
removeLog <- FALSE
} else {
if (!labelGen) {
# generator for training
gen <- fastaFileGenerator(corpus.dir = path, batch.size = batch.size,
maxlen = maxlen, step = step, randomFiles = randomFiles,
vocabulary = vocabulary, seed = seed[1],
shuffleFastaEntries = shuffleFastaEntries, format = format,
fileLog = fileLog, reverseComplements = reverseComplements,
output_format = output_format, ambiguous_nuc = ambiguous_nuc,
proportion_per_file = proportion_per_file, skip_amb_nuc = skip_amb_nuc,
use_quality_score = use_quality_score, padding = padding,
added_label_path = added_label_path, add_input_as_seq = add_input_as_seq,
max_samples = max_samples, concat_seq = concat_seq, target_len = target_len,
file_filter = train_files)
# generator for validation
gen.val <- fastaFileGenerator(corpus.dir = path.val, batch.size = batch.size,
maxlen = maxlen, step = step, randomFiles = randomFiles,
vocabulary = vocabulary, seed = seed[2],
shuffleFastaEntries = shuffleFastaEntries, format = format,
fileLog = fileLogVal, reverseComplements = FALSE,
output_format = output_format, skip_amb_nuc = skip_amb_nuc,
ambiguous_nuc = ambiguous_nuc, proportion_per_file = proportion_per_file,
use_quality_score = use_quality_score, padding = padding,
added_label_path = added_label_path, add_input_as_seq = add_input_as_seq,
max_samples = max_samples, concat_seq = concat_seq, target_len = target_len,
file_filter = val_files)
# label generator
} else {
# label by folder
if (labelByFolder) {
#' @param reverseComplementEncoding Logical, use both original sequence and reverse.complement as two input sequences.
# initialize training generators
initializeGenerators(directories = path, format = format, batch.size = batch.size, maxlen = maxlen, vocabulary = vocabulary,
verbose = FALSE, randomFiles = randomFiles, step = step, showWarnings = FALSE, seed = seed[1],
shuffleFastaEntries = shuffleFastaEntries, numberOfFiles = numberOfFiles, skip_amb_nuc = skip_amb_nuc,
fileLog = fileLog, reverseComplements = reverseComplements, reverseComplementEncoding = reverseComplementEncoding, val = FALSE, ambiguous_nuc = ambiguous_nuc,
proportion_per_file = proportion_per_file, read_data = read_data, use_quality_score = use_quality_score,
padding = padding, max_samples = max_samples, split_seq = split_seq, concat_seq = concat_seq,
added_label_path = added_label_path, add_input_as_seq = add_input_as_seq)
# initialize validation generators
initializeGenerators(directories = path.val, format = format, batch.size = batch.size, maxlen = maxlen,
vocabulary = vocabulary, verbose = FALSE, randomFiles = randomFiles, step = step,
showWarnings = FALSE, seed = seed[2], shuffleFastaEntries = shuffleFastaEntries, skip_amb_nuc = skip_amb_nuc,
numberOfFiles = NULL, fileLog = fileLogVal, reverseComplements = FALSE, reverseComplementEncoding = reverseComplementEncoding, val = TRUE,
ambiguous_nuc = ambiguous_nuc, proportion_per_file = proportion_per_file, read_data = read_data,
use_quality_score = use_quality_score, padding = padding, max_samples = max_samples,
split_seq = split_seq, concat_seq = concat_seq, added_label_path = added_label_path,
add_input_as_seq = add_input_as_seq)
gen <- labelByFolderGeneratorWrapper(val = FALSE, path = path)
gen.val <- labelByFolderGeneratorWrapper(val = TRUE, path = path.val)
} else {
# generator for training
gen <- fastaLabelGenerator(corpus.dir = path, format = format, batch.size = batch.size, maxlen = maxlen,
vocabulary = vocabulary, verbose = FALSE, randomFiles = randomFiles, step = step,
showWarnings = FALSE, seed = seed[1], shuffleFastaEntries = shuffleFastaEntries,
fileLog = fileLog, labelVocabulary = labelVocabulary, reverseComplements = reverseComplements,
ambiguous_nuc = ambiguous_nuc, proportion_per_file = proportion_per_file,
read_data = read_data, use_quality_score = use_quality_score, padding = padding,
added_label_path = added_label_path, add_input_as_seq = add_input_as_seq,
skip_amb_nuc = skip_amb_nuc, max_samples = max_samples, concat_seq = concat_seq,
target_from_csv = target_from_csv, target_split = target_split, file_filter = train_files)
# generator for validation
gen.val <- fastaLabelGenerator(corpus.dir = path.val, format = format, batch.size = batch.size, maxlen = maxlen,
vocabulary = vocabulary, verbose = FALSE, randomFiles = randomFiles, step = step,
showWarnings = FALSE, seed = seed[2], shuffleFastaEntries = shuffleFastaEntries,
fileLog = fileLogVal, labelVocabulary = labelVocabulary, reverseComplements = FALSE,
ambiguous_nuc = ambiguous_nuc, proportion_per_file = proportion_per_file,
added_label_path = added_label_path, add_input_as_seq = add_input_as_seq,
read_data = read_data, use_quality_score = use_quality_score, padding = padding,
skip_amb_nuc = skip_amb_nuc, max_samples = max_samples, concat_seq = concat_seq,
target_from_csv = target_from_csv, target_split = target_split, file_filter = val_files)
}
}
}
# callbacks
callbacks <- vector("list")
if (reduce_lr_on_plateau) {
if (is.list(model$outputs)) {
monitor <- "val_loss"
} else {
monitor <- "val_acc"
}
callbacks[[1]] <- reduce_lr_cb(patience = patience, cooldown = cooldown,
lr.plateau.factor = lr.plateau.factor,
monitor = monitor)
}
if (output$log) {
callbacks <- c(callbacks, log_cb(run.name))
}
if (!output$tensorboard) tb_images <- FALSE
if (output$tensorboard) {
# count files in path
num_train_files <- count_files(path = path, format = format, train_type = train_type)
complete_tb <- tensorboard_complete_cb(default_arguments = default_arguments, model = model, tensorboard.log = tensorboard.log, run.name = run.name, train_type = train_type,
model_path = model_path, path = path, validation.split = validation.split, batch.size = batch.size, epochs = epochs,
max.queue.size = max.queue.size, lr.plateau.factor = lr.plateau.factor, patience = patience, cooldown = cooldown,
steps.per.epoch = steps.per.epoch, step = step, randomFiles = randomFiles, initial_epoch = initial_epoch, vocabulary = vocabulary,
learning.rate = learning.rate, shuffleFastaEntries = shuffleFastaEntries, labelVocabulary = labelVocabulary, solver = solver,
numberOfFiles = numberOfFiles, reverseComplements = reverseComplements, wavenet_format = wavenet_format, cnn_format = cnn_format,
create_model_function = built_model$create_model_function, vocabulary.size = vocabulary.size, gen_cb = gen_cb, argumentList = argumentList,
maxlen = maxlen, labelGen = labelGen, labelByFolder = labelByFolder, label.vocabulary.size = label.vocabulary.size, tb_images = FALSE,
target_middle = target_middle, num_train_files = num_train_files, fileLog = fileLog, proportion_per_file = proportion_per_file,
skip_amb_nuc = skip_amb_nuc, max_samples = max_samples)
callbacks <- c(callbacks, complete_tb)
}
if (output$checkpoints) {
if (wavenet_format) {
# can only save weights for wavenet
save_weights_only <- TRUE
}
callbacks <- c(callbacks, checkpoint_cb(filepath = filepath_checkpoints, save_weights_only = save_weights_only,
save_best_only = save_best_only))
}
if (reset_states) {
callbacks <- c(callbacks, reset_states_cb(fileLog = fileLog, fileLogVal = fileLogVal))
}
if (!is.null(early_stopping_time)) {
callbacks <- c(callbacks, early_stopping_cb(early_stopping_patience = early_stopping_patience,
early_stopping_time = early_stopping_time))
}
# skip validation callback
if (validation_only_after_training | is.null(validation.split) || validation.split == 0) {
validation_data <- NULL
} else {
validation_data <- gen.val
}
validation_steps <- ceiling(steps.per.epoch * validation.split)
if (validation_only_after_training) {
callbacks <- c(callbacks, validation_after_training_cb(gen.val = gen.val, validation_steps = validation_steps))
}
if (tb_images) {
if (is.list(model$output)) {
warning("Tensorboard images (confusion matrix) not implemented for model with multiple outputs.
Setting tb_images to FALSE")
tb_images <- FALSE
}
if (model$loss == "binary_crossentropy") {
warning("Tensorboard images (confusion matrix) not implemented for sigmoid activation in last layer.
Setting tb_images to FALSE")
tb_images <- FALSE
}
}
if (tb_images) {
if (!reticulate::py_has_attr(model, "cm_log")) {
model$cm_log <- tempfile(pattern = "", fileext = ".csv")
}
if (train_type == "lm") {
confMatLabels <- vocabulary
} else {
confMatLabels <- labelVocabulary
}
#add f1-score for binary classification
cm_log <- tempfile(pattern = "", fileext = ".csv")
model$cm_log <- cm_log
num_targets <- ifelse(train_type == "lm", length(vocabulary), length(labelVocabulary))
# add f1-score for binary classification
f1 <- keras::custom_metric("f1", function(y_true, y_pred) {
true <- keras::k_argmax(y_true)
pred <- keras::k_argmax(y_pred)
if (!is.list(dim(y_true))) {
df <- data.frame(as.array(true), as.array(pred))
write.table(x = df, file = model$cm_log, append = TRUE, col.names = FALSE, row.names = FALSE)
}
if (num_targets == 2) {
labels <- tensorflow::tf$math$argmax(y_true, axis = 1L)
predictions <- tensorflow::tf$math$argmax(y_pred, axis = 1L)
TP <- tensorflow::tf$cast(tensorflow::tf$math$count_nonzero(predictions * labels), dtype = "float32")
#TN <- tensorflow::tf$cast(tensorflow::tf$math$count_nonzero((predictions - 1L) * (labels - 1L)), dtype = "float32")
FP <- tensorflow::tf$cast(tensorflow::tf$math$count_nonzero(predictions * (labels - 1L)), dtype = "float32")
FN <- tensorflow::tf$cast(tensorflow::tf$math$count_nonzero((predictions - 1L) * labels), dtype = "float32")
precision <- tensorflow::tf$math$divide_no_nan(TP, TP + FP)
recall <- tensorflow::tf$math$divide_no_nan(TP, TP + FN)
two <- tensorflow::tf$constant(2)
A <- two * precision * recall
B <- precision + recall
f1_score <- tensorflow::tf$math$divide_no_nan(A, B)
return(f1_score)
} else {
return(Inf)
}
})
contains_f1_metric <- FALSE
for (i in 1:length(model$metrics)) {
if (model$metrics[[i]]$name == "f1") contains_f1_metric <- TRUE
}
if (contains_f1_metric) {
model_metrics <- model$metrics
} else {
model_metrics <- c(model$metrics, f1)
}
model %>% keras::compile(loss = model$loss,
optimizer = model$optimizer, metrics = model_metrics)
callbacks <- c(callbacks, conf_matrix_cb(path = model$cm_log, tensorboard.log = tensorboard.log,
run.name = run.name, confMatLabels = confMatLabels))
}
# training
message("Start training ...")
if (!is.null(class_weight)) {
weight_list <- list()
weight_list[["0"]] <- 1
for (i in 2:(length(class_weight))) {
weight_list[[as.character(i-1)]] <- class_weight[1]/class_weight[i]
}
class_weight <- weight_list
}
model <- keras::set_weights(model, model_weights)
history <-
model %>% keras::fit_generator(
generator = gen,
validation_data = validation_data,
validation_steps = validation_steps,
steps_per_epoch = steps.per.epoch,
max_queue_size = max.queue.size,
epochs = epochs,
initial_epoch = initial_epoch,
callbacks = callbacks,
class_weight = class_weight,
verbose = print_scores
)
if (validation_only_after_training) {
history$val_loss <- model$val_loss
history$val_acc <- model$val_acc
model$val_loss <- NULL
model$val_acc <- NULL
}
} else {
model <- keras::set_weights(model, model_weights)
message("Start training ...")
history <- model %>% keras::fit(
dataset$X,
dataset$Y,
batch_size = batch.size,
validation_split = validation.split,
epochs = epochs)
}
if (removeLog) {
file.remove(fileLog)
}
# save final model
message("Training done.")
if (output$serialize_model) {
Rmodel <-
keras::serialize_model(model, include_optimizer = TRUE)
save(Rmodel, file = paste0(run.name, "_full_model.Rdata"))
}
if (output$full_model) {
keras::save_model_hdf5(
model,
paste0(run.name, "_full_model.hdf5"),
overwrite = TRUE,
include_optimizer = TRUE
)
}
return(history)
}
|
cce775647c2b944f16a8ee4914358e8b1d040e47 | 464b7e63447fdc739413fe764174814503a06367 | /man/log_shiny_input_changes.Rd | f2509f1acd46ff80e85bedf1ec0449d0569f85a3 | [] | no_license | daroczig/logger | 4f32a43edf38d575fd06e653636ab435a78033f9 | 829aabbf46cee5d427d66e94c13e2d52112029a3 | refs/heads/master | 2022-09-27T06:04:11.011518 | 2022-05-27T20:20:25 | 2022-05-27T20:20:25 | 157,297,209 | 239 | 38 | null | 2023-09-06T11:26:20 | 2018-11-13T00:39:35 | R | UTF-8 | R | false | true | 1,218 | rd | log_shiny_input_changes.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/hooks.R
\name{log_shiny_input_changes}
\alias{log_shiny_input_changes}
\title{Auto logging input changes in Shiny app}
\usage{
log_shiny_input_changes(
input,
level = INFO,
namespace = NA_character_,
excluded_inputs = character()
)
}
\arguments{
\item{input}{passed from Shiny's \code{server}}
\item{level}{log level}
\item{namespace}{the name of the namespace}
\item{excluded_inputs}{character vector of input names to exclude from logging}
}
\description{
This is to be called in the \code{server} section of the Shiny app.
}
\examples{
\dontrun{
library(shiny)
ui <- bootstrapPage(
numericInput('mean', 'mean', 0),
numericInput('sd', 'sd', 1),
textInput('title', 'title', 'title'),
textInput('foo', 'This is not used at all, still gets logged', 'foo'),
passwordInput('password', 'Password not to be logged', 'secret'),
plotOutput('plot')
)
server <- function(input, output) {
logger::log_shiny_input_changes(input, excluded_inputs = 'password')
output$plot <- renderPlot({
hist(rnorm(1e3, input$mean, input$sd), main = input$title)
})
}
shinyApp(ui = ui, server = server)
}
}
|
243659d008f0db208cf1adc09cb804a6fc868bc0 | af716fe719978bda13825c90190b5fd40a644edc | /R/est_plm.list.R | 35aa86929462c12b1e10e92810ef2a145ddc5e18 | [] | no_license | cran/plm | 492fed724b1b4e917829990d1295117055fcdb50 | b1eb02da282264741609692ac73c61b8722fc7e8 | refs/heads/master | 2023-04-15T03:38:07.442011 | 2023-04-09T10:40:02 | 2023-04-09T10:40:02 | 17,698,568 | 19 | 23 | null | null | null | null | UTF-8 | R | false | false | 11,026 | r | est_plm.list.R | plm.list <- function(formula, data, subset, na.action,
effect = c("individual", "time", "twoways"),
model = c("within", "random", "ht", "between", "pooling", "fd"),
random.method = NULL, #c("swar", "walhus", "amemiya", "nerlove", "ht"),
inst.method = c("bvk", "baltagi"),
restrict.matrix = NULL,
restrict.rhs = NULL,
index = NULL,
...){
sysplm <- match.call(expand.dots = FALSE)
if (!inherits(data, "pdata.frame")){
odataname <- substitute(data)
data <- pdata.frame(data, index)
sysplm$data <- data
}
names.eq <- names(formula)
# run plm for each equation of the list, store the results in a
# list
plm.models <- function(sysplm, amodel, ...){
formulas <- sysplm$formula
L <- length(formulas) - 1
models <- vector(mode = "list", length = L)
for (l in 2:(L+1)){
aformula <- formulas[[l]]
if (is.name(aformula)) aformula <- eval(aformula, parent.frame())
else aformula <- as.formula(formulas[[l]])
sysplm$formula <- aformula
sysplm[[1L]] <- as.name("plm")
sysplm$model <- amodel
# a new pb, plm on every equation fails because of the restrict.matrix argument
sysplm$restrict.matrix <- NULL
models[[l-1]] <- eval(sysplm, parent.frame())
}
models
}
# Extract the model matrix and the response and transform them in
# order to get iid errors using a furnished matrix of covariance of
# the raw errors
BIG <- function(X, y, W, Omega){
S <- chol(Omega)
N <- length(y[[1L]])
if (!is.null(W)) BIGW <- c()
BIGX <- c()
BIGy <- c()
L <- nrow(S)
for (l in seq_len(L)){
rowBIGy <- rep(0, N)
rowBIGX <- c()
if (!is.null(W)) rowBIGW <- c()
for (m in seq_len(L)){
rowBIGX <- cbind(rowBIGX, t(solve(S))[l, m] * X[[m]])
if (!is.null(W)) rowBIGW <- cbind(rowBIGW, t(S)[l, m] * W[[m]])
rowBIGy <- rowBIGy + t(solve(S))[l, m] * y[[m]]
}
BIGX <- rbind(BIGX, rowBIGX)
if (!is.null(W)) BIGW <- rbind(BIGW, rowBIGW)
BIGy <- c(BIGy, rowBIGy)
}
if (!is.null(W)) return(structure(list(X = BIGX, y = BIGy, W = BIGW), class = "BIG"))
else return(structure(list(X = BIGX, y = BIGy), class = "BIG"))
}
# take a list of unconstrained models and a restriction matrix and
# return a list containing the coefficients, the vcov and the
# residuals of the constrained model ; qad version which deals with
# lists of plm models or with models fitted by mylm (which have X, y
# and W slots)
  # Combine a list of unconstrained models (or a prefitted system with X, y
  # slots) into one system estimate; if a restriction matrix R (with rhs r)
  # is supplied, apply the classical restricted least squares correction
  # b_c = b - (X'X)^{-1} R' [R (X'X)^{-1} R']^{-1} (R b - r).
  # Returns a list of class "basiclm" with coefficients, vcov and residuals.
  systemlm <- function(object, restrict.matrix, restrict.rhs){
    if (inherits(object, "list")){
      # A list of fitted models: stack coefficients/responses, block-diagonal
      # vcov and model matrices (bdiag is a plm-internal helper).
      Ucoef <- Reduce("c", lapply(object, coef))
      Uvcov <- Reduce("bdiag", lapply(object, vcov))
      X <- Reduce("bdiag", lapply(object, model.matrix))
      y <- Reduce("c", lapply(object, pmodel.response))
    }
    else{
      # Already-assembled system (e.g. a mylm fit with X and y slots).
      Ucoef <- coef(object)
      Uvcov <- vcov(object)
      X <- object$X
      y <- object$y
    }
    if (!is.null(restrict.matrix)){
      R <- restrict.matrix
      # Default restriction right-hand side is zero.
      if (is.null(restrict.rhs)) restrict.rhs <- rep(0, nrow(restrict.matrix))
      XpXm1 <- solve(crossprod(X))
      Q <- XpXm1 %*% t(R) %*% solve(R %*% XpXm1 %*% t(R))
      Ccoef <- as.numeric(Ucoef - Q %*% (R %*% Ucoef - restrict.rhs))
      names(Ccoef) <- names(Ucoef)
      Cvcov <- Uvcov - Q %*% R %*% Uvcov
      Cresid <- y - X %*% Ccoef
      structure(list(coefficients = Ccoef, vcov = Cvcov, residuals = Cresid), class = "basiclm")
    }
    else{
      # NOTE(review): this branch lapply()s over `object`, which assumes the
      # list-of-models case; a prefitted system with no restrictions would
      # not be handled correctly here — confirm against the callers.
      .resid <- Reduce("c", lapply(object, resid))
      structure(list(coefficients = Ucoef, vcov = Uvcov, residuals = .resid), class = "basiclm")
    }
  }
models <- plm.models(sysplm, amodel = model, random.method = "kinla") #TODO NB: "kinla" does not seem to be supported anymore...
L <- length(models)
sys <- systemlm(models, restrict.matrix = restrict.matrix, restrict.rhs = restrict.rhs)
Instruments <- sapply(models, function(x) length(formula(x))[2L]) > 1L
# Get the residuals and compute the consistent estimation of the
# covariance matrix of the residuals : Note that if there are
# restrictions, the "restricted" residuals are used ; for random
# effect models, two covariance matrices must be computed
if (model == "random"){
resid.pooling <- Reduce("cbind", lapply(models, function(x) resid(x, model = "pooling")))
id <- index(models[[1L]])[[1L]]
pdim <- pdim(models[[1L]])
T <- pdim$nT$T
N <- pdim$nT$n
.fixef <- apply(resid.pooling, 2, tapply, id, mean)
resid.within <- resid.pooling - .fixef[as.character(id),]
Omega.nu <- crossprod(resid.within)/(N * (T - 1))
Omega.eta <- crossprod(.fixef) / (N - 1)
colnames(Omega.nu) <- rownames(Omega.nu) <- colnames(Omega.eta) <- rownames(Omega.eta) <- names.eq
Omega.1 <- Omega.nu + T * Omega.eta
Omega <- list(id = Omega.eta, idios = Omega.nu)
phi <- 1 - sqrt(diag(Omega.nu)/diag(Omega.1))
XW <- lapply(models, function(x) model.matrix(x, model = "within"))
intercepts <- lapply(models, has.intercept)
XB <- lapply(models, function(x) model.matrix(x, model = "Between"))
yW <- lapply(models, function(x) pmodel.response(x, model = "within"))
yB <- lapply(models, function(x) pmodel.response(x, model = "Between"))
if (Instruments[1L]){
WW <- lapply(models,
function(x){
if (length(formula(x))[2L] == 3L) rhss = c(2, 3) else rhss = 2
model.matrix(model.frame(x), rhs = rhss, model = "within")
}
)
WB <- lapply(models, function(x) model.matrix(model.frame(x), rhs = 2, model = "Between"))
}
else WW <- WB <- NULL
coefnames <- lapply(XB, colnames)
BIGW <- BIG(XW, yW, WW, Omega.nu)
BIGB <- BIG(XB, yB, WB, Omega.1)
y <- BIGW$y + BIGB$y
X <- BIGB$X
    # Caution: this fails when column names are duplicated!!
    #  X[, colnames(BIGW$X)] <- X[, colnames(BIGW$X)] + BIGW$X
    # provisional version: locate the intercept columns by position instead
intercepts <- c(1, cumsum(sapply(XB, ncol))[-length(XB)]+1)
X[ , - intercepts] <- X[ , - intercepts] + BIGW$X
m <- mylm(y, X, cbind(BIGW$W, BIGB$W))
}
else{
.resid <- matrix(sys$residuals, ncol = length(models))
Omega <- crossprod(.resid) / nrow(.resid)
colnames(Omega) <- rownames(Omega) <- names.eq
X <- lapply(models, model.matrix)
y <- lapply(models, pmodel.response)
if (Instruments[1L])
W <- lapply(models,
function(x){
if (length(formula(x))[2L] == 3L) rhss = c(2, 3) else rhss = 2
model.matrix(model.frame(x), rhs = rhss)
}
)
else W <- NULL
coefnames <- lapply(X, colnames)
BIGT <- BIG(X, y, W, Omega)
X <- BIGT$X
m <- with(BIGT, mylm(y, X, W))
}
if (!is.null(restrict.matrix)){
m <- systemlm(m, restrict.matrix = restrict.matrix, restrict.rhs = restrict.rhs)
}
m$model <- data
m$coefnames <- coefnames
m$df.residual <- length(resid(m)) - length(coef(m))
m$vcovsys <- Omega
m$formula <- formula
sysplm$data <- odataname
m$call <- sysplm
args <- list(model = model, effect = effect, random.method = random.method)
m$args <- args
class(m) <- c("plm.list", "plm", "panelmodel", "lm")
return(m)
}
#' @rdname summary.plm
#' @export
summary.plm.list <- function(object, ...){
  ## Drop the "plm.list" class so that generic dispatch below reaches the
  ## underlying plm/panelmodel methods.
  class(object) <- setdiff(class(object), "plm.list")
  formulas <- eval(object$call$formula)
  eqnames <- names(formulas)
  n.eq <- length(object$coefnames)
  ## Cut points of the stacked coefficient table, one block per equation.
  breaks <- c(0, cumsum(vapply(object$coefnames, length, integer(1))))
  if (is.null(object$vcov)) {
    ## No user-supplied covariance matrix: use the default summary method.
    coef.tab <- coef(summary(object))
  } else {
    ## Recompute the usual t-statistics from the supplied covariance.
    se <- sqrt(diag(object$vcov))
    est <- coefficients(object)
    tval <- est / se
    pval <- 2 * pt(abs(tval), df = object$df.residual, lower.tail = FALSE)
    coef.tab <- cbind("Estimate" = est,
                      "Std. Error" = se,
                      "t-value" = tval,
                      "Pr(>|t|)" = pval)
  }
  ## Split the stacked table into one table per equation.
  eq.tables <- vector(mode = "list", length = n.eq)
  for (l in seq_len(n.eq)) {
    eq.tables[[l]] <- coef.tab[(breaks[l] + 1):breaks[l + 1] , ]
  }
  names(eq.tables) <- eqnames
  object$models <- eq.tables
  object$coefficients <- coef.tab
  class(object) <- c("summary.plm.list", class(object))
  object
}
#' @rdname summary.plm
#' @export
coef.summary.plm.list <- function(object, eq = NULL, ...){
  ## Return the full stacked coefficient table by default, or only the
  ## block belonging to equation `eq` when one is requested.
  if (!is.null(eq)) {
    object$models[[eq]]
  } else {
    object$coefficients
  }
}
#' @rdname summary.plm
#' @export
print.summary.plm.list <- function(x, digits = max(3, getOption("digits") - 2),
                                   width = getOption("width"), ...){
  ## Header: effect and model type, looked up in the description tables
  ## (effect.plm.list, model.plm.list) defined elsewhere in the package.
  effect <- describe(x, "effect")
  model <- describe(x, "model")
  cat(paste(effect.plm.list[effect]," ",sep=""))
  cat(paste(model.plm.list[model]," Model",sep=""))
  if (model=="random"){
    ## For random effects models, also report which transformation was used.
    ercomp <- describe(x, "random.method")
    cat(paste(" \n   (",
              random.method.list[ercomp],
              "'s transformation)\n",
              sep=""))
  }
  else{
    cat("\n")
  }
  cat("\nCall:\n")
  print(x$call)
  cat("\n")
  ## Panel dimensions of the estimation sample.
  print(pdim(x))
  cat("\nEffects:\n\n")
  cat("  Estimated standard deviations of the error\n")
  if (model == "random"){
    ## Random effects: one row of standard deviations per error component
    ## (individual effect and idiosyncratic error), each taken from the
    ## corresponding block of the system covariance matrix x$vcovsys.
    sd <- rbind(id = sqrt(diag(x$vcovsys$id)),
                idios = sqrt(diag(x$vcovsys$idios)))
    print(sd, digits = digits)
    cat("\n")
    cat("  Estimated correlation matrix of the individual effects\n")
    ## Covariance -> correlation; blank the upper triangle so only the
    ## lower triangle is printed (NA rendered as ".").
    corid <- x$vcovsys$id / tcrossprod(sd[1L, ])
    corid[upper.tri(corid)] <- NA
    print(corid, digits = digits, na.print = ".")
    cat("\n")
    cat("  Estimated correlation matrix of the idiosyncratic effects\n")
    coridios <- x$vcovsys$idios / tcrossprod(sd[2L, ])
    coridios[upper.tri(coridios)] <- NA
    print(coridios, digits = digits, na.print = ".")
  }
  else{
    ## Other models: a single covariance matrix of the errors.
    sd <- sqrt(diag(x$vcovsys))
    print(sd, digits = digits)
    cat("\n")
    cat("\nEstimated correlation matrix of the errors\n")
    corer <- x$vcovsys / tcrossprod(sd)
    corer[upper.tri(corer)] <- NA
    print(corer, digits = digits, na.print = ".")
    cat("\n")
  }
  ## Finally, one coefficient table per equation.
  for (l in seq_along(x$models)){
    cat(paste("\n - ", names(x$models)[l], "\n", sep = ""))
    printCoefmat(x$models[[l]], digits = digits)
  }
  invisible(x)
}
#' @rdname plm
#' @export
print.plm.list <- function(x, digits = max(3, getOption("digits") - 2), width = getOption("width"),...){
  ## Print one formula per equation, then the stacked coefficient vector.
  eq.forms <- formula(x)
  cat("\nModel Formulas:\n")
  for (l in seq_along(eq.forms)){
    cat(paste0(names(eq.forms)[l], " : ", deparse(eq.forms[[l]]), "\n"))
  }
  cat("\nCoefficients:\n")
  print(coef(x), digits = digits)
  cat("\n")
  invisible(x)
}
|
44d806ff017836796cd9e7470845487ed09ecaab | 9907a5d3f1b7bef83b825e2dc490bc1feec49ec4 | /plot4.R | a4c579fd2ecab0cf6f7b89e7bd754b4cd258d252 | [] | no_license | alvarezloaiciga/ExData_Plotting1 | 2f77744916be5d941ddffa3f9ead78a2ed811f19 | 7d4f2deafe0e436c77652993260ec4fe3383c65f | refs/heads/master | 2022-06-14T14:55:06.851309 | 2020-05-02T21:21:37 | 2020-05-02T21:21:37 | 260,758,517 | 0 | 0 | null | 2020-05-02T19:19:10 | 2020-05-02T19:19:10 | null | UTF-8 | R | false | false | 1,231 | r | plot4.R | library(dplyr)
library(lubridate)
# Download and unzip the household power consumption data on first run.
if(!file.exists("household_power_consumption.txt")) {
  temp <- tempfile()
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", temp)
  unzip(temp, "household_power_consumption.txt", exdir = getwd())
  unlink(temp)
}
# Load the data only once per session; "?" marks missing values in this
# file. Keep the two days of interest (1-2 Feb 2007, day-month-year format)
# and build a combined date-time column from the Date and Time fields.
if (!("consumption" %in% ls())) {
  consumption <- read.csv("household_power_consumption.txt", sep = ";", na.strings = c("?")) %>%
    tbl_df() %>%
    filter(Date %in% c("1/2/2007", "2/2/2007")) %>%
    mutate(DateTime = dmy_hms(paste(Date, Time)))
}
# 2x2 panel of time-series plots written to plot4.png.
png("plot4.png")
par(mfrow = c(2, 2))
# Top-left: global active power.
with(consumption, plot(DateTime, Global_active_power, ylab = "Global Active Power", xlab = "", type = "l"))
# Top-right: voltage.
with(consumption, plot(DateTime, Voltage, ylab = "Voltage", type = "l"))
# Bottom-left: the three sub-metering series overlaid in one panel.
with(consumption, plot(
  DateTime, Sub_metering_1,
  type = "l",
  ylab = "Energy sub metering",
  xlab = ""
))
with(consumption, lines(DateTime, Sub_metering_2, col = "red"))
with(consumption, lines(DateTime, Sub_metering_3, col = "blue"))
legend("topright", lty = 1, col = c("black", "red", "blue"), legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"))
# Bottom-right: global reactive power.
with(consumption, plot(DateTime, Global_reactive_power, type = "l"))
dev.off() |
aabab285ee460b9bacab9f64676d982024a8d3a6 | 8f645b43614d7e30a361f541a1bf5e0ed27fe65e | /main.R | 7a283f5dd3fc6e2ab97fbd015ebeb45f0269f29e | [] | no_license | bethesdamd/temp_test_r_httr_caching | 7611392ff6711aa4b55de5c8b5b8e52b6173fb28 | 699668dc305c556a58364bcaf812b9cbe904d4a1 | refs/heads/master | 2022-07-09T20:05:52.781220 | 2020-05-17T13:35:45 | 2020-05-17T13:35:45 | 264,469,562 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 386 | r | main.R | library(httr)
print("================================================================")
# Fetch the CSV from GitHub; httr stores the response (including its
# caching headers) in `e`.
e <- GET("https://raw.githubusercontent.com/bethesdamd/temp_test_r_httr_caching/master/data.csv")
cache_info(e) # shows the caching headers, e.g. the Expires date, which is determined by the server
e$date # presumably the time this response was retrieved — confirm in the httr docs
# rerequest() would re-fetch only if the cached response has expired:
#rerequest(e)$date
print(e)
|
b1206b0d3f2867d051de1b1bb1d071db761ddaf1 | 4447c1851f04e25322ca23f6baa70007c25686f2 | /crypto/data_function.R | f3fd20e212f2ad5e4b040abd4bf04b6b7efc734d | [] | no_license | TDTran333/exercices_codes | fd6e283f518896e8393a94bdace08563c8143fac | dfe4fb9481343bc07a49fd01987caa1b3d33fd70 | refs/heads/master | 2023-03-26T22:06:22.138942 | 2021-03-27T22:29:15 | 2021-03-27T22:29:15 | 321,821,155 | 0 | 3 | null | null | null | null | UTF-8 | R | false | false | 773 | r | data_function.R | rm(list = ls()) # reset global variables
#import the libraries we need
library(jsonlite)
library(glue)
library(tidyverse)
library(lubridate)
# create a function to retrieve daily data
# Download the daily (86400-second granularity) OHLCV candles for a Coinbase
# Pro product and persist them as CSV under crypto/<filename>.
#
# pair:     product id, e.g. "BTC-USD"
# filename: name of the CSV file to write inside the crypto/ folder
#
# Returns the retrieved data frame invisibly so callers can use the data
# directly; previously the function implicitly returned write.csv()'s NULL.
# (Function name keeps its original spelling for caller compatibility.)
retreive_daily_data <- function(pair, filename) {
  url <- glue("https://api.pro.coinbase.com/products/{pair}/candles?granularity=86400")
  columnNames <- c('unix', 'low', 'high', 'open', 'close', glue('{pair} volume'))
  mydata <- fromJSON(url)
  df <- as.data.frame(mydata)
  colnames(df) <- columnNames # API returns unnamed arrays; attach names
  write.csv(df, file = here::here("crypto", filename))
  invisible(df)
}
# Fetch the BTC-USD daily candles and write them to CSV.
newPair <- "BTC-USD"
fileName <- glue("dailyData{newPair}.csv")
runFunc <- retreive_daily_data(newPair, filename = fileName)
runFunc

# Read back the CSV the function just wrote: `dailyDataBTC_USD` was never
# defined in the original script, so the final pipe errored. Then convert the
# unix epoch-seconds column to a POSIXct date-time.
dailyDataBTC_USD <- read.csv(here::here("crypto", fileName))
BTC_USD <- dailyDataBTC_USD %>%
  mutate(date = as_datetime(unix))
|
43beac15c6421a1a877b6258322264d2954c56c9 | 62dbac526ceebab3fec67cddd4ff5e0aadfc24fc | /src/02_boot_envipe.R | b92d17d0436fd08a6f439b92a9ce85cca9057743 | [] | no_license | CADSalud/ViolenciaSexual | 155569d79df326367f8c0a23f3536d2376534dc9 | d94d2f940293be29ac6e0db00bc1c48399073420 | refs/heads/master | 2021-03-30T18:32:31.834747 | 2018-03-28T18:13:19 | 2018-03-28T18:13:19 | 107,608,199 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,627 | r | 02_boot_envipe.R |
# Bootstrap analysis of sexual-violence victimization (ENVIPE survey, Mexico),
# pulled from BigQuery and compared against CONAPO population projections.
library(ProjectTemplate)
load.project()
load("cache/poblacion.RData")  # CONAPO population projections (`poblacion`)
library(bigrquery)
#
# Proyecto ----
# Google Cloud project hosting the ENVIPE tables in BigQuery.
project <- "acoustic-field-186719" # put your project ID here
# Per survey year, pull weighted counts for crime type 14 (sexual violence),
# grouped by sex / age / questionnaire section / occurrence flag.
df_envipe <- lapply(2012:2016, function(year.nom){
  print(year.nom)  # progress indicator, one query per year
  sql <- paste0("SELECT year, SEXO, edad_num, seccion, ",
                "situacion, ocurri__, SUM(FAC_ELE), COUNT(year) ",
                "FROM [acoustic-field-186719:envipe.vic2_",
                year.nom,
                "] WHERE situacion = 14 ", # sexual violence (rape)
                "GROUP BY year, SEXO, edad_num, seccion, situacion, ocurri__")
  tt <- query_exec(sql, project = project)
}) %>%
  bind_rows() %>%
  # 5-year age groups from 20 up to 65, then one wide 65-97 bucket
  mutate(edad_gpo = cut(edad_num, breaks = c(seq(19,65, by = 5), 97),
                        include.lowest = F)) %>%
  # f0_/f1_ are BigQuery's default names for SUM(FAC_ELE)/COUNT(year)
  dplyr::rename(nfac = f0_, n = f1_, ocurrencia = ocurri__) %>%
  as_tibble()
df_envipe

# Sanity checks: group sizes and a peek at the raw 2012 table
df_envipe %>%
  group_by(edad_gpo) %>%
  tally

query_exec("select * FROM [acoustic-field-186719:envipe.vic2_2012] limit 3",
           project = project)
# Record-level pull (one row per reported incident), women only (SEXO = 2),
# with the survey design identifiers needed to resample respondents later.
df_envipe_id <- lapply(2012:2016, function(year.nom){
  print(year.nom)
  sql <- paste0("SELECT UPM, VIV_SEL, HOGAR, R_SEL, FAC_ELE, SEXO, ",
                "edad_num, year, situacion, ocurri__ ",
                "FROM [acoustic-field-186719:envipe.vic2_",
                year.nom,
                "] WHERE situacion = 14 and SEXO = 2")
  tt <- query_exec(sql, project = project) %>%
    as_tibble()
}) %>%
  bind_rows() %>%
  rename(ocurrencia = ocurri__) %>%
  # person key built from sampling unit / dwelling / household / respondent
  unite(id_persona, c('UPM', 'VIV_SEL', 'HOGAR', 'R_SEL')) %>%
  mutate(edad_gpo = cut(edad_num, breaks = c(seq(19,65, by = 5), 97),
                        include.lowest = F))
df_envipe_id
# required tables ----
# tab_envipe: weighted counts for women (SEXO == 2) by age group / year /
# occurrence flag, plus the implied ENVIPE population total per cell.
tab_envipe <- df_envipe %>%
  filter(SEXO == 2) %>%
  group_by(edad_gpo, year, ocurrencia) %>%
  summarise(nfac = sum(nfac)) %>%
  group_by(edad_gpo, year) %>%
  mutate(pob_envipe = sum(nfac)) %>%
  ungroup
tab_envipe

# Quick look: weighted victim counts by age group, one line per year
tab_envipe %>%
  filter(ocurrencia == 1,
         !is.na(edad_gpo)) %>%
  ggplot(aes(x = edad_gpo, y = nfac,
             color = factor(year),
             group = year)) +
  geom_point() +
  geom_line()

poblacion$edad %>% summary()

# tab_conapo: CONAPO population projections for women, collapsed onto the
# same age groups / years as the survey data.
tab_conapo <- poblacion %>%
  filter(edad > 19,
         anio > 2011) %>%
  mutate(edad_gpo = cut(edad, breaks = c(seq(19,65, by = 5), 97),
                        include.lowest = F),
         year = parse_number(anio)) %>%
  filter(sexo == "Mujeres") %>%
  group_by(sexo, edad_gpo, year) %>%
  summarise(pob_conapo = sum(pob)) %>%
  ungroup()
tab_conapo

tab_envipe %>% head
tab_conapo %>% head

# proportion of abuse: occurrence proportion under both denominators
# (ENVIPE expansion factors vs CONAPO projections)
tab_orig <- tab_envipe %>%
  left_join(tab_conapo,
            by = c("edad_gpo", "year")) %>%
  na.omit() %>%
  mutate(prop_envipe = nfac/pob_envipe,
         prop_conapo = nfac/pob_conapo)

tab_orig %>%
  filter(year == 2012,
         ocurrencia == 1)
# Compare population estimation ----
# Scatter of the two proportion estimates (ENVIPE denominator vs CONAPO
# denominator); points on the identity line indicate agreement.
# Axis labels are rescaled to rates per 100,000.
tab_orig %>%
  filter(ocurrencia == 1) %>%
  ggplot(aes(x = prop_envipe, y = prop_conapo,
             color = factor(year))) +
  geom_point() +
  geom_abline(slope = 1, intercept = 0) +
  scale_x_continuous(labels = function(x)100000*x) +
  scale_y_continuous(labels = function(x)100000*x) +
  facet_wrap(~edad_gpo, scales = "free")

# Resampling (bootstrap) ----
# Recompute the occurrence proportions for one bootstrap sample.
#
# sub:    resampled record-level data (needs year, edad_gpo, ocurrencia, FAC_ELE)
# conapo: population lookup with year / edad_gpo / pob_conapo columns; defaults
#         to the script-level `tab_conapo` (made an explicit parameter so the
#         dependency on that global is visible and overridable).
#
# Returns one row per year/age-group/occurrence cell with the weighted count
# and proportions under both denominators.
prop_fun <- function(sub, conapo = tab_conapo){
  sub %>%
    group_by(year, edad_gpo, ocurrencia) %>%
    summarise(n_fac = sum(FAC_ELE)) %>%
    group_by(year, edad_gpo) %>%
    mutate(prop_envipe = n_fac/sum(n_fac)) %>%
    ungroup() %>%
    left_join(conapo,
              by = c("year", "edad_gpo")) %>%
    mutate(prop_conapo = n_fac/pob_conapo)
}
# 100 bootstrap replicates per survey year (resampling records within year),
# recomputing the proportions for every replicate.
tab_boot <- df_envipe_id %>%
  filter(!is.na(edad_gpo)) %>%
  group_by(year) %>%
  bootstrap(m = 100, by_group = T) %>%
  do(prop_fun(.)) %>%
  ungroup
tab_boot

# Bootstrap distribution of the ENVIPE-based proportion, by age group
tab_boot %>%
  filter(ocurrencia == 1) %>%
  ggplot(aes(x = prop_envipe,
             color = factor(year))) +
  geom_density() +
  facet_wrap(~edad_gpo, scales ="free")

# Same distributions as boxplots across age groups
tab_boot %>%
  filter(ocurrencia == 1) %>%
  ggplot(aes(x = edad_gpo,
             y = prop_envipe,
             fill = factor(year))) +
  geom_boxplot()

# Bootstrap summary (mean, median, interquartile bounds) of the weighted
# count and the proportion, cached for downstream reports.
summ_envipe <- tab_boot %>%
  filter(ocurrencia == 1) %>%
  dplyr::select(replicate, year, edad_gpo, n_fac, prop = prop_envipe) %>%
  gather(tipo, val, n_fac, prop) %>%
  group_by(year, edad_gpo, tipo) %>%
  summarise(prom = mean(val),
            median = median(val),
            q75 = quantile(val, .75),
            q25 = quantile(val, .25)) %>%
  ungroup %>%
  arrange(tipo)
summ_envipe
cache("summ_envipe")
|
e98ed6fe09ce99971d792b12b4434c72a338aa68 | cbb9e94ce4d0b6ff444be6129560b5b4d0133d0a | /man/create_blank_config.Rd | fd3443bcaf2e495a0565758577b4dabfabbb8b66 | [] | no_license | pedmiston/totems-data | de39b8a38d9aefcee8ef34868323d5cf054814eb | 5ed46fe78cefafcead59297508a2631e9ea0d27b | refs/heads/master | 2021-07-16T03:17:06.326910 | 2019-02-05T19:53:05 | 2019-02-05T19:53:05 | 104,396,767 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 333 | rd | create_blank_config.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/db-tables.R
\name{create_blank_config}
\alias{create_blank_config}
\title{Create a blank config file "config.yml" in the current directory.}
\usage{
create_blank_config()
}
\description{
Create a blank config file "config.yml" in the current directory.
}
|
0acafb5b0fa42ad1bd95c3bedc4044e84fc6d552 | 323d9497a4d4730a4f34eaef8154f2477ec2e068 | /Code/Pre-Procesamiento de datos.R | 365c08ca68cc1ad1dc6c41fd06332d981631be65 | [] | no_license | OscarFloresP/ea-2021-1-cc51 | 1c235da8decc7f81c0da6325d1f848872bd1c2f9 | b5f66c6e7d4e855f70adebaccd69564ea7768aec | refs/heads/main | 2023-04-18T13:57:47.616878 | 2021-05-12T04:57:20 | 2021-05-12T04:57:20 | 365,569,381 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,258 | r | Pre-Procesamiento de datos.R | #3. Pre-Procesamiento de datos
# Identify NA values in the raw hotel-bookings data frame
ValoresVacios <- is.na(DFHotel_Ruido)
View(ValoresVacios)
summary(ValoresVacios)  # per-column counts of TRUE reveal which columns need imputation
# Imputation by mean: fill NAs with the (rounded) mean of the observed values.
#
# x: numeric vector, possibly containing NAs.
# Returns x with every NA replaced by round(mean(non-missing values)).
mean.valor <- function(x){
  faltantes <- is.na(x)
  x.obs <- x[!faltantes]          # observed (non-missing) values
  valorado <- x
  # round() keeps the imputed value integer-like, matching count columns
  valorado[faltantes] <- round(mean(x.obs))
  valorado                        # dropped unused tot.faltantes and explicit return()
}
# Apply mean-imputation (mean.valor) to the given column positions of df,
# overwriting each column in place under its original name.
#
# df:   data frame
# cols: integer vector of column positions to impute
mean.df <- function(df, cols){
  nombres <- names(df)
  for (col in cols) {
    # original paste(nombres[col], sep = ".") was a no-op with a single
    # argument; use the column name directly
    df[nombres[col]] <- mean.valor(df[, col])
  }
  df
}
# Mean-impute the selected numeric columns, then verify no NAs remain there
DFHotel_Limpio <- mean.df(DFHotel_Ruido, c(3,4,6,7,8,9,10,26))
Comprobacion <- is.na(DFHotel_Limpio)
summary(Comprobacion)
# Imputation by random draw: fill each NA with a value sampled (with
# replacement) from the observed values of x.
#
# Fixes two edge cases of the original sample() call:
#  * sample(x.obs, ...) with a length-1 numeric x.obs samples from 1:x.obs
#    instead of from x.obs itself; indexing via sample.int avoids that.
#  * if every value is NA there is nothing to sample from; x is returned as is.
rand.valor <- function(x){
  faltantes <- is.na(x)
  tot.faltantes <- sum(faltantes)
  x.obs <- x[!faltantes]
  if (length(x.obs) == 0L || tot.faltantes == 0L) {
    return(x)  # nothing observed, or nothing missing
  }
  valorado <- x
  valorado[faltantes] <- x.obs[sample.int(length(x.obs), tot.faltantes, replace = TRUE)]
  valorado
}
# Apply random-draw imputation (rand.valor) to the given column positions of
# df, overwriting each column in place under its original name.
#
# df:   data frame
# cols: integer vector of column positions to impute
random.df <- function(df, cols){
  nombres <- names(df)
  for (col in cols) {
    # original paste(nombres[col], sep = ".") was a no-op with a single
    # argument; use the column name directly
    df[nombres[col]] <- rand.valor(df[, col])
  }
  df
}
# Random-draw imputation for columns 11-12, then verify no NAs remain
DFHotel_Limpio <- random.df(DFHotel_Limpio, c(11,12))
Comprobacion_Aleatorio <- is.na(DFHotel_Limpio)
summary(Comprobacion_Aleatorio)
# Identify outliers: boxplot each candidate variable and list its outliers
boxplot(DFHotel_Limpio$lead_time, main = "Número de días entre reserva y llegada por cliente")
boxplot(DFHotel_Limpio$lead_time)$out
boxplot(DFHotel_Limpio$stays_in_weekend_nights, main = "Número de noches hospedadas en fin de semana por cliente")
boxplot(DFHotel_Limpio$stays_in_weekend_nights)$out
boxplot(DFHotel_Limpio$stays_in_week_nights, main = "Número de noches hospedadas en dia de semana por cliente")
boxplot(DFHotel_Limpio$stays_in_week_nights)$out
boxplot(DFHotel_Limpio$adr, main = "Tarifa diaria promedio por cliente")
boxplot(DFHotel_Limpio$adr)$out
boxplot(DFHotel_Limpio$total_of_special_requests, main = "Número de solicitudes especiales por cliente")
boxplot(DFHotel_Limpio$total_of_special_requests)$out
# Outlier treatment #1: values below the 5th percentile become the mean,
# values above the 95th percentile become the median.
# NOTE(review): the two replacement statements are order-dependent -- the
# median on the second line is computed AFTER the low outliers have already
# been replaced by the mean. Confirm this is intentional before refactoring.
outliers.med <- function(x, removeNA = TRUE){
  quantiles <- quantile(x, c(0.05, 0.95), na.rm = removeNA)
  # below 5th percentile -> mean of x (evaluated on x as it stands here)
  x[x<quantiles[1]] <- mean(x, na.rm = removeNA)
  # above 95th percentile -> median of the partially-modified x
  x[x>quantiles[2]] <- median(x, na.rm = removeNA)
  x
}
# Treat stays_in_week_nights and total_of_special_requests with method #1;
# pattern: inspect before, preview the cleaned variable, assign, re-inspect.
boxplot(DFHotel_Limpio$stays_in_week_nights, main = "Número de noches hospedadas en dia de semana por cliente (outliers)")
boxplot(DFHotel_Limpio$stays_in_week_nights)$out
boxplot(outliers.med(DFHotel_Limpio$stays_in_week_nights), main = "Número de noches hospedadas en dia de semana por cliente (limpio)")
boxplot(outliers.med(DFHotel_Limpio$stays_in_week_nights))$out
summary(outliers.med(DFHotel_Limpio$stays_in_week_nights))
DFHotel_Limpio$stays_in_week_nights <- outliers.med(DFHotel_Limpio$stays_in_week_nights)
summary(DFHotel_Limpio$stays_in_week_nights)

boxplot(DFHotel_Limpio$total_of_special_requests, main = "Número de solicitudes especiales por cliente (outliers)")
boxplot(DFHotel_Limpio$total_of_special_requests)$out
boxplot(outliers.med(DFHotel_Limpio$total_of_special_requests), main = "Número de solicitudes especiales por cliente (limpio)")
boxplot(outliers.med(DFHotel_Limpio$total_of_special_requests))$out
summary(outliers.med(DFHotel_Limpio$total_of_special_requests))
DFHotel_Limpio$total_of_special_requests <- outliers.med(DFHotel_Limpio$total_of_special_requests)
summary(DFHotel_Limpio$total_of_special_requests)
# Outlier treatment #2: capping (winsorizing)
# Outlier treatment #2 (capping / winsorizing): values beyond the Tukey
# fences (Q1 - 1.5*IQR and Q3 + 1.5*IQR) are clamped to the 5th and 95th
# percentiles respectively.
#
# x:        numeric vector
# removeNA: passed through as na.rm to quantile()
outliers.cap <- function(x, removeNA = TRUE){
  limits <- quantile(x, probs = c(0.05, 0.95), na.rm = removeNA)
  hinges <- quantile(x, probs = c(0.25, 0.75), na.rm = removeNA)
  fence <- 1.5 * (hinges[2] - hinges[1])
  x[x < hinges[1] - fence] <- limits[1]   # clamp low outliers to the 5th pct
  x[x > hinges[2] + fence] <- limits[2]   # clamp high outliers to the 95th pct
  x
}
# Treat lead_time, stays_in_weekend_nights and adr with the capping method;
# same pattern as before: inspect, preview, assign, re-inspect.
boxplot(DFHotel_Limpio$lead_time, main = "Número de días entre reserva y llegada por cliente (outliers)")
boxplot(DFHotel_Limpio$lead_time)$out
boxplot(outliers.cap(DFHotel_Limpio$lead_time), main = "Número de días entre reserva y llegada por cliente (limpio)")
boxplot(outliers.cap(DFHotel_Limpio$lead_time))$out
summary(outliers.cap(DFHotel_Limpio$lead_time))
DFHotel_Limpio$lead_time <- outliers.cap(DFHotel_Limpio$lead_time)
summary(DFHotel_Limpio$lead_time)

boxplot(DFHotel_Limpio$stays_in_weekend_nights, main = "Número de noches hospedadas en fin de semana por cliente (outliers)")
boxplot(DFHotel_Limpio$stays_in_weekend_nights)$out
boxplot(outliers.cap(DFHotel_Limpio$stays_in_weekend_nights), main = "Número de noches hospedadas en fin de semana por cliente (limpio)")
boxplot(outliers.cap(DFHotel_Limpio$stays_in_weekend_nights))$out
summary(outliers.cap(DFHotel_Limpio$stays_in_weekend_nights))
DFHotel_Limpio$stays_in_weekend_nights <- outliers.cap(DFHotel_Limpio$stays_in_weekend_nights)
summary(DFHotel_Limpio$stays_in_weekend_nights)

boxplot(DFHotel_Limpio$adr, main = "Tarifa diaria promedio por cliente (outliers)")
boxplot(DFHotel_Limpio$adr)$out
boxplot(outliers.cap(DFHotel_Limpio$adr), main = "Tarifa diaria promedio por cliente (limpio)")
boxplot(outliers.cap(DFHotel_Limpio$adr))$out
summary(outliers.cap(DFHotel_Limpio$adr))
DFHotel_Limpio$adr <- outliers.cap(DFHotel_Limpio$adr)
summary(DFHotel_Limpio$adr)

# Example of a variable with very little variation: neither method improves
# required_car_parking_spaces, so the column is deliberately left unassigned.
boxplot(DFHotel_Limpio$required_car_parking_spaces, main = "Estacionamientos requeridos por cliente (outliers)")
boxplot(DFHotel_Limpio$required_car_parking_spaces)$out
boxplot(outliers.med(DFHotel_Limpio$required_car_parking_spaces), main = "Estacionamientos requeridos por cliente (intento de limpieza con metodo 1)")
boxplot(outliers.med(DFHotel_Limpio$required_car_parking_spaces))$out
summary(outliers.med(DFHotel_Limpio$required_car_parking_spaces))
boxplot(outliers.cap(DFHotel_Limpio$required_car_parking_spaces), main = "Estacionamientos requeridos por cliente (intento de limpieza con metodo 2)")
boxplot(outliers.cap(DFHotel_Limpio$required_car_parking_spaces))$out
summary(outliers.cap(DFHotel_Limpio$required_car_parking_spaces))
# Persist the preprocessed data frame (both RData and CSV)
save(DFHotel_Limpio, file = "~/R/EA-Admin-Info/Data/DF_Limpio.RData")
write.csv(DFHotel_Limpio, "DFHotel_Limpio.csv", row.names = FALSE)
|
de49a0d1a998ed47f12d5f57b8f14fe6969fa535 | 433cfe886b8649366e894a396accd0809ed408d9 | /R/memoise.R | d479b6013a767016f2326bd1c26e92e00116bde7 | [] | no_license | benubah/shinyfind | 8cf5c73d0fb242c119767d75babb0eff3cce140b | e8b4b590bd737be29ac47ef3428e3832b7676ec7 | refs/heads/main | 2023-08-12T21:27:24.998293 | 2021-10-01T15:11:04 | 2021-10-01T15:11:04 | 416,333,564 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 182 | r | memoise.R |
#' Current "day", with the day rolling over at 07:00 UTC
#'
#' Intended as a cache-invalidation trigger for memoised functions: the
#' returned value changes exactly once per day, at 7am UTC.
today_at_sunrise <- function() {
  shifted_now <- lubridate::now("UTC") - 7 * 3600
  as.Date(shifted_now)
}
|
7addfe16ca63183ff4820b584fee62b1662e428e | b7044633a4a752f95a8ad05b2edd972a63e48945 | /R/TeacupCerberus-package.R | 5cb2a9d75a2e520797661673689790aee62f4f15 | [] | no_license | jsilve24/TeacupCerberus | d40ace7819ddc5dd234eb35b0a67a92cb2324cb0 | e6551c81efb67da9d691decb58d9f4ec305af17c | refs/heads/master | 2020-06-27T09:41:32.892345 | 2019-08-06T20:22:26 | 2019-08-06T20:22:26 | 199,916,251 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 546 | r | TeacupCerberus-package.R | #' TeacupCerberus
#'
#' Estimates covariation within and between two count datasets where the counts
#' contain multinomial variation (e.g., sequence count data like microbiome 16S or bulk/single-cell RNA-seq).
#' The model outputs Bayesian posterior samples over covariance matricies.
#' The entire posterior reflects uncertainty in the true covariation due to multinomial counting.
#'
#' @docType package
#' @author Justin D Silverman
#' @useDynLib TeacupCerberus
#' @import Rcpp
#' @import RcppEigen
#' @import RcppCoDA
#' @name RcppCoDA
NULL
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.