blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
623a896b364b08c77b8753e0a61457b735ef6e87 | 66dd0b831434c4ad85bf58e473278883aeadbb1b | /analysis/tewhey_subset.R | 5f3d468f284417e1416f4c427fa5714517cd1cfa | [] | no_license | andrewGhazi/bayesianMPRA | 6e394893c7cfb079ca55698339eaa77c568a6736 | 4b9823f5a05b431adac9caae83737e525ae5d93c | refs/heads/master | 2021-01-20T03:34:35.703409 | 2018-09-17T07:41:10 | 2018-09-17T07:41:10 | 83,831,741 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,050 | r | tewhey_subset.R |
## Build a subsampled long-format barcode-count table from the Tewhey MPRA
## per-tag files (ctrl + HepG2 samples only) and save it as an .RData object.
## NOTE(review): this script uses %>% pipes, read_tsv, mutate/separate/filter,
## and mclapply but loads no packages itself -- presumably readr/dplyr/tidyr/
## parallel are attached upstream; confirm before running standalone.
dir = '/mnt/bigData2/andrew/MPRA/Tewhey/indivTags/'

# Keep only the ".expanded" tag-count files for control or HepG2 samples
file_names = list.files(dir,
                        pattern = '.expanded$') %>%
  grep(pattern = 'ctrl|HepG2', x = ., value = TRUE)

# Read each sample file in parallel (11 cores); tag rows with the source
# file and split the fused snp/allele id on its trailing A/B character
tewhey_subset = mclapply(file_names,
                         function(t_file){
                           read_tsv(paste0(dir, t_file),
                                    col_names = c('snp_allele', 'barcode', 'count')) %>%
                             mutate(sample = t_file) %>%
                             separate(snp_allele, into = c('snp_id', 'allele'), sep = stringr::regex('(?=[AB]$)')) %>%
                             mutate(snp_id = gsub('_$', '', snp_id)) # drop trailing underscore left by the split
                         },
                         mc.cores = 11) %>%
  bind_rows %>%
  filter(snp_id %in% base::sample(unique(snp_id), size = 2000)) %>% # randomly choose 2000 SNPs as a subset (unseeded -- not reproducible)
  unique %>% # there are infrequently some duplicate rows in the tewhey data
  mutate(sample = gsub('Geuv_90K_', '', sample) %>% gsub('.tag.ct.indiv.expanded', '', .)) # shorten sample names
save(tewhey_subset, file = '~/bayesianMPRA/analysis_data/tewhey_subset.RData')
|
be979ebb4a708c2bdf7ab712c16547136685cd9e | f0d5df048c0d5ac4f969a03b477515bd762a446c | /R/rprofile_d.R | 4e1d248b4591f94b68cc332c493dcc66feb0717a | [] | no_license | HenrikBengtsson/startup | 67a01ac529ff0adc8dd0e722bbaccbd80010cb2a | abd1be760a8665e7f301129ec97e1d5d1b175a43 | refs/heads/develop | 2023-04-07T06:44:59.018075 | 2023-04-06T01:42:50 | 2023-04-06T01:42:50 | 73,848,752 | 163 | 8 | null | 2022-04-03T06:44:22 | 2016-11-15T19:38:47 | R | UTF-8 | R | false | false | 1,635 | r | rprofile_d.R | #' @describeIn startup Initiate using \file{.Rprofile.d/} files
#'
#' @aliases rprofile
#' @export
rprofile_d <- function(sibling = FALSE, all = FALSE, check = NA,
                       unload = FALSE, skip = NA,
                       on_error = c("error", "warning", "immediate.warning",
                                    "message", "ignore"),
                       dryrun = NA, debug = NA, paths = NULL) {
  ## Initiate R startup by sourcing all files found in .Rprofile.d/
  ## directories.  Relies on package-internal helpers (debug(), check(),
  ## find_rprofile_d(), list_d_files(), files_apply(), api(), unload(),
  ## current_script_pathname()) not visible in this file.
  debug <- debug(debug)
  ## Default 'check' from env var R_STARTUP_CHECK, overridable by the
  ## R option "startup.check"; isTRUE() coerces NA/non-logical to FALSE
  if (is.na(check)) {
    check <- as.logical(Sys.getenv("R_STARTUP_CHECK", "TRUE"))
    check <- isTRUE(getOption("startup.check", check))
  }

  ## Skip? (honor R's own flags that disable profile files)
  if (is.na(skip)) {
    skip <- any(c("--no-init-file", "--vanilla") %in% commandArgs())
  }

  # (i) Check and fix common errors
  if (check) {
    check(all = all, fix = TRUE, debug = FALSE)
  }

  # Re-apply debug state (check() above ran with debug = FALSE)
  debug(debug)

  if (!skip) {
    # (ii) Source custom .Rprofile.d/* files
    if (is.null(paths)) paths <- find_rprofile_d(sibling = sibling, all = all)
    files <- list_d_files(paths, filter = filter_files)
    encoding <- getOption("encoding")
    keep_source <- getOption("keep.source", TRUE)
    # Source one file with auto-printing of top-level results, while the
    # "current script" pathname is recorded; on.exit() guarantees it is
    # reset even if source() errors
    source_print_eval <- function(pathname) {
      current_script_pathname(pathname)
      on.exit(current_script_pathname(NA_character_))
      source(pathname, encoding = encoding, local = FALSE, chdir = FALSE,
             print.eval = TRUE,
             keep.source = keep_source, echo = FALSE, verbose = FALSE)
    }
    files_apply(files, fun = source_print_eval,
                on_error = on_error, dryrun = dryrun, what = "Rprofile",
                debug = debug)
  }

  # Return the startup API object invisibly; optionally unload the package
  res <- api()
  if (unload) unload()
  invisible(res)
}
|
9f1d2734b09ce7b3dd1e8d044ecbad8d7e3c8eb3 | 9a4518c0ac57cfaffd4069a39fcdccd6a8173949 | /tests/testthat/test-geom-spatial-segment.R | 65167156f21e418cf8bc51d1eb3834b2c51e2602 | [] | no_license | paleolimbot/ggspatial | 78c1047e344ec658d092851ce6fb2a83d10c5db3 | 5c4c903a0785702d83acfe6d9753294882ed676c | refs/heads/master | 2023-08-19T22:53:31.102638 | 2023-08-18T00:27:19 | 2023-08-18T00:27:19 | 63,102,201 | 357 | 37 | null | 2023-08-18T00:27:20 | 2016-07-11T21:06:12 | R | UTF-8 | R | false | false | 2,080 | r | test-geom-spatial-segment.R |
# Visual-regression tests for geom_spatial_segment(); plot comparisons use
# vdiffr snapshots via expect_doppelganger() (defined elsewhere in the
# package's test setup).
test_that("geom_spatial_segment() works", {
  skip_if_not_installed("vdiffr")
  skip_if_not_installed("lwgeom")

  # Four cities as lon/lat points (EPSG:4326)
  cities <- data.frame(
    x = c(-63.58595, 116.41214, 13.50, -149.75),
    y = c(44.64862, 40.19063, 52.51, 61.20),
    city = c("Halifax", "Beijing", "Berlin", "Anchorage")
  )

  # Pair cities into segments: Halifax->Beijing, Beijing->Anchorage,
  # Berlin->Halifax, Anchorage->Berlin (index permutation)
  cities$xend <- cities$x[c(2, 4, 1, 3)]
  cities$yend <- cities$y[c(2, 4, 1, 3)]

  p <- ggplot(cities, aes(x, y, xend = xend, yend = yend)) +
    geom_spatial_point(crs = 4326) +
    # view of the north pole
    coord_sf(crs = 3995)

  # Omitting crs should message about the assumed default
  expect_message(
    ggplot2::ggplot_build(p + geom_spatial_segment()),
    "Assuming `crs = 4326`"
  )

  expect_silent(
    ggplot2::ggplot_build(p + geom_spatial_segment(crs = 4326))
  )

  # Snapshot the combinations of great_circle / wrap_dateline / detail
  expect_doppelganger(
    "geom_spatial_segment(), great circle wrap",
    p + geom_spatial_segment(
      crs = 4326,
      great_circle = TRUE,
      wrap_dateline = TRUE,
      arrow = grid::arrow()
    )
  )

  expect_doppelganger(
    "geom_spatial_segment(), great circle no wrap",
    p + geom_spatial_segment(
      crs = 4326,
      great_circle = TRUE,
      wrap_dateline = FALSE,
      arrow = grid::arrow()
    )
  )

  expect_doppelganger(
    "geom_spatial_segment(), no great circle",
    p + geom_spatial_segment(crs = 4326, great_circle = FALSE)
  )

  expect_doppelganger(
    "geom_spatial_segment(), no great circle + detail",
    p + geom_spatial_segment(detail = 100, great_circle = FALSE, crs = 4326)
  )

  # Same checks in Web Mercator (EPSG:3857)
  expect_doppelganger(
    "geom_spatial_segment(), great circle merc",
    # don't use halifax -> beijing for this one
    ggplot(
      cities[cities$city != "Halifax", ],
      aes(x, y, xend = xend, yend = yend)
    ) +
      geom_spatial_point(crs = 4326) +
      coord_sf(crs = 3857) +
      geom_spatial_segment(crs = 4326, great_circle = TRUE)
  )

  expect_doppelganger(
    "geom_spatial_segment(), no great circle merc",
    ggplot(cities, aes(x, y, xend = xend, yend = yend)) +
      geom_spatial_point(crs = 4326) +
      coord_sf(crs = 3857) +
      geom_spatial_segment(crs = 4326, great_circle = FALSE)
  )
})
|
2d18c960961f524df19b1058cfc17f96b38f49e0 | 0a677c67824ad812542e8625126be1dd3ed7c711 | /tests/testthat.R | 4424675b6500b685d90fe9520f1807bd5bbf0479 | [
"LicenseRef-scancode-warranty-disclaimer",
"MIT"
] | permissive | dfalbel/torch | 48ff1b38ffdef9abe4873364c26b8abbae3ba330 | ae317db8ec1392acd9f1a2d5f03cef9ad676f778 | refs/heads/master | 2021-07-08T04:41:59.099075 | 2020-08-07T22:21:43 | 2020-08-07T22:21:43 | 151,864,442 | 58 | 8 | NOASSERTION | 2019-10-24T21:35:26 | 2018-10-06T17:27:42 | C++ | UTF-8 | R | false | false | 54 | r | testthat.R | library(testthat)
library(torch)
test_check("torch")
|
8b7c83e14b747115f76ea5a61662f0738801a93a | 24e47e0c7b92a49320b050e028f116c42e290e43 | /spring2018/2018-02-27_BaseRGraphics_Code.R | b3c429f3354b68708a0cc686f873a231d6a0dc01 | [] | no_license | shannonajw/archive | 2c90b0e1f5ffa9174f6d98e6aa905bf6e3f5d464 | fede550898289b1a5c2474d44525bef29bb5ddee | refs/heads/master | 2021-09-15T03:01:11.343987 | 2018-05-24T16:10:59 | 2018-05-24T16:10:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,000 | r | 2018-02-27_BaseRGraphics_Code.R | rm(list=ls())
## Delete your workspace;
## Teaching script: base R graphics with 2016 state-poll data and the
## Lock5Data Hollywood movies dataset.
getwd()
## Check your current working directory
setwd("FILL IN")
## Set your working directory -- NOTE(review): "FILL IN" is a placeholder;
## students must substitute their own path or this line errors.
polls = read.csv("2016_StatePolls_final.csv")
## Load data
summary(polls$other)
summary(polls$undecided)
polls$etc<-100-polls$trump-polls$clinton
table(polls$State)

## --- Bar plots ----
barplot(table(polls$State),main="Unordered")
## simple bar plot (Q: what is the order in the x values?)
barplot(table(polls$State),main="Unordered",ylab="Poll freq.")
## simple bar plot (Q: what is the order in the x values?)
polls_r <- transform(polls,State = reorder(State, trump))
levels(polls_r$State)
levels(polls$State)
## reorder states by Trump support rate
barplot(table(polls_r$State),main="Ordered by %Trump", ylab="Poll freq.")
## ordered plot!
par(mfrow=c(2,1))
barplot(table(polls$State),main="Simple Bar Plot")
barplot(table(polls_r$State),main="Ordered by %Trump")
dev.off()
## with subplot function

## --- Hollywood movies dataset ----
install.packages("Lock5Data")
## Install package Lock5Data which contains the Hollywood dataset
library("Lock5Data")
data(HollywoodMovies2011)
## Load data
movies<- na.omit(HollywoodMovies2011)
## drop all observations with at least one NA
hist(movies$RottenTomatoes, breaks=10, col="red", xlab="Rating", main="Colored histogram with 10 bins")
dev.off()

## --- Scatter plots ----
par(mfrow=c(1,2))
## 1 by 2 subplots
plot(movies$RottenTomatoes,log10(movies$WorldGross))
plot(log10(movies$Budget),log10(movies$WorldGross))
## log10: logarithm function with base 10
dev.off()

## (repeated on purpose in the original slides)
par(mfrow=c(1,2))
## 1 by 2 subplots
plot(movies$RottenTomatoes,log10(movies$WorldGross))
plot(log10(movies$Budget),log10(movies$WorldGross))
## log10: logarithm function with base 10
dev.off()

## Same scatter plots, colored by genre, with a legend
par(mfrow=c(1,2))
## 1 by 2 subplots
plot(movies$RottenTomatoes,log10(movies$WorldGross),col=movies$Genre)
plot(log10(movies$Budget),log10(movies$WorldGross),col=movies$Genre)
## log10: logarithm function with base 10
legend('topleft', legend=unique(movies$Genre),
col=unique(movies$Genre), pch=21)
par(mfg=c(1,1))
dev.off()

## --- Regression overlays ----
mod1 <- lm(log10(movies$WorldGross) ~ movies$RottenTomatoes)
## Linear regression
preds1 <- predict(mod1)
## predicted value obtained by linear regression
plot(movies$RottenTomatoes,log10(movies$WorldGross))
lines(movies$RottenTomatoes, preds1)
## NOTE(review): lines() connects points in data order; unless the data are
## sorted by the x variable this can draw a zigzag rather than a clean line.
dev.off()
mod2 <- lm(log10(movies$WorldGross) ~ log10(movies$Budget))
## Linear regression
preds2 <- predict(mod2)
## predicted value obtained by linear regression
plot(log10(movies$Budget),log10(movies$WorldGross))
lines(log10(movies$Budget), preds2)
dev.off()

## --- Box plots (on screen, then written to PDF) ----
par(las=2)
## horizontal text
par(mfrow=c(1,2))
boxplot(movies$RottenTomatoes~movies$Genre,xlab="Genre",ylab="Rating")
## Genre VS Rating
boxplot(movies$Budget~movies$Genre,xlab="Genre",ylab="Budget")
## Genre VS Budget
dev.off()
pdf("boxplots.pdf")
par(las=2)
## horizontal text
par(mfrow=c(1,2))
boxplot(movies$RottenTomatoes~movies$Genre,xlab="Genre",ylab="Rating")
## Genre VS Rating
boxplot(movies$Budget~movies$Genre,xlab="Genre",ylab="Budget")
## Genre VS Budget
dev.off()
9cf6de45ef254389c7a571cafbe1259df1ab9e1e | dd71aa829dfa18c6996f09dfd7b759bcb7f912e5 | /brainmusic/hack.R | ebaf5d3d4dec226d2b19292ea400612e1c1bd811 | [] | no_license | bradjunswick/api-examples | 9a4e492ef68d364386f490eeb42131ee6cd271b3 | 7c79ea95ca8d1ad86304f1d07741ad8d81ae3983 | refs/heads/master | 2020-12-25T17:05:16.257065 | 2012-06-21T00:12:27 | 2012-06-21T00:12:27 | 4,732,669 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,015 | r | hack.R | #starting with Leon French's collapsed human microarray data
## Hackathon script: extract expression values from Allen human brain
## microarray matrices and write plain-text files for a downstream
## Python sonification script.
brain1=read.delim(file="Desktop/matrix.29192x346.txt", sep="\t", header=T, row.names=1)
brain2=read.delim(file="Desktop/matrix.29192x323.txt", sep="\t", header=T, row.names=1)
brain3=read.delim(file="Desktop/matrix.29192x158.txt", sep="\t", header=T, row.names=1)
#combine all into a single matrix
a=cbind(brain1, brain2, brain3)
write.table(a, "test.txt", quote=F, row.names=T, col.names=T, sep="\t")
#sampled 1500 values from the data
# NOTE(review): sample() applied to a data.frame samples COLUMNS (list
# elements), not individual values, and sort() on the result may not behave
# as intended -- verify this does what the comments claim.
b=sample(a, 1500)
write.table(b, "rand1500.txt", quote=F, row.names=F, col.names=F, sep="\t")
#sorted and log transformed the data
b=sort(b)
c=log2(as.numeric(b))
c=as.list(c)
write.table(c, "randlog1500.txt", quote=F, row.names=F, col.names=F, sep="\t")
#found a gene of interest (PDYN)
intersect("PDYN", row.names(a))
which(row.names(a) == "PDYN")
# 21652 is the row index of PDYN found by the which() call above; it will
# break if the input matrices change
head(a[21652, ])
write.table(as.list(a[21652,]), "PDYN.txt", quote=F, row.names=F, col.names=F, sep="\t")
#output files are formatted to be read by the python script
8cadeeaf8e47cacefcac9d40eb72807f9d825b8b | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/nlme/examples/varFixed.Rd.R | 8c6ae61c1ba7dc4e2f207a8b08083e3adc1cee17 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 156 | r | varFixed.Rd.R | library(nlme)
### Name: varFixed
### Title: Fixed Variance Function
### Aliases: varFixed
### Keywords: models
### ** Examples
vf1 <- varFixed(~age)
|
de57a578907b0a8aa295809c1b2920582b6d9150 | 22b8821b7da1e03ad6be2fee5bbd1a72b86a4a53 | /plot1.R | aa44c09124423537ce7d89109a05f8c5a3e789da | [] | no_license | hapchen/Exploratory-Analysis_Project2 | b83d0d2595a9c3aae312894dee79a009488acb8c | 8ff670f697f17205baf2e1d997f28581ca8d662f | refs/heads/master | 2020-12-24T21:45:16.011642 | 2016-04-14T22:03:24 | 2016-04-14T22:03:24 | 56,272,093 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 465 | r | plot1.R | setwd("~/Documents/R/exdata-data-NEI_data")
# Read the data
NEI <- readRDS("summarySCC_PM25.rds")
SCC <- readRDS("Source_Classification_Code.rds")
# Plot 1
Emissions_byyear <- aggregate(Emissions ~ year, NEI, sum)
png("plot1.png", width = 480, height = 480)
plot(Emissions_byyear$year,Emissions_byyear$Emissions/10^6,type = 'l',xlab = "Year",
ylab = "PM2.5 Emissions (in 10^6 tons)", main = "Total PM2.5 Emissions from All Resources
in US")
dev.off()
|
034b730567df5a6c297a550fc735290d589e9fcf | ba8c93066b190808f70d54386359ee015639ca33 | /crypt/man.r | d619d4457d5d497a2a1eab4b50d44ed15eb13a21 | [] | no_license | unix-history/tropix-cmd | 0c0b54ae35b9e48c63aca8a7ac06e7935dd5b819 | 244f84697354b1c0e495a77cdff98549875d1b9f | refs/heads/master | 2021-05-27T22:06:51.390547 | 2014-11-06T17:41:20 | 2014-11-06T17:41:20 | 26,281,551 | 1 | 2 | null | null | null | null | ISO-8859-1 | R | false | false | 1,945 | r | man.r | .bp
.he 'CRYPT (man)'TROPIX: Manual de Referência'CRYPT (man)'
.fo 'Atualizado em 20.04.97'Versão 3.0.0'Pag. %'
.b NOME
.in 5
.wo "crypt -"
codifica/decodifica arquivos
.br
.in
.sp
.b SINTAXE
.in 5
.(l
crypt [-c] [<entrada> [<saída>]]
.)l
.in
.sp
.b DESCRIÇÃO
.in 5
O comando "crypt" lê o arquivo <entrada>, codifica-o/decodifica-o
de acordo com certa transformação,
e coloca a versão transformada no arquivo <saída>.
.sp
Se <saída> não for dada, a versão transformada é escrita na saída padrão.
Se além disto, a <entrada> não for dada, o arquivo original
é lido da entrada padrão.
.sp
A transformação é determinada por uma <chave> e uma <complexidade>.
A chave é sempre pedida pelo terminal. A <complexidade> é normalmente
48, a não ser que seja dada a opção "-c", quando então o seu valor
é também pedido pelo terminal.
.sp
A transformação que codifica um certo arquivo é a mesma que o irá
decodificar posteriormente; a seqüência de comandos
.sp
.(l
crypt original transformado
crypt transformado
.)l
.sp
irá escrever o arquivo "original" no terminal (desde naturalmente,
que a mesma <chave> seja dada para os dois comandos).
.in
.sp
.b OBSERVAÇÕES
.in 5
Quanto maior a <complexidade>, maior será o tempo necessário para
a transformação, o que irá dificultar a decodificação por pessoas
não autorizadas.
.sp
Para maior segurança, não utilize <chave>s com menos de 5 caracteres.
.sp
Se dois ou mais arquivos codificados com a mesma chave forem
concatenados, e este resultado for decodificado, apenas o primeiro
arquivo será decodificado corretamente.
.in
.sp
.b
VEJA TAMBÉM
.r
.in 5
.wo "(libc):"
getpass
.br
.in
.sp
.b
REFERÊNCIA
.r
.in 5
Reeds, J.A. & Weinberger, P.J., "File Security and the
Unix System Crypt Command", AT&T Bell Laboratories
Technical Journal, Vol. 63, No. 8, Out. 1984.
.in
.sp
.b ARQUIVOS
.in 5
/dev/tty
.in
.sp
.(t
.b ESTADO
.in 5
Efetivo.
.)t
.in
|
257dcb6c4705fcdb02b0b846e007315035483b8a | 669872ed6a31695b754a6386abcf46c479412dce | /simple_sims.R | 6d8479de593d24482c99614474200a5886343dd0 | [] | no_license | kapelner/CovBalAndRandExpDesign | 766afe315cc8a124521f648668ef105460444250 | 61e55753a93c42fc2a55d3cef2ac39ad8104a142 | refs/heads/master | 2021-07-18T01:10:57.494188 | 2019-01-18T01:57:34 | 2019-01-18T01:57:34 | 141,571,668 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,491 | r | simple_sims.R | source("unique_perm.R")
pacman::p_load(ggplot2)
n = 20
p = 1
sigma_x = 1
mu_x = 0
sigma_z = 1.5 ###change this to give Figures 1,2,3 and comment our the darkorange
# sigma_z = 1
# sigma_z = 0.05 #R^2 = 1
mu_z = 0
beta_T = 1
beta_0 = 1
bbeta = rep(1, p)
set.seed(0) #NOTE: the whole simulation is conditional on this one x
X = matrix(rnorm(n * p, mu_x, sigma_x), ncol = p)
X = X[order(X[,1 ]), , drop = FALSE]
Sinv = solve(var(X))
#get all possible realizations
all_randomizations = uniqueperm2(rep(c(-1, 1), n / 2))
w_size = nrow(all_randomizations)
res_iter = data.frame(matrix(NA, nrow = w_size, ncol = 2))
colnames(res_iter) = c("i", "obs_imbalance")
for (i in 1 : w_size){
indicT = all_randomizations[i, ]
t_idx = indicT == 1
c_idx = indicT == -1
xT = X[t_idx]
xC = X[c_idx]
res_iter[i, ] = c(
i,
(mean(xT) - mean(xC)) %*% Sinv %*% (mean(xT) - mean(xC))
)
}
#now let's order by observed imbalance
res_iter_ord_obs_imb = res_iter[order(res_iter$obs_imbalance), ]
###############START SIM
Nsim = 1000      # draws of the unobserved covariate z
Nsamprand = 300  # allocations sampled per z draw

#create place for simulation results to be stored and appropriately name it
# Each row holds the MSE of the naive difference-in-means estimator and of
# the covariate-adjusted regression estimator for one z draw
colnames_results = c("mse_naive", "mse_regr")
rand_res_iter_obs = data.frame(matrix(NA, nrow = Nsim, ncol = length(colnames_results)))
colnames(rand_res_iter_obs) = colnames_results
match_res_iter_obs = data.frame(matrix(NA, nrow = Nsim, ncol = length(colnames_results)))
colnames(match_res_iter_obs) = colnames_results
opt_res_iter_obs = data.frame(matrix(NA, nrow = Nsim, ncol = 2))
colnames(opt_res_iter_obs) = colnames_results
worst_res_iter_obs = data.frame(matrix(NA, nrow = Nsim, ncol = 2))
colnames(worst_res_iter_obs) = colnames_results
r_sq_est = array(NA, Nsim)

## --- Complete randomization (forced balance) ----
for (nsim in 1 : Nsim){
  if (nsim %% 100 == 0){
    cat("random nsim: ", nsim, "\n")
  }
  #simulate the unobserved features
  z = rnorm(n, 0, sigma_z)
  #now sample for random
  tx_est = array(NA, Nsamprand)
  tx_est_regr = array(NA, Nsamprand)
  r_sq_est_int = array(NA, Nsamprand)
  for (i in 1 : Nsamprand){
    # draw one balanced assignment uniformly at random
    indicT = all_randomizations[sample(1 : w_size, 1), ]
    t_idx = indicT == 1
    c_idx = indicT == -1
    xT = X[t_idx]
    xC = X[c_idx]
    zT = z[t_idx]
    zC = z[c_idx]
    y = beta_0 + X %*% bbeta + z + indicT * beta_T
    # y = beta_0 + z + indicT * beta_T
    yT = y[t_idx]
    yC = y[c_idx]
    # divide by 2 because w is coded -1/+1, so mean(yT) - mean(yC) = 2 * beta_T
    tx_est[i] = (mean(yT) - mean(yC)) / 2
    tx_est_regr[i] = coef(lm(y ~ X + indicT))[3]
    r_sq_est_int[i] = summary(lm(y ~ z))$r.squared
  }
  rand_res_iter_obs[nsim, ] = c(
    mean((tx_est - beta_T)^2),
    mean((tx_est_regr - beta_T)^2)
  )
  r_sq_est[nsim] = mean(r_sq_est_int)
}
mean(r_sq_est)

# Compare the theoretical MSE decomposition (x-imbalance term + z term)
# against the empirical MSE under complete randomization
sigma_w_crfb = (1 + 1 / (n - 1)) * diag(n) - matrix(rep(1 / (n - 1), n^2), nrow = n)
t(X) %*% sigma_w_crfb %*% X / n^2
sigma_z^2 / n
t(X) %*% sigma_w_crfb %*% X / n^2 + sigma_z^2 / n
mean(rand_res_iter_obs$mse_naive)
## --- Pairwise matching ----
## X was sorted earlier, so consecutive rows (1,2), (3,4), ... form the
## matched pairs; within each pair one unit is randomized to each arm.
for (nsim in 1 : Nsim){
  if (nsim %% 100 == 0){
    cat("matching nsim: ", nsim, "\n")
  }
  #simulate the unobserved features outside of the w loop
  z = rnorm(n, 0, sigma_z)
  tx_est = array(NA, Nsamprand)
  tx_est_regr = array(NA, Nsamprand)
  for (i in 1 : Nsamprand){
    #now sample for pairwise matching
    indicT = matrix(NA, n, 1)
    for (i_w in seq(2, n, by = 2)){
      indicT[c(i_w - 1, i_w), 1] = sample(c(-1, 1)) # flip a coin within the pair
    }
    t_idx = indicT == 1
    c_idx = indicT == -1
    xT = X[t_idx]
    xC = X[c_idx]
    zT = z[t_idx]
    zC = z[c_idx]
    y = beta_0 + X %*% bbeta + z + indicT * beta_T
    # y = beta_0 + z + indicT * beta_T
    yT = y[t_idx]
    yC = y[c_idx]
    tx_est[i] = (mean(yT) - mean(yC)) / 2
    tx_est_regr[i] = coef(lm(y ~ X + indicT))[3]
  }
  match_res_iter_obs[nsim, ] = c(
    mean((tx_est - beta_T)^2),
    mean((tx_est_regr - beta_T)^2)
  )
}

# Theoretical counterpart for matching: block-diagonal w-covariance with
# -1 off-diagonals inside each pair; compare with the empirical MSE
sigma_w_matching = diag(n)
for (i in seq(from = 2, to = n, by = 2)){
  sigma_w_matching[i - 1, i] = -1
  sigma_w_matching[i, i - 1] = -1
}
t(X) %*% sigma_w_matching %*% X / n^2
sigma_z^2 / n
t(X) %*% sigma_w_matching %*% X / n^2 + sigma_z^2 / n
mean(match_res_iter_obs$mse_naive)
## --- Optimal allocation ----
## Uses only the two lowest-imbalance assignment vectors (a mirror pair:
## each vector and its negation have identical imbalance).
for (nsim in 1 : Nsim){
  if (nsim %% 100 == 0){
    cat("opt nsim: ", nsim, "\n")
  }
  #simulate the unobserved features
  z = rnorm(n, 0, sigma_z)
  #now sample for optimal
  tx_est = array(NA, 2)
  tx_est_regr = array(NA, 2)
  for (i in 1 : 2){ #there are only two optimal vectors!
    indicT = all_randomizations[res_iter_ord_obs_imb$i[i], ]
    t_idx = indicT == 1
    c_idx = indicT == -1
    xT = X[t_idx]
    xC = X[c_idx]
    zT = z[t_idx]
    zC = z[c_idx]
    y = beta_0 + X %*% bbeta + z + indicT * beta_T
    # y = beta_0 + z + indicT * beta_T
    yT = y[t_idx]
    yC = y[c_idx]
    tx_est[i] = (mean(yT) - mean(yC)) / 2
    tx_est_regr[i] = coef(lm(y ~ X + indicT))[3]
  }
  opt_res_iter_obs[nsim, ] = c(
    mean((tx_est - beta_T)^2),
    mean((tx_est_regr - beta_T)^2)
  )
}

# w_star = all_randomizations[res_iter_ord_obs_imb$i[1], ]
# sigma_w_opt = w_star %*% t(w_star)
#
# t(X) %*% sigma_w_opt %*% X / n^2
# sigma_z^2 / n
# t(X) %*% sigma_w_opt %*% X / n^2 + sigma_z^2 / n
#mean(opt_res_iter_obs$mse_naive)
## --- Worst allocations ----
## Uses the r_worst highest-imbalance assignment vectors as an adversarial
## benchmark.
r_worst = 500
res_iter_ord_obs_imb_worst = res_iter_ord_obs_imb[(w_size - r_worst + 1) : w_size, ]
for (nsim in 1 : Nsim){
  if (nsim %% 100 == 0){
    cat("worst nsim: ", nsim, "\n")
  }
  #simulate the unobserved features
  z = rnorm(n, 0, sigma_z)
  #now sample for worst
  tx_est = array(NA, r_worst)
  tx_est_regr = array(NA, r_worst)
  for (i in 1 : r_worst){
    indicT = all_randomizations[res_iter_ord_obs_imb_worst$i[i], ]
    t_idx = indicT == 1
    c_idx = indicT == -1
    xT = X[t_idx]
    xC = X[c_idx]
    zT = z[t_idx]
    zC = z[c_idx]
    y = beta_0 + X %*% bbeta + z + indicT * beta_T
    yT = y[t_idx]
    yC = y[c_idx]
    tx_est[i] = (mean(yT) - mean(yC)) / 2
    tx_est_regr[i] = coef(lm(y ~ X + indicT))[3]
  }
  worst_res_iter_obs[nsim, ] = c(
    mean((tx_est - beta_T)^2),
    mean((tx_est_regr - beta_T)^2)
  )
}
## --- Results: naive difference-in-means estimator ----
#what happened?
mean(rand_res_iter_obs$mse_naive, na.rm = TRUE)
mean(match_res_iter_obs$mse_naive, na.rm = TRUE)
mean(opt_res_iter_obs$mse_naive, na.rm = TRUE)
mean(worst_res_iter_obs$mse_naive)
quantile(rand_res_iter_obs$mse_naive, 0.95, na.rm = TRUE)
quantile(match_res_iter_obs$mse_naive, 0.95, na.rm = TRUE)
quantile(opt_res_iter_obs$mse_naive, 0.95, na.rm = TRUE)
quantile(worst_res_iter_obs$mse_naive, 0.95, na.rm = TRUE)

#calculate the c constants (standardized gap between the 95th percentile
#and the mean of the MSE distribution)
(quantile(rand_res_iter_obs$mse_naive, 0.95, na.rm = TRUE) - mean(rand_res_iter_obs$mse_naive, na.rm = TRUE)) / sd(rand_res_iter_obs$mse_naive, na.rm = TRUE)
(quantile(match_res_iter_obs$mse_naive, 0.95, na.rm = TRUE) - mean(match_res_iter_obs$mse_naive, na.rm = TRUE)) / sd(match_res_iter_obs$mse_naive, na.rm = TRUE)
(quantile(opt_res_iter_obs$mse_naive, 0.95, na.rm = TRUE) - mean(opt_res_iter_obs$mse_naive, na.rm = TRUE)) / sd(opt_res_iter_obs$mse_naive, na.rm = TRUE)
# NOTE(review): the line below divides by sd of the OPT results rather than
# the worst results -- looks like a copy-paste error; verify intent
(quantile(worst_res_iter_obs$mse_naive, 0.95, na.rm = TRUE) - mean(worst_res_iter_obs$mse_naive, na.rm = TRUE)) / sd(opt_res_iter_obs$mse_naive, na.rm = TRUE)

# Overlaid MSE densities with mean (solid) and 95th percentile (dashed)
# lines: red = random, blue = matching, green = optimal, orange = worst
ggplot(data.frame(rand_res_iter_obs)) +
  geom_density(aes(mse_naive), alpha = 0.3, fill = "red") +
  geom_density(aes(mse_naive), alpha = 0.3, fill = "blue", data = match_res_iter_obs) +
  geom_density(aes(mse_naive), alpha = 0.3, fill = "green", data = opt_res_iter_obs) +
  geom_density(aes(mse_naive), alpha = 0.3, fill = "darkorange", data = worst_res_iter_obs) +
  xlim(0, 0.6) + xlab("MSE") +
  geom_vline(xintercept = mean(rand_res_iter_obs$mse_naive), col = "red", alpha = 0.3, lwd = 1) +
  geom_vline(xintercept = mean(match_res_iter_obs$mse_naive), col = "blue", alpha = 0.3, lwd = 1) +
  geom_vline(xintercept = mean(opt_res_iter_obs$mse_naive), col = "green", alpha = 0.3, lwd = 1) +
  geom_vline(xintercept = mean(worst_res_iter_obs$mse_naive), col = "darkorange", alpha = 0.3, lwd = 1) +
  geom_vline(xintercept = quantile(rand_res_iter_obs$mse_naive, .95), col = "red", alpha = 0.3, lwd = 1, linetype = "dashed") +
  geom_vline(xintercept = quantile(match_res_iter_obs$mse_naive, .95), col = "blue", alpha = 0.3, lwd = 1, linetype = "dashed") +
  geom_vline(xintercept = quantile(opt_res_iter_obs$mse_naive, .95), col = "green", alpha = 0.3, lwd = 1, linetype = "dashed") +
  geom_vline(xintercept = quantile(worst_res_iter_obs$mse_naive, .95), col = "darkorange", alpha = 0.3, lwd = 1, linetype = "dashed")

max(rand_res_iter_obs$mse_naive)
max(match_res_iter_obs$mse_naive)
max(opt_res_iter_obs$mse_naive)
max(worst_res_iter_obs$mse_naive)

#conclusion: matching wins

### investigate regression estimator
mean(rand_res_iter_obs$mse_regr, na.rm = TRUE)
mean(match_res_iter_obs$mse_regr, na.rm = TRUE)
mean(opt_res_iter_obs$mse_regr, na.rm = TRUE)
mean(worst_res_iter_obs$mse_regr)
quantile(rand_res_iter_obs$mse_regr, 0.95, na.rm = TRUE)
quantile(match_res_iter_obs$mse_regr, 0.95, na.rm = TRUE)
quantile(opt_res_iter_obs$mse_regr, 0.95, na.rm = TRUE)
quantile(worst_res_iter_obs$mse_regr, 0.95, na.rm = TRUE)

# Same density plot for the regression-adjusted estimator
ggplot(data.frame(rand_res_iter_obs)) +
  geom_density(aes(mse_regr), alpha = 0.3, fill = "red") +
  geom_density(aes(mse_regr), alpha = 0.3, fill = "blue", data = match_res_iter_obs) +
  geom_density(aes(mse_regr), alpha = 0.3, fill = "green", data = opt_res_iter_obs) +
  geom_density(aes(mse_regr), alpha = 0.3, fill = "darkorange", data = worst_res_iter_obs) +
  xlim(0, 0.6) + xlab("MSE") +
  geom_vline(xintercept = mean(rand_res_iter_obs$mse_regr), col = "red", alpha = 0.3, lwd = 1) +
  geom_vline(xintercept = mean(match_res_iter_obs$mse_regr), col = "blue", alpha = 0.3, lwd = 1) +
  geom_vline(xintercept = mean(opt_res_iter_obs$mse_regr), col = "green", alpha = 0.3, lwd = 1) +
  geom_vline(xintercept = mean(worst_res_iter_obs$mse_regr), col = "darkorange", alpha = 0.3, lwd = 1) +
  geom_vline(xintercept = quantile(rand_res_iter_obs$mse_regr, .95), col = "red", alpha = 0.3, lwd = 1, linetype = "dashed") +
  geom_vline(xintercept = quantile(match_res_iter_obs$mse_regr, .95), col = "blue", alpha = 0.3, lwd = 1, linetype = "dashed") +
  geom_vline(xintercept = quantile(opt_res_iter_obs$mse_regr, .95), col = "green", alpha = 0.3, lwd = 1, linetype = "dashed") +
  geom_vline(xintercept = quantile(worst_res_iter_obs$mse_regr, .95), col = "darkorange", alpha = 0.3, lwd = 1, linetype = "dashed")

max(rand_res_iter_obs$mse_regr)
max(match_res_iter_obs$mse_regr)
max(opt_res_iter_obs$mse_regr)
max(worst_res_iter_obs$mse_regr)
c6122fcd21dc2693f1862069828beafdc1ee98c2 | 033597efd692538cb3b59076059290c06a474cd1 | /prepOrbidata_liposome_expts_simplified.R | 32555583d63950f410454e7eb50c7b4238591920 | [
"MIT"
] | permissive | jamesrco/LipidPhotoOxBox | a071c25d38a67dde196ef6c00f601276be2d2f19 | 077c53ec6efd8dfd870353f573d1d93f699ace70 | refs/heads/master | 2021-04-30T22:41:17.989729 | 2018-05-10T00:06:49 | 2018-05-10T00:06:49 | 66,294,849 | 2 | 0 | MIT | 2018-05-08T14:51:51 | 2016-08-22T17:46:03 | R | UTF-8 | R | false | false | 10,324 | r | prepOrbidata_liposome_expts_simplified.R |
# ******************************************************************
################ Basic user begin editing here #############
# ******************************************************************
################ User: define locations of data files and database(s) #############
working_dir = "/Volumes/Lab/Jamie Collins/mzXML/PAL1314/Exp_13/" # specify working directory
setwd(working_dir) # set working directory to working_dir
# specify directories subordinate to the working directory in which the .mzXML files for xcms can be found; per xcms documentation, use subdirectories within these to divide files according to treatment/primary environmental variable (e.g., station number along a cruise transect) and file names to indicate timepoint/secondary environmental variable (e.g., depth)
mzXMLdirs = c("neg/","pos/")
# specify which of the directories above you wish to analyze this time through
chosenFileSubset = "neg/"
################# Load in mzXML files, get xcms settings from IPO or user input #############
# load selected subset for processing
mzXMLfiles.raw = list.files(chosenFileSubset, recursive = TRUE, full.names = TRUE)
# verify the ion mode of the data in these files
# subset.polarity = getSubsetIonMode(mzXMLfiles.raw)
# provide some feedback to user
print(paste0("Loaded ",length(mzXMLfiles.raw)," mzXML files. These files contain ",subset.polarity," ion mode data. Raw dataset consists of:"))
print(mzXMLfiles.raw)
# check whether user has elected to exclude any files, and exclude them if they happen to be in this subset
if (exists("excluded.mzXMLfiles") & length("excluded.mzXMLfiles")>0) {
excludedfiles = getFNmatches(IDnumlist = excluded.mzXMLfiles, filelist = mzXMLfiles.raw) # index files to be excluded
print(paste0("The following files will be excluded from processing based on user's input:"))
print(mzXMLfiles.raw[excludedfiles])
mzXMLfiles = mzXMLfiles.raw[-excludedfiles] # exclude the files from mzXMLfiles
} else {
mzXMLfiles = mzXMLfiles.raw
}
################# Create xcmsSet using selected settings #############
print(paste0("Creating xcmsSet object from ",length(mzXMLfiles)," mzXML files remaining in dataset using specified settings..."))
# create xcms xset object; runs WAY faster with multicore tasking enabled;
xset_centWave = xcmsSet(mzXMLfiles,
method = "centWave",
profparam = centW.profparam,
ppm = centW.ppm,
peakwidth = c(centW.min_peakwidth,centW.max_peakwidth),
fitgauss = centW.fitgauss,
noise = centW.noise,
mzdiff = centW.mzdiff,
verbose.columns = centW.verbose.columns,
snthresh = centW.snthresh,
integrate = centW.integrate,
prefilter = centW.prefilter,
mzCenterFun = centW.mzCenterFun,
# sleep = centW.sleep
nSlaves = centW.nSlaves
)
print(paste0("xcmsSet object xset_centWave created:"))
print(xset_centWave)
# Some notes:
#
# 1. If using massifquant or centWave and you are sure your input data are centroided, can ignore warning message "It looks like this file is in profile mode. [method] can process only centroid mode data !" since this is just based on a heuristic. That is, you can ignore the message if you are certain data are in centroid mode. You can verify this by opening one of your converted .mzXML files in a text reader. You should see: <dataProcessing centroided="1"></dataProcessing> (a "0" is bad)
#
# For more on this error, see http://metabolomics-forum.com/viewtopic.php?f=8&t=267 or https://groups.google.com/forum/#!topic/xcms/xybDDQTaQiY
#
# 2. So long as the number of peak data insertion problems is relatively low (i.e., < 100), you can safely ignore the error. Otherwise, might try lowering the ppm
#
# 3. On-the-fly plotting features (i.e., with sleep ≥ 0.001 enabled) don't appear to function properly in Mac RStudio
#####################################################################################
##### Grouping and retention time correction using xcms (and IPO, if desired) #######
#####################################################################################
################# Perform grouping and retention time correction on dataset #############
print(paste0("Performing grouping and retention time correction on dataset"))
print(paste0("Using group.density and retcor.",retcor.meth))
# initial grouping
# # method "nearest" with settings below seems to work better than method = "density," but takes absolutely forever; however, it seems to take less time crunching centWave picked data than massifquant picked data
# xset_centWave = group(xset_centWave,
# method = "nearest",
# mzVsRTbalance=10,
# mzCheck=0.2,
# rtCheck=30,
# kNN=10
# )
# using method = "density" with settings from above
xset_gr = group(xset_centWave,
method = "density",
bw = density.bw,
minfrac = density.minfrac,
minsamp = density.minsamp,
mzwid = density.mzwid,
max = density.max,
sleep = density.sleep
)
# chromatographic alignment (retention time correction)
if (retcor.meth=="loess") {
xset_gr.ret = retcor(xset_gr,
# method = "loess", # this appears unnecessary
missing = loess.missing,
extra = loess.extra,
smooth = "loess",
span = loess.span,
family = loess.family,
plottype = loess.plottype,
col = NULL,
ty = NULL
)
} else if (retcor.meth=="obiwarp") {
xset_gr.ret = retcor.peakgroups(xset_gr,
method = "obiwarp",
plottype = obiwarp.plottype,
profStep = obiwarp.profStep,
center = obiwarp.center,
response = obiwarp.response,
distFunc = obiwarp.distFunc,
gapInit = obiwarp.gapInit,
gapExtend = obiwarp.gapInit,
factorDiag = obiwarp.factorDiag,
factorGap = obiwarp.factorGap,
localAlignment = obiwarp.localAlignment,
initPenalty = 0
)
}
# perform grouping again
print(paste0("Performing second peak grouping after application of retcor..."))
# using method = "density" with settings from above
xset_gr.ret.rg = group(xset_gr.ret,
method = "density",
bw = density.bw,
minfrac = density.minfrac,
minsamp = density.minsamp,
mzwid = density.mzwid,
max = density.max,
sleep = density.sleep
)
# fill missing peaks
print(paste0("Filling missing peaks..."))
xset_gr.ret.rg.fill = fillPeaks.chrom(xset_gr.ret.rg, nSlaves = 4)
#####################################################################################
##### Isotope peak identification, creation of xsAnnotate object using CAMERA #######
#####################################################################################
print(paste0("Applying CAMERA to identify isotopic peaks, create xsAnnotate object, and create CAMERA pseudospectra using correlation of xcms peak groups between and within samples. These pseudospectra are the groups within which the adduct hierarchy and retention time screening criteria will be applied using LOBSTAHS"))
# first, a necessary workaround to avoid a import error; see https://support.bioconductor.org/p/69414/
imports = parent.env(getNamespace("CAMERA"))
unlockBinding("groups", imports)
imports[["groups"]] = xcms::groups
lockBinding("groups", imports)
# create annotated xset using wrapper annotate(), allowing us to perform all CAMERA tasks at once
xset_a = annotate(xset_gr.ret.rg.fill,
quick=FALSE, # set to FALSE because we want to run groupCorr; will also cause CAMERA to run adduct annotation. while LOBSTAHS will do its own adduct identification later, it doesn't hurt to do this now if it lets CAMERA create better pseudospectra
sample=NA, # use all samples
nSlaves=4, # use 4 sockets
# group FWHM settings
# using defaults for now
sigma=6,
perfwhm=0.6,
# groupCorr settings
# using defaults for now
cor_eic_th=0.75,
graphMethod="hcs",
pval=0.05,
calcCiS=TRUE,
calcIso=TRUE,
calcCaS=FALSE, # weird results with this set to TRUE
# findIsotopes settings
maxcharge=4,
maxiso=4,
minfrac=0.5, # 0.25?
# adduct annotation settings
psg_list=NULL,
rules=NULL,
polarity=subset.polarity,
multiplier=3,
max_peaks=100,
# common to multiple tasks
intval="into",
ppm=2.5,
mzabs=0.0015
)
cleanParallel(xset_a) # kill sockets
# at this point, should have an xsAnnotate object called "xset_a" in hand, which will serve as the primary input to the main screening and annotation function "doLOBscreen" in LOBSTAHS
print(paste0("xsAnnotate object 'xset_a' has been created. User can now use LOBSTAHS to perform screening..."))
print(xset_a)
library(LOBSTAHS)
LOBset = doLOBscreen(xset_a, polarity="negative",match.ppm=2.5)
getLOBpeaklist(LOBset,gen.csv=TRUE)
Exp_13_neg = LOBset
save(file = "Exp_13_neg.RData", Exp_13_neg)
|
76ff7f8cf83b8e2937fec489b9f2a06423f08bed | d522ef4f0a283059649a2209be2365b1268007b1 | /man/sim.Stasis.RW.Rd | 4e63db6f01c4bc44e17b1349382041c10b4612cc | [] | no_license | cran/paleoTS | 70280825ad602403414e2a61bbc0910c1fd2bec7 | afe481e8e7837942a9a5115adc4415dadf1f4245 | refs/heads/master | 2022-09-20T01:32:29.993387 | 2022-08-08T18:10:06 | 2022-08-08T18:10:06 | 17,698,190 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,568 | rd | sim.Stasis.RW.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/complexModels.R
\name{sim.Stasis.RW}
\alias{sim.Stasis.RW}
\title{Simulate trait evolution with a mode shift}
\usage{
sim.Stasis.RW(
ns = c(20, 20),
order = c("Stasis-RW", "RW-Stasis"),
anc = 0,
omega = 1,
ms = 0,
vs = 1,
vp = 1,
nn = 30,
tt = NULL
)
}
\arguments{
\item{ns}{vector of the number of samples in each segment}
\item{order}{whether stasis or random walk come first, one of \code{"Stasis-RW"} or
\code{"RW-Stasis"}}
\item{anc}{starting trait value}
\item{omega}{variance of stasis segment}
\item{ms}{step mean during random walk segment}
\item{vs}{step variance during random walk segment}
\item{vp}{phenotypic trait variance for each population}
\item{nn}{vector of sample sizes for each population}
\item{tt}{vector of times (ages) for each population}
}
\value{
a \code{paleoTSfit} object
}
\description{
Trait evolution is modeled as a shift from a random walk (general or unbiased)
to stasis, or vice versa.
}
\details{
The \code{anc} argument is the starting trait value, and if the
first segment is stasis, this is also the value of the stasis mean. When the first segment
is a random walk, the stasis mean in the second segment is equal to the true trait mean at
the end of the initial random walk.
}
\examples{
x1 <- sim.Stasis.RW(omega = 0.1, ms = 5, order = "Stasis-RW")
x2 <- sim.Stasis.RW(omega = 0.1, ms = 5, order = "RW-Stasis")
plot(x1)
plot(x2, add = TRUE, col = "blue")
abline(v = 19, lty=3)
}
\seealso{
\code{\link{fitModeShift}}
}
|
56398751fa24dd9cc787baa2812c39f42ec2b144 | 1e45d64203edd6d5125980bf23db3daedc9da89d | /sources/modules/VEHouseholdVehicles/R/AssignVehicleFeaturesFuture.R | 1ae8635c36aadd3917efe558b249a95e12e675fe | [
"Apache-2.0"
] | permissive | VisionEval/VisionEval-Dev | 5c1600032307c729b96470355c40ef6cbbb9f05b | 701bf7f68d94bf1b4b73a0dfd622672a93d4af5f | refs/heads/development | 2023-08-19T17:53:55.037761 | 2023-08-15T12:33:50 | 2023-08-15T12:33:50 | 144,179,471 | 6 | 34 | Apache-2.0 | 2023-09-07T20:39:13 | 2018-08-09T16:44:22 | R | UTF-8 | R | false | false | 12,089 | r | AssignVehicleFeaturesFuture.R | #' @include AssignVehicleFeatures.R
NULL
#========================
#AssignVehicleFeaturesFuture.R
#========================
# This module is a vehicle model from RPAT version.
# This module assigns household vehicle ownership, vehicle types, and ages to
# each household vehicle, based on household, land use,
# and transportation system characteristics. Vehicles are classified as either
# a passenger car (automobile) or a light truck (pickup trucks, sport utility
# vehicles, vans, etc.). A 'Vehicle' table is created which has a record for
# each household vehicle. The type and age of each vehicle owned or leased by
# households is assigned to this table along with the household ID (HhId)to
# enable this table to be joined with the household table.
# library(visioneval)
#=============================================
#SECTION 1: ESTIMATE AND SAVE MODEL PARAMETERS
#=============================================
## Current implementation
### The current version implements the models used in the RPAT (GreenSTEP)
### ecosystem.
## Future Development
## Use estimation data set to create models
# Load vehicle ownership model
load("data/VehOwnModels_ls.rda")
# Load LtTrk Ownership
#-------------------------
# LtTrk ownership model
load("data/LtTruckModels_ls.rda")
#================================================
#SECTION 2: DEFINE THE MODULE DATA SPECIFICATIONS
#================================================
#Define the data specifications
#------------------------------
AssignVehicleFeaturesFutureSpecifications <- list(
#Level of geography module is applied at
RunBy = "Region",
#Specify new tables to be created by Inp if any
#---------------------------
#Specify new tables to be created by Set if any
#Specify input data (similar to the assignvehiclefeatures module from this package)
#Specify data to be loaded from data store
Get = items(
item(
NAME = "Marea",
TABLE = "Marea",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "",
ISELEMENTOF = ""
),
item(
NAME = items(
"TranRevMiPCFuture",
"FwyLaneMiPCFuture"),
TABLE = "Marea",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/PRSN",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "Marea",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "",
ISELEMENTOF = ""
),
item(
NAME = "Bzone",
TABLE = "Bzone",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "",
ISELEMENTOF = ""
),
item(
NAME =
items("HhId",
"Azone",
"Marea"),
TABLE = "Household",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
PROHIBIT = "",
ISELEMENTOF = ""
),
item(
NAME = "Income",
TABLE = "Household",
GROUP = "Year",
TYPE = "currency",
UNITS = "USD.2001",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "HhType",
TABLE = "Household",
GROUP = "Year",
TYPE = "character",
UNITS = "category",
PROHIBIT = "",
ISELEMENTOF = c("SF", "MF", "GQ")
),
item(
NAME = "HhSize",
TABLE = "Household",
GROUP = "Year",
TYPE = "people",
UNITS = "PRSN",
PROHIBIT = c("NA", "<= 0"),
ISELEMENTOF = ""
),
item(
NAME =
items(
"Age0to14",
"Age65Plus"),
TABLE = "Household",
GROUP = "Year",
TYPE = "people",
UNITS = "PRSN",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "HhPlaceTypes",
TABLE = "Household",
GROUP = "Year",
TYPE = "character",
UNITS = "category",
PROHIBITED = "NA"
),
item(
NAME = "DrvLevels",
TABLE = "Household",
GROUP = "Year",
TYPE = "character",
UNITS = "category",
PROHIBITED = "NA",
ISELEMENTOF = c("Drv1", "Drv2", "Drv3Plus")
),
item(
NAME = "LtTruckProp",
TABLE = "Model",
GROUP = "Global",
TYPE = "double",
UNITS = "multiplier",
PROHIBIT = c('NA', '< 0'),
ISELEMENTOF = ""
),
item(
NAME = items(
"AutoMpg",
"LtTruckMpg",
"TruckMpg",
"BusMpg",
"TrainMpg"
),
TABLE = "Vehicles",
GROUP = "Global",
TYPE = "compound",
UNITS = "MI/GAL",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
),
item(
NAME = "ModelYear",
TABLE = "Vehicles",
GROUP = "Global",
TYPE = "time",
UNITS = "YR",
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = ""
)
),
#---------------------------
#Specify data to saved in the data store
Set = items(
item(
NAME =
items("HhIdFuture",
"VehIdFuture",
"AzoneFuture",
"MareaFuture"),
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "character",
UNITS = "ID",
NAVALUE = -1,
PROHIBIT = "NA",
ISELEMENTOF = "",
DESCRIPTION =
items("Unique household ID using future data",
"Unique vehicle ID using future data",
"Azone ID using future data",
"Marea ID using future data")
),
item(
NAME = "VehiclesFuture",
TABLE = "Household",
GROUP = "Year",
TYPE = "vehicles",
UNITS = "VEH",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Number of automobiles and light trucks owned or leased by the household
using future data"
),
item(
NAME = "TypeFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "character",
UNITS = "category",
NAVALUE = -1,
PROHIBIT = "NA",
ISELEMENTOF = c("Auto", "LtTrk"),
SIZE = 5,
DESCRIPTION = "Vehicle body type: Auto = automobile, LtTrk = light trucks (i.e. pickup, SUV, Van) using future data"
),
item(
NAME = "AgeFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "time",
UNITS = "YR",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = "Vehicle age in years using future data"
),
item(
NAME = items(
"NumLtTrkFuture",
"NumAutoFuture"),
TABLE = "Household",
GROUP = "Year",
TYPE = "vehicles",
UNITS = "VEH",
NAVALUE = -1,
PROHIBIT = c("NA", "< 0"),
ISELEMENTOF = "",
SIZE = 0,
DESCRIPTION = items(
"Number of light trucks (pickup, sport-utility vehicle, and van) owned or leased by household using future data",
"Number of automobiles (i.e. 4-tire passenger vehicles that are not light trucks) owned or leased by household using future data"
)
),
item(
NAME = "MileageFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "compound",
UNITS = "MI/GAL",
PROHIBIT = c("NA", "<0"),
ISELEMENTOF = "",
DESCRIPTION = "Mileage of vehicles (automobiles and light truck) using future data"
),
item(
NAME = "DvmtPropFuture",
TABLE = "Vehicle",
GROUP = "Year",
TYPE = "double",
UNITS = "proportion",
PROHIBIT = c("NA", "<0", "> 1"),
ISELEMENTOF = "",
DESCRIPTION = "Proportion of average household DVMT using future data"
)
)
)
#Save the data specifications list
#---------------------------------
#' Specifications list for AssignVehicleFeaturesFuture module
#'
#' A list containing specifications for the AssignVehicleFeaturesFuture module.
#'
#' @format A list containing 3 components:
#' \describe{
#' \item{RunBy}{the level of geography that the module is run at}
#' \item{Get}{module inputs to be read from the datastore}
#' \item{Set}{module outputs to be written to the datastore}
#' }
#' @source AssignVehicleFeaturesFuture.R script.
"AssignVehicleFeaturesFutureSpecifications"
visioneval::savePackageDataset(AssignVehicleFeaturesFutureSpecifications, overwrite = TRUE)
#=======================================================
#SECTION 3: DEFINE FUNCTIONS THAT IMPLEMENT THE SUBMODEL
#=======================================================
#Main module function that calculates vehicle features
#------------------------------------------------------
#' Create vehicle table and populate with vehicle type, age, and mileage records.
#'
#' \code{AssignVehicleFeaturesFuture} populate vehicle table with
#' vehicle type, age, and mileage records using future data.
#'
#' This function populates vehicle table with records of
#' vehicle types, ages, mileage, and mileage proportions
#' along with household IDs using future data.
#'
#' @param L A list containing the components listed in the Get specifications
#' for the module.
#' @return A list containing the components specified in the Set
#' specifications for the module.
#' @name AssignVehicleFeaturesFuture
#' @import visioneval stats
#' @export
AssignVehicleFeaturesFuture <- function(L) {
#Set up
#------
# Function to rename variables to be consistent with Get specfications
# of AssignVehicleFeatures.
# Function to add suffix 'Future' at the end of all the variable names
AddSuffixFuture <- function(x, suffix = "Future"){
# Check if x is a list
if(is.list(x)){
if(length(x) > 0){
# Check if elements of x is a list
isElementList <- unlist(lapply(x,is.list))
# Modify the names of elements that are not the list
noList <- x[!isElementList]
if(!identical(names(noList),character(0))){
names(noList) <- paste0(names(noList),suffix)
}
# Repeat the function for elements that are list
yesList <- lapply(x[isElementList], AddSuffixFuture, suffix = suffix)
x <- unlist(list(noList,yesList), recursive = FALSE)
return(x)
}
return(x)
}
return(NULL)
}
# Function to remove suffix 'Future' from all the variable names
RemoveSuffixFuture <- function(x, suffix = "Future"){
# Check if x is a list
if(is.list(x)){
if(length(x) > 0){
# Check if elements of x is a list
isElementList <- unlist(lapply(x,is.list))
# Modify the names of elements that are not the list
noList <- x[!isElementList]
if(length(noList)>0){
names(noList) <- gsub(suffix,"",names(noList))
}
# Repeat the function for elements that are list
yesList <- lapply(x[isElementList], RemoveSuffixFuture, suffix = suffix)
x <- unlist(list(noList,yesList), recursive = FALSE)
return(x)
}
return(x)
}
return(NULL)
}
# Modify the input data set
L <- RemoveSuffixFuture(L)
#Return the results
#------------------
# Call the AssignVehicleFeatures function with the new dataset
Out_ls <- AssignVehicleFeatures(L)
# Add 'Future' suffix to all the variables
Out_ls <- AddSuffixFuture(Out_ls)
#Return the outputs list
return(Out_ls)
}
#================================
#Code to aid development and test
#================================
#Test code to check specifications, loading inputs, and whether datastore
#contains data needed to run module. Return input list (L) to use for developing
#module functions
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
# ModuleName = "AssignVehicleFeaturesFuture",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = FALSE
# )
# L <- TestDat_$L
#Test code to check everything including running the module and checking whether
#the outputs are consistent with the 'Set' specifications
#-------------------------------------------------------------------------------
# TestDat_ <- testModule(
# ModuleName = "AssignVehicleOwnership",
# LoadDatastore = TRUE,
# SaveDatastore = TRUE,
# DoRun = TRUE
# )
|
8e7365edd05dc811965915f0d44d2db61c1a4fb9 | d7505c673b2455ef8889c0595f96a0be03747b01 | /Documentos de Seminario en el lenguaje R/Programa Para Generar Datos Sinteticos.R | fd05f785795d2109382d252b188f6e0ac2e730f9 | [] | no_license | ElvinSantos93/SEMINARIO | b71af096c134d1277813a7c4030d14cb66e7afed | 28b84a4a4032d640e6066d075509bec9d411b55e | refs/heads/main | 2023-07-14T13:38:42.583880 | 2021-08-17T04:36:45 | 2021-08-17T04:36:45 | 380,421,606 | 0 | 0 | null | null | null | null | WINDOWS-1250 | R | false | false | 52,558 | r | Programa Para Generar Datos Sinteticos.R | ################################# GENERANDO DATOS SINTETICOS ######################################
#==================================================================================================
############### Instalando los paquetes requeridos para generar datos sinteticos ###############
install.packages("MVA")
install.packages("synthpop")
install.packages("tidyverse")
install.packages("univariateML")
install.packages("cowplot")
install.packages("Rcpp")
#==================================================================================================
#==================================================================================================
############### Cargando los paquetes requeridos para generar datos sinteticos ###############
library(MVA)
library(synthpop)
library(tidyverse)
library(univariateML)
library(cowplot)
library(Rcpp)
#==================================================================================================
#==================================================================================================
########## Data selection ##########
# Keep the seven air-pollution variables from the 41-city USairpollution data set (MVA package).
Datos_original <- USairpollution
vars <- c("SO2" , "temp" , "manu" , "popul" , "wind" , "precip" , "predays")
Datos_original <- Datos_original[, vars]
head(Datos_original) # Inspect the first 6 cities
# which(is.na(x)) prints integer(0) when nothing is missing, which is easy to misread as an
# error; anyNA() gives an explicit TRUE/FALSE answer instead.
anyNA(Datos_original) # FALSE: the data set has no missing values
#==================================================================================================
#==================================================================================================
########## Default synthesis ##########
# Fix the RNG seed so the synthesis is reproducible, then run syn() with its default settings.
my.seed <- 1500
sds.default <- syn(Datos_original, seed = my.seed)
print(sds.default)        # Summary of the default synthesis
print(names(sds.default)) # Components available on the resulting synds object
#==================================================================================================
#==================================================================================================
########## Generating the synthetic data ##########
# Draw m = 2 synthetic replicates of the original data using the same seed.
Datos_sinteticos <- syn(Datos_original, m = 2, seed = my.seed)
print(Datos_sinteticos)
#==================================================================================================
#==================================================================================================
############ Visual comparison of the original and synthetic data sets ############
compare(Datos_sinteticos, Datos_original)
# Same comparison, laid out on a 3 x 4 grid with custom colours; keep only the plot component.
comparacion_visual <- compare(
  Datos_sinteticos, Datos_original,
  nrow = 3, ncol = 4,
  cols = c("#62B6CB", "#1B4965")
)
comparacion_visual$plot
#==================================================================================================
### Construyendo modelos de regresión lineal a partir de nuestro conjunto de datos sinteticos y
### comparar dichos modelos con los modelos del conjunto de datos original
##################################################################################################
#==================================================================================================
#======== Modelo de regresion de la variable de interes (SO2) con las variables temp y manu =======
############## Prueba del efecto principal del grupo para confirmar la equivalencia ##############
full_SO2 = lm( SO2 ~ 1 + temp + manu, data = Datos_original )
null_SO2 = lm( SO2 ~ 1 + temp + manu + temp:manu, data = Datos_original )#
summary(null_SO2)
anova(null_SO2, full_SO2)
########### Modelo a partir de datos observados y modelo a partir de datos sinteticos ###########
model_orig <- lm(SO2 ~ 1 + temp + manu + temp:manu, data = Datos_original)# Modelo a partir de datos observados
model_orig_sum <- summary(model_orig)
model_syn <- lm.synds(SO2 ~ 1 + temp + manu + temp:manu, data = Datos_sinteticos)# Modelo a partir de datos sinteticos
model_syn_sum <- summary(model_syn)
############ Comparacion de los dos modelos ############
compare(model_syn, Datos_original)# Comparacion de los modelos
############ Estimaciones de los coeficientes de una sola sintesis ############
lm.synds(formula = SO2 ~ 1 + temp + manu + temp:manu, data = Datos_sinteticos)
#==================================================================================================
#==================================================================================================
#========== Regression of the response variable (SO2) on popul and wind ==========
############## Testing the interaction effect to confirm equivalence ##############
# NOTE(review): as in the previous section, full_SO2_n1 is the additive (reduced) model and
# null_SO2_n1 carries the popul:wind interaction; names kept for compatibility.
# Assignments switched from `=` to `<-` for consistency with the rest of the file.
full_SO2_n1 <- lm(SO2 ~ 1 + popul + wind, data = Datos_original)
null_SO2_n1 <- lm(SO2 ~ 1 + popul + wind + popul:wind, data = Datos_original)
summary(null_SO2_n1)
anova(null_SO2_n1, full_SO2_n1)
########### Model fitted on the observed data and model fitted on the synthetic data ###########
model_orig_m1 <- lm(SO2 ~ 1 + popul + wind + popul:wind, data = Datos_original)        # Model from the observed data
model_orig_sum_m1 <- summary(model_orig_m1)
model_syn_s1 <- lm.synds(SO2 ~ 1 + popul + wind + popul:wind, data = Datos_sinteticos) # Model from the synthetic data
model_syn_sum_s1 <- summary(model_syn_s1)
############ Comparing the two models ############
compare(model_syn_s1, Datos_original) # Overlap of the original and synthetic coefficient estimates
############ Coefficient estimates from a single synthesis ############
lm.synds(formula = SO2 ~ 1 + popul + wind + popul:wind, data = Datos_sinteticos)
#==================================================================================================
#==================================================================================================
#========== Regression of the response variable (SO2) on precip and predays ==========
############## Testing the interaction effect to confirm equivalence ##############
# NOTE(review): as in the previous sections, full_SO2_n2 is the additive (reduced) model and
# null_SO2_n2 carries the precip:predays interaction; names kept for compatibility.
# Assignments switched from `=` to `<-` for consistency with the rest of the file.
full_SO2_n2 <- lm(SO2 ~ 1 + precip + predays, data = Datos_original)
null_SO2_n2 <- lm(SO2 ~ 1 + precip + predays + precip:predays, data = Datos_original)
summary(null_SO2_n2)
anova(null_SO2_n2, full_SO2_n2)
########### Model fitted on the observed data and model fitted on the synthetic data ###########
model_orig_m2 <- lm(SO2 ~ 1 + precip + predays + precip:predays, data = Datos_original)        # Model from the observed data
model_orig_sum_m2 <- summary(model_orig_m2)
model_syn_s2 <- lm.synds(SO2 ~ 1 + precip + predays + precip:predays, data = Datos_sinteticos) # Model from the synthetic data
model_syn_sum_s2 <- summary(model_syn_s2)
############ Comparing the two models ############
compare(model_syn_s2, Datos_original) # Overlap of the original and synthetic coefficient estimates
############ Coefficient estimates from a single synthesis ############
lm.synds(formula = SO2 ~ 1 + precip + predays + precip:predays, data = Datos_sinteticos)
#==================================================================================================
#==================================================================================================
#========== Regression model of the variable of interest on all remaining variables ==========
############## Test of the main effect to confirm equivalence ##############
# NOTE(review): full_SO2_n3 and null_SO2_n3 are fitted with the SAME additive formula (unlike
# the previous sections, no interaction term is added to the second model), so the anova()
# below compares two identical models and its F-test is uninformative -- confirm whether a
# reduced or interaction model was intended here.
full_SO2_n3 = lm( SO2 ~ 1 + temp + manu + popul + wind + precip + predays, data = Datos_original )
null_SO2_n3 = lm( SO2 ~ 1 + temp + manu + popul + wind + precip + predays , data = Datos_original )
summary(null_SO2_n3)
anova(null_SO2_n3, full_SO2_n3)
########### Model fitted on the observed data and model fitted on the synthetic data ###########
model_orig_m3 <- lm(SO2 ~ 1 + temp + manu + popul + wind + precip + predays , data = Datos_original)# Model fitted on the observed data
model_orig_sum_m3 <- summary(model_orig_m3)
model_syn_s3 <- lm.synds(SO2 ~ 1 + temp + manu + popul + wind + precip + predays , data = Datos_sinteticos)# Model fitted on the synthetic data
model_syn_sum_s3 <- summary(model_syn_s3)
############ Comparing the two models ############
compare(model_syn_s3, Datos_original)# Comparing the two models
############ Coefficient estimates from a single synthesis ############
lm.synds(formula = SO2 ~ 1 + temp + manu + popul + wind + precip + predays , data = Datos_sinteticos)
#==================================================================================================
############################################################
# AJUSTE DE DISTRIBUCIONES #
############################################################
#==================================================================================================
######### COMPARACION DE DISTRIBUCIONES, UTILIZANDO LAS METRICAS de AJUSTE "AIC" y "BIC" ##########
###################################################################################################
#==================================================================================================
#Se comparan únicamente las distribuciones con un dominio [0, +inf)
#### Comparacion de la variable "SO2" de los Datos Originales, con la metrica de ajuste "AIC" ####
comparacion_aic <- AIC(
mlbetapr(Datos_original$SO2),
mlexp(Datos_original$SO2),
mlinvgamma(Datos_original$SO2),
mlgamma(Datos_original$SO2),
mllnorm(Datos_original$SO2),
mlrayleigh(Datos_original$SO2),
mlinvgauss(Datos_original$SO2),
mlweibull(Datos_original$SO2),
mlinvweibull(Datos_original$SO2),
mllgamma(Datos_original$SO2)
)
comparacion_aic %>% rownames_to_column(var = "distribucion") %>% arrange(AIC)
#### Comparacion de la variable "SO2" de los Datos Originales, con la metrica de ajuste "BIC" ####
comparacion_bic <- BIC(
mlbetapr(Datos_original$SO2),
mlexp(Datos_original$SO2),
mlinvgamma(Datos_original$SO2),
mlgamma(Datos_original$SO2),
mllnorm(Datos_original$SO2),
mlrayleigh(Datos_original$SO2),
mlinvgauss(Datos_original$SO2),
mlweibull(Datos_original$SO2),
mlinvweibull(Datos_original$SO2),
mllgamma(Datos_original$SO2)
)
comparacion_bic %>% rownames_to_column(var = "distribucion") %>% arrange(BIC)
#==================================================================================================
## REPRESENTACION GRAFICA DE LAS DISTRIBUCIONES QUE SE MEJOR SE AJUSTAN DE LOS DATOS ORIGINALES ##
#==================================================================================================
hist(Datos_original$SO2,
main = "Distribución del contenido de SO2 en el Aire",
freq = FALSE,
ylim = c(0, 0.00025))
lines(mllgamma(Datos_original$SO2), lwd = 2, lty = 1, col = "blue")
lines(mlinvgauss(Datos_original$SO2), lwd = 2, lty = 2, col = "red")
legend(x = 15000, y = 0.0001, legend = c("lgamma", "invgauss"),
col = c("blue", "red"), lty = 1:2)
rug(Datos_original$SO2)
##################################################################################################
# ggplot version: density histogram of the observed SO2 values with the two
# best-fitting candidate densities (log-gamma, inverse-Gaussian) overlaid.
fit_so2_plot_lg <- mllgamma(Datos_original$SO2)
fit_so2_plot_ig <- mlinvgauss(Datos_original$SO2)
ggplot(Datos_original) +
  geom_histogram(aes(x = SO2, y = after_stat(density)),
                 bins = 40, alpha = 0.3, color = "black") +
  geom_rug(aes(x = SO2)) +
  stat_function(aes(color = "log-gamma"), size = 1,
                fun = function(v) dml(x = v, obj = fit_so2_plot_lg)) +
  stat_function(aes(color = "inverse-gaussian"), size = 1,
                fun = function(v) dml(x = v, obj = fit_so2_plot_ig)) +
  scale_color_manual(breaks = c("log-gamma", "inverse-gaussian"),
                     values = c("log-gamma" = "red", "inverse-gaussian" = "blue")) +
  labs(title = "Distribución del contenido de SO2 en el Aire",
       color = "Distribución") +
  theme_bw() +
  theme(legend.position = "bottom")
#==================================================================================================
#==================================================================================================
############### Maximum-likelihood fits for the original SO2 data ###############
# Log-gamma fit of SO2 (one of the two top-ranked candidates above).
fit_so2_lgamma <- mllgamma(x = Datos_original$SO2)
summary(fit_so2_lgamma)
# Inverse-Gaussian fit of SO2.
fit_so2_invgauss <- mlinvgauss(x = Datos_original$SO2)
summary(fit_so2_invgauss)
#==================================================================================================
#==================================================================================================
########## Bootstrap intervals for the original-data fits ##########
# Central 5th-95th percentile bootstrap interval of each fit's parameters (1000 resamples).
bootstrapml(fit_so2_lgamma, probs = c(0.05, 0.95), reps = 1000)
bootstrapml(fit_so2_invgauss, probs = c(0.05, 0.95), reps = 1000)
#==================================================================================================
#==================================================================================================
############ Fresh SO2 draws from each fitted distribution ############
# Five simulated values per fit; the seed is reset so both draws share the RNG stream.
set.seed(1500)
rml(n = 5, obj = fit_so2_lgamma)
set.seed(1500)
rml(n = 5, obj = fit_so2_invgauss)
#==================================================================================================
###################################################################################################
#==================================================================================================
# Only distributions with support on [0, +inf) are compared.
#### Candidate fits for "SO2" in synthetic dataset 1, ranked by AIC ####
comparacion_aic <- AIC(
  mlbetapr(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlexp(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlinvgamma(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlgamma(Datos_sinteticos[["syn"]][[1]]$SO2),
  mllnorm(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlrayleigh(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlinvgauss(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlweibull(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlinvweibull(Datos_sinteticos[["syn"]][[1]]$SO2),
  mllgamma(Datos_sinteticos[["syn"]][[1]]$SO2)
)
# Best fit first (lowest AIC).
comparacion_aic %>% rownames_to_column(var = "distribucion") %>% arrange(AIC)
#### Candidate fits for "SO2" in synthetic dataset 1, ranked by BIC ####
comparacion_bic <- BIC(
  mlbetapr(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlexp(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlinvgamma(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlgamma(Datos_sinteticos[["syn"]][[1]]$SO2),
  mllnorm(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlrayleigh(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlinvgauss(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlweibull(Datos_sinteticos[["syn"]][[1]]$SO2),
  mlinvweibull(Datos_sinteticos[["syn"]][[1]]$SO2),
  mllgamma(Datos_sinteticos[["syn"]][[1]]$SO2)
)
# Best fit first (lowest BIC).
comparacion_bic %>% rownames_to_column(var = "distribucion") %>% arrange(BIC)
#==================================================================================================
## BASE-GRAPHICS PLOT OF THE BEST-FITTING DISTRIBUTIONS FOR SYNTHETIC DATASET 1 ##
#==================================================================================================
# NOTE(review): ylim/legend coordinates look copy-pasted from a larger-scaled variable;
# confirm the curves and legend are visible.
# NOTE(review): the second curve is an mlgamma() (plain gamma) fit, but the legend labels
# it "lgamma"; the log-gamma fitter is mllgamma(), which the fitting code further below
# uses. Confirm which distribution was intended.
hist(Datos_sinteticos[["syn"]][[1]]$SO2,
     main = "Distribución del contenido de SO2 en el Aire",
     freq = FALSE,
     ylim = c(0, 0.00025))
lines(mlinvgauss(Datos_sinteticos[["syn"]][[1]]$SO2), lwd = 2, lty = 1, col = "blue")  # inverse-Gaussian fit
lines(mlgamma(Datos_sinteticos[["syn"]][[1]]$SO2), lwd = 2, lty = 2, col = "red")      # gamma fit (labelled "lgamma")
legend(x = 15000, y = 0.0001, legend = c("invgauss", "lgamma"),
       col = c("blue", "red"), lty = 1:2)
rug(Datos_sinteticos[["syn"]][[1]]$SO2)
##################################################################################################
# ggplot version: density histogram of synthetic dataset 1 (SO2) with fitted curves.
# NOTE(review): the curve labelled "log-gamma" is produced with mlgamma() (a plain gamma
# fit); the log-gamma fitter is mllgamma(), which the fitting code below uses. Confirm
# which distribution was intended.
# NOTE(review): y = "SDF1" overrides the y-axis label -- presumably "synthetic data
# fit 1"; verify this is the intended axis title.
ggplot(data = Datos_sinteticos[["syn"]][[1]]) +
  geom_histogram(aes(x = SO2, y = after_stat(density)),
                 bins = 40,
                 alpha = 0.3, color = "black") +
  geom_rug(aes(x = SO2)) +
  stat_function(fun = function(.x){dml(x = .x, obj = mlinvgauss(Datos_sinteticos[["syn"]][[1]]$SO2))},
                aes(color = "inverse-gaussian"),
                size = 1) +
  stat_function(fun = function(.x){dml(x = .x, obj = mlgamma(Datos_sinteticos[["syn"]][[1]]$SO2))},
                aes(color = "log-gamma"),
                size = 1) +
  scale_color_manual(breaks = c("inverse-gaussian", "log-gamma"),
                     values = c("inverse-gaussian" = "red", "log-gamma" = "blue")) +
  labs(title = "Distribución del contenido de SO2 en el Aire",
       color = "Distribución",
       y = "SDF1") +
  theme_bw() +
  theme(legend.position = "bottom")
#==================================================================================================
#==================================================================================================
############### Maximum-likelihood fits for synthetic dataset 1 ###############
# Inverse-Gaussian fit of the synthetic SO2 values.
fit_syn1_invgauss <- mlinvgauss(x = Datos_sinteticos[["syn"]][[1]]$SO2)
summary(fit_syn1_invgauss)
# Log-gamma fit of the synthetic SO2 values.
fit_syn1_lgamma <- mllgamma(x = Datos_sinteticos[["syn"]][[1]]$SO2)
summary(fit_syn1_lgamma)
#==================================================================================================
#==================================================================================================
########## Bootstrap intervals for the synthetic-data fits ##########
# Central 5th-95th percentile bootstrap interval of each fit's parameters (1000 resamples).
bootstrapml(fit_syn1_invgauss, probs = c(0.05, 0.95), reps = 1000)
bootstrapml(fit_syn1_lgamma, probs = c(0.05, 0.95), reps = 1000)
#==================================================================================================
#==================================================================================================
############ Fresh draws from the distributions fitted to synthetic dataset 1 ############
# Five simulated values per fit; the seed is reset so both draws share the RNG stream.
set.seed(1500)
rml(n = 5, obj = fit_syn1_invgauss)
set.seed(1500)
rml(n = 5, obj = fit_syn1_lgamma)
#==================================================================================================
###################################################################################################
#==================================================================================================
# Only distributions with support on [0, +inf) are compared.
#### Candidate fits for "SO2" in synthetic dataset 2, ranked by AIC ####
comparacion_aic <- AIC(
  mlbetapr(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlexp(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlinvgamma(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlgamma(Datos_sinteticos[["syn"]][[2]]$SO2),
  mllnorm(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlrayleigh(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlinvgauss(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlweibull(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlinvweibull(Datos_sinteticos[["syn"]][[2]]$SO2),
  mllgamma(Datos_sinteticos[["syn"]][[2]]$SO2)
)
# Best fit first (lowest AIC).
comparacion_aic %>% rownames_to_column(var = "distribucion") %>% arrange(AIC)
#### Candidate fits for "SO2" in synthetic dataset 2, ranked by BIC ####
# (The original header said "temp" here, but the data compared is SO2.)
comparacion_bic <- BIC(
  mlbetapr(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlexp(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlinvgamma(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlgamma(Datos_sinteticos[["syn"]][[2]]$SO2),
  mllnorm(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlrayleigh(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlinvgauss(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlweibull(Datos_sinteticos[["syn"]][[2]]$SO2),
  mlinvweibull(Datos_sinteticos[["syn"]][[2]]$SO2),
  mllgamma(Datos_sinteticos[["syn"]][[2]]$SO2)
)
# Best fit first (lowest BIC).
comparacion_bic %>% rownames_to_column(var = "distribucion") %>% arrange(BIC)
#==================================================================================================
## BASE-GRAPHICS PLOT OF THE BEST-FITTING DISTRIBUTIONS FOR SYNTHETIC DATASET 2 ##
#==================================================================================================
# NOTE(review): ylim = c(0, 0.0025) and legend(x = 15000, ...) may not match this
# variable's scale; confirm the curves and legend are visible.
hist(Datos_sinteticos[["syn"]][[2]]$SO2,
     main = "Distribución del contenido de SO2 en el Aire",
     freq = FALSE,
     ylim = c(0, 0.0025))
lines(mlinvweibull(Datos_sinteticos[["syn"]][[2]]$SO2), lwd = 2, lty = 1, col = "blue")  # inverse-Weibull fit
lines(mlinvgamma(Datos_sinteticos[["syn"]][[2]]$SO2), lwd = 2, lty = 2, col = "red")     # inverse-gamma fit
legend(x = 15000, y = 0.0001, legend = c("invweibull", "invgamma"),
       col = c("blue", "red"), lty = 1:2)
rug(Datos_sinteticos[["syn"]][[2]]$SO2)
##################################################################################################
# ggplot version: density histogram of synthetic dataset 2 (SO2) with the two
# best-fitting candidate densities (inverse-Weibull, inverse-gamma) overlaid.
fit_syn2_plot_iw <- mlinvweibull(Datos_sinteticos[["syn"]][[2]]$SO2)
fit_syn2_plot_ig <- mlinvgamma(Datos_sinteticos[["syn"]][[2]]$SO2)
ggplot(Datos_sinteticos[["syn"]][[2]]) +
  geom_histogram(aes(x = SO2, y = after_stat(density)),
                 bins = 40, alpha = 0.3, color = "black") +
  geom_rug(aes(x = SO2)) +
  stat_function(aes(color = "inverse-weibull"), size = 1,
                fun = function(v) dml(x = v, obj = fit_syn2_plot_iw)) +
  stat_function(aes(color = "inverse-gamma"), size = 1,
                fun = function(v) dml(x = v, obj = fit_syn2_plot_ig)) +
  scale_color_manual(breaks = c("inverse-weibull", "inverse-gamma"),
                     values = c("inverse-weibull" = "red", "inverse-gamma" = "blue")) +
  labs(title = "Distribución del contenido de SO2 en el Aire",
       color = "Distribución",
       y = "SDF2") +
  theme_bw() +
  theme(legend.position = "bottom")
#==================================================================================================
#==================================================================================================
############### Maximum-likelihood fits for synthetic dataset 2 ###############
# Inverse-Weibull fit of the synthetic SO2 values.
fit_syn2_invweibull <- mlinvweibull(x = Datos_sinteticos[["syn"]][[2]]$SO2)
summary(fit_syn2_invweibull)
# Inverse-gamma fit of the synthetic SO2 values.
fit_syn2_invgamma <- mlinvgamma(x = Datos_sinteticos[["syn"]][[2]]$SO2)
summary(fit_syn2_invgamma)
#==================================================================================================
#==================================================================================================
########## Bootstrap intervals for the synthetic-data fits ##########
# Central 5th-95th percentile bootstrap interval of each fit's parameters (1000 resamples).
bootstrapml(fit_syn2_invweibull, probs = c(0.05, 0.95), reps = 1000)
bootstrapml(fit_syn2_invgamma, probs = c(0.05, 0.95), reps = 1000)
#==================================================================================================
#==================================================================================================
############ Fresh draws from the distributions fitted to synthetic dataset 2 ############
# Five simulated values per fit; the seed is reset so both draws share the RNG stream.
set.seed(1500)
rml(n = 5, obj = fit_syn2_invweibull)
set.seed(1500)
rml(n = 5, obj = fit_syn2_invgamma)
#==================================================================================================
#==================================================================================================
# Only distributions with support on [0, +inf) are compared.
#### Candidate fits for the "temp" variable of the original data, ranked by AIC ####
comparacion_aic <- AIC(
  mlbetapr(Datos_original$temp),
  mlexp(Datos_original$temp),
  mlinvgamma(Datos_original$temp),
  mlgamma(Datos_original$temp),
  mllnorm(Datos_original$temp),
  mlrayleigh(Datos_original$temp),
  mlinvgauss(Datos_original$temp),
  mlweibull(Datos_original$temp),
  mlinvweibull(Datos_original$temp),
  mllgamma(Datos_original$temp)
)
# Best fit first (lowest AIC).
comparacion_aic %>% rownames_to_column(var = "distribucion") %>% arrange(AIC)
#### Candidate fits for the "temp" variable of the original data, ranked by BIC ####
comparacion_bic <- BIC(
  mlbetapr(Datos_original$temp),
  mlexp(Datos_original$temp),
  mlinvgamma(Datos_original$temp),
  mlgamma(Datos_original$temp),
  mllnorm(Datos_original$temp),
  mlrayleigh(Datos_original$temp),
  mlinvgauss(Datos_original$temp),
  mlweibull(Datos_original$temp),
  mlinvweibull(Datos_original$temp),
  mllgamma(Datos_original$temp)
)
# Best fit first (lowest BIC).
comparacion_bic %>% rownames_to_column(var = "distribucion") %>% arrange(BIC)
#==================================================================================================
## BASE-GRAPHICS PLOT OF THE BEST-FITTING DISTRIBUTIONS FOR THE ORIGINAL DATA ##
#==================================================================================================
# NOTE(review): ylim = c(0, 0.00025) and legend(x = 15000, ...) appear copy-pasted from a
# much larger-scaled variable; confirm the fitted curves and legend are actually visible.
hist(Datos_original$temp,
     main = "Distribución de la temperatura media anual",
     freq = FALSE,
     ylim = c(0, 0.00025))
lines(mlinvweibull(Datos_original$temp), lwd = 2, lty = 1, col = "blue")  # inverse-Weibull fit
lines(mlinvgamma(Datos_original$temp), lwd = 2, lty = 2, col = "red")     # inverse-gamma fit
legend(x = 15000, y = 0.0001, legend = c("invweibull", "invgamma"),
       col = c("blue", "red"), lty = 1:2)
rug(Datos_original$temp)
##################################################################################################
# ggplot version: density histogram of annual mean temperature with the two
# best-fitting candidate densities (inverse-Weibull, inverse-gamma) overlaid.
fit_temp_plot_iw <- mlinvweibull(Datos_original$temp)
fit_temp_plot_ig <- mlinvgamma(Datos_original$temp)
ggplot(Datos_original) +
  geom_histogram(aes(x = temp, y = after_stat(density)),
                 bins = 40, alpha = 0.3, color = "black") +
  geom_rug(aes(x = temp)) +
  stat_function(aes(color = "inverse-weibull"), size = 1,
                fun = function(v) dml(x = v, obj = fit_temp_plot_iw)) +
  stat_function(aes(color = "inverse-gamma"), size = 1,
                fun = function(v) dml(x = v, obj = fit_temp_plot_ig)) +
  scale_color_manual(breaks = c("inverse-weibull", "inverse-gamma"),
                     values = c("inverse-weibull" = "red", "inverse-gamma" = "blue")) +
  labs(title = "Distribución de la temperatura media anual",
       color = "Distribución") +
  theme_bw() +
  theme(legend.position = "bottom")
#==================================================================================================
#==================================================================================================
############### Maximum-likelihood fits for the original "temp" data ###############
# Inverse-Weibull fit of temp. (The original comment here wrongly said "manu".)
fit_temp_invweibull <- mlinvweibull(x = Datos_original$temp)
summary(fit_temp_invweibull)
# Inverse-gamma fit of temp.
fit_temp_invgamma <- mlinvgamma(x = Datos_original$temp)
summary(fit_temp_invgamma)
#==================================================================================================
#==================================================================================================
########## Bootstrap intervals for the original-data fits ##########
# Central 5th-95th percentile bootstrap interval of each fit's parameters (1000 resamples).
bootstrapml(fit_temp_invweibull, probs = c(0.05, 0.95), reps = 1000)
bootstrapml(fit_temp_invgamma, probs = c(0.05, 0.95), reps = 1000)
#==================================================================================================
#==================================================================================================
############ Fresh temp draws from each fitted distribution ############
# Five simulated values per fit; the seed is reset so both draws share the RNG stream.
set.seed(1500)
rml(n = 5, obj = fit_temp_invweibull)
set.seed(1500)
rml(n = 5, obj = fit_temp_invgamma)
#==================================================================================================
#==================================================================================================
# Only distributions with support on [0, +inf) are compared.
#### Candidate fits for the "manu" variable of the original data, ranked by AIC ####
comparacion_aic <- AIC(
  mlbetapr(Datos_original$manu),
  mlexp(Datos_original$manu),
  mlinvgamma(Datos_original$manu),
  mlgamma(Datos_original$manu),
  mllnorm(Datos_original$manu),
  mlrayleigh(Datos_original$manu),
  mlinvgauss(Datos_original$manu),
  mlweibull(Datos_original$manu),
  mlinvweibull(Datos_original$manu),
  mllgamma(Datos_original$manu)
)
# Best fit first (lowest AIC).
comparacion_aic %>% rownames_to_column(var = "distribucion") %>% arrange(AIC)
#### Candidate fits for the "manu" variable of the original data, ranked by BIC ####
comparacion_bic <- BIC(
  mlbetapr(Datos_original$manu),
  mlexp(Datos_original$manu),
  mlinvgamma(Datos_original$manu),
  mlgamma(Datos_original$manu),
  mllnorm(Datos_original$manu),
  mlrayleigh(Datos_original$manu),
  mlinvgauss(Datos_original$manu),
  mlweibull(Datos_original$manu),
  mlinvweibull(Datos_original$manu),
  mllgamma(Datos_original$manu)
)
# Best fit first (lowest BIC).
comparacion_bic %>% rownames_to_column(var = "distribucion") %>% arrange(BIC)
#==================================================================================================
## BASE-GRAPHICS PLOT OF THE BEST-FITTING DISTRIBUTIONS FOR THE ORIGINAL DATA ##
#==================================================================================================
# NOTE(review): the fixed ylim and legend(x = 15000, ...) may not match this variable's
# range; confirm the fitted curves and legend are visible.
hist(Datos_original$manu,
     main = "Distribución del numero de empresas manufactureras",
     freq = FALSE,
     ylim = c(0, 0.00025))
lines(mllnorm(Datos_original$manu), lwd = 2, lty = 1, col = "blue")   # log-normal fit
lines(mllgamma(Datos_original$manu), lwd = 2, lty = 2, col = "red")   # log-gamma fit
legend(x = 15000, y = 0.0001, legend = c("lnorm", "lgamma"),
       col = c("blue", "red"), lty = 1:2)
rug(Datos_original$manu)
##################################################################################################
# ggplot version: density histogram of the number of manufacturing firms with the
# two best-fitting candidate densities (log-normal, log-gamma) overlaid.
fit_manu_plot_ln <- mllnorm(Datos_original$manu)
fit_manu_plot_lg <- mllgamma(Datos_original$manu)
ggplot(Datos_original) +
  geom_histogram(aes(x = manu, y = after_stat(density)),
                 bins = 40, alpha = 0.3, color = "black") +
  geom_rug(aes(x = manu)) +
  stat_function(aes(color = "log-normal"), size = 1,
                fun = function(v) dml(x = v, obj = fit_manu_plot_ln)) +
  stat_function(aes(color = "log-gamma"), size = 1,
                fun = function(v) dml(x = v, obj = fit_manu_plot_lg)) +
  scale_color_manual(breaks = c("log-normal", "log-gamma"),
                     values = c("log-normal" = "red", "log-gamma" = "blue")) +
  labs(title = "Distribución del numero de empresas manufactureras",
       color = "Distribución") +
  theme_bw() +
  theme(legend.position = "bottom")
#==================================================================================================
#==================================================================================================
############### Maximum-likelihood fits for the original "manu" data ###############
# Log-normal fit of manu.
fit_manu_lnorm <- mllnorm(x = Datos_original$manu)
summary(fit_manu_lnorm)
# Log-gamma fit of manu.
fit_manu_lgamma <- mllgamma(x = Datos_original$manu)
summary(fit_manu_lgamma)
#==================================================================================================
#==================================================================================================
########## Bootstrap intervals for the original-data fits ##########
# Central 5th-95th percentile bootstrap interval of each fit's parameters (1000 resamples).
bootstrapml(fit_manu_lnorm, probs = c(0.05, 0.95), reps = 1000)
bootstrapml(fit_manu_lgamma, probs = c(0.05, 0.95), reps = 1000)
#==================================================================================================
#==================================================================================================
############ Fresh manu draws from each fitted distribution ############
# Five simulated values per fit; the seed is reset so both draws share the RNG stream.
set.seed(1500)
rml(n = 5, obj = fit_manu_lnorm)
set.seed(1500)
rml(n = 5, obj = fit_manu_lgamma)
#==================================================================================================
#==================================================================================================
# Only distributions with support on [0, +inf) are compared.
#### Candidate fits for the "popul" variable of the original data, ranked by AIC ####
comparacion_aic <- AIC(
  mlbetapr(Datos_original$popul),
  mlexp(Datos_original$popul),
  mlinvgamma(Datos_original$popul),
  mlgamma(Datos_original$popul),
  mllnorm(Datos_original$popul),
  mlrayleigh(Datos_original$popul),
  mlinvgauss(Datos_original$popul),
  mlweibull(Datos_original$popul),
  mlinvweibull(Datos_original$popul),
  mllgamma(Datos_original$popul)
)
# Best fit first (lowest AIC).
comparacion_aic %>% rownames_to_column(var = "distribucion") %>% arrange(AIC)
#### Candidate fits for the "popul" variable of the original data, ranked by BIC ####
# (The original header said "manu" here, but the data compared is popul.)
comparacion_bic <- BIC(
  mlbetapr(Datos_original$popul),
  mlexp(Datos_original$popul),
  mlinvgamma(Datos_original$popul),
  mlgamma(Datos_original$popul),
  mllnorm(Datos_original$popul),
  mlrayleigh(Datos_original$popul),
  mlinvgauss(Datos_original$popul),
  mlweibull(Datos_original$popul),
  mlinvweibull(Datos_original$popul),
  mllgamma(Datos_original$popul)
)
# Best fit first (lowest BIC).
comparacion_bic %>% rownames_to_column(var = "distribucion") %>% arrange(BIC)
#==================================================================================================
## REPRESENTACION GRAFICA DE LAS DISTRIBUCIONES QUE SE MEJOR SE AJUSTAN DE LOS DATOS ORIGINALES ##
#==================================================================================================
# Density histogram of "popul" with the two best parametric fits overlaid.
# FIX: repaired the mis-encoded character in the title ("tamańo" -> "tamaño").
hist(Datos_original$popul,
     main = "Distribución del tamaño de la poblacion en miles",
     freq = FALSE,
     ylim = c(0, 0.00025))
lines(mllnorm(Datos_original$popul), lwd = 2, lty = 1, col = "blue")   # log-normal fit
lines(mllgamma(Datos_original$popul), lwd = 2, lty = 2, col = "red")   # log-gamma fit
legend(x = 15000, y = 0.0001, legend = c("lnorm", "lgamma"),
       col = c("blue", "red"), lty = 1:2)
rug(Datos_original$popul)
##################################################################################################
# ggplot version: density histogram of "popul" with log-normal and log-gamma
# fits overlaid.
# FIX: repaired the mis-encoded character in the title ("tamańo" -> "tamaño").
ggplot(data = Datos_original) +
  geom_histogram(aes(x = popul, y = after_stat(density)),
                 bins = 40,
                 alpha = 0.3, color = "black") +
  geom_rug(aes(x = popul)) +
  stat_function(fun = function(.x){dml(x = .x, obj = mllnorm(Datos_original$popul))},
                aes(color = "log-normal"),
                size = 1) +
  stat_function(fun = function(.x){dml(x = .x, obj = mllgamma(Datos_original$popul))},
                aes(color = "log-gamma"),
                size = 1) +
  scale_color_manual(breaks = c("log-normal", "log-gamma"),
                     values = c("log-normal" = "red", "log-gamma" = "blue")) +
  labs(title = "Distribución del tamaño de la poblacion en miles",
       color = "Distribución") +
  theme_bw() +
  theme(legend.position = "bottom")
#==================================================================================================
#==================================================================================================
############### Maximum-likelihood fits for the original "popul" data ###############
# Log-normal fit of popul.
fit_popul_lnorm <- mllnorm(x = Datos_original$popul)
summary(fit_popul_lnorm)
# Log-gamma fit of popul.
fit_popul_lgamma <- mllgamma(x = Datos_original$popul)
summary(fit_popul_lgamma)
#==================================================================================================
#==================================================================================================
########## Bootstrap intervals for the original-data fits ##########
# Central 5th-95th percentile bootstrap interval of each fit's parameters (1000 resamples).
bootstrapml(fit_popul_lnorm, probs = c(0.05, 0.95), reps = 1000)
bootstrapml(fit_popul_lgamma, probs = c(0.05, 0.95), reps = 1000)
#==================================================================================================
#==================================================================================================
############ Fresh popul draws from each fitted distribution ############
# Five simulated values per fit; the seed is reset so both draws share the RNG stream.
set.seed(1500)
rml(n = 5, obj = fit_popul_lnorm)
set.seed(1500)
rml(n = 5, obj = fit_popul_lgamma)
#==================================================================================================
#==================================================================================================
# Only distributions with support on [0, +inf) are compared.
#### Candidate fits for the "wind" variable of the original data, ranked by AIC ####
comparacion_aic <- AIC(
  mlbetapr(Datos_original$wind),
  mlexp(Datos_original$wind),
  mlinvgamma(Datos_original$wind),
  mlgamma(Datos_original$wind),
  mllnorm(Datos_original$wind),
  mlrayleigh(Datos_original$wind),
  mlinvgauss(Datos_original$wind),
  mlweibull(Datos_original$wind),
  mlinvweibull(Datos_original$wind),
  mllgamma(Datos_original$wind)
)
# Best fit first (lowest AIC).
comparacion_aic %>% rownames_to_column(var = "distribucion") %>% arrange(AIC)
#### Candidate fits for the "wind" variable of the original data, ranked by BIC ####
comparacion_bic <- BIC(
  mlbetapr(Datos_original$wind),
  mlexp(Datos_original$wind),
  mlinvgamma(Datos_original$wind),
  mlgamma(Datos_original$wind),
  mllnorm(Datos_original$wind),
  mlrayleigh(Datos_original$wind),
  mlinvgauss(Datos_original$wind),
  mlweibull(Datos_original$wind),
  mlinvweibull(Datos_original$wind),
  mllgamma(Datos_original$wind)
)
# Best fit first (lowest BIC).
comparacion_bic %>% rownames_to_column(var = "distribucion") %>% arrange(BIC)
#==================================================================================================
## BASE-GRAPHICS PLOT OF THE BEST-FITTING DISTRIBUTIONS FOR THE ORIGINAL DATA ##
#==================================================================================================
# NOTE(review): ylim = c(0, 0.00025) and legend(x = 15000, ...) appear copy-pasted from a
# much larger-scaled variable; confirm the fitted curves and legend are actually visible.
hist(Datos_original$wind,
     main = "Distribución de la velocidad media anual del viento",
     freq = FALSE,
     ylim = c(0, 0.00025))
lines(mlgamma(Datos_original$wind), lwd = 2, lty = 1, col = "blue")   # gamma fit
lines(mllnorm(Datos_original$wind), lwd = 2, lty = 2, col = "red")    # log-normal fit
legend(x = 15000, y = 0.0001, legend = c("gamma", "lnorm"),
       col = c("blue", "red"), lty = 1:2)
rug(Datos_original$wind)
##################################################################################################
# ggplot version: density histogram of annual mean wind speed with the two
# best-fitting candidate densities (gamma, log-normal) overlaid.
fit_wind_plot_ga <- mlgamma(Datos_original$wind)
fit_wind_plot_ln <- mllnorm(Datos_original$wind)
ggplot(Datos_original) +
  geom_histogram(aes(x = wind, y = after_stat(density)),
                 bins = 40, alpha = 0.3, color = "black") +
  geom_rug(aes(x = wind)) +
  stat_function(aes(color = "gamma"), size = 1,
                fun = function(v) dml(x = v, obj = fit_wind_plot_ga)) +
  stat_function(aes(color = "log-normal"), size = 1,
                fun = function(v) dml(x = v, obj = fit_wind_plot_ln)) +
  scale_color_manual(breaks = c("gamma", "log-normal"),
                     values = c("gamma" = "red", "log-normal" = "blue")) +
  labs(title = "Distribución de la velocidad media anual del viento",
       color = "Distribución") +
  theme_bw() +
  theme(legend.position = "bottom")
#==================================================================================================
#==================================================================================================
############### Maximum-likelihood fits for the original "wind" data ###############
# Gamma fit of wind.
fit_wind_gamma <- mlgamma(x = Datos_original$wind)
summary(fit_wind_gamma)
# Log-normal fit of wind.
fit_wind_lnorm <- mllnorm(x = Datos_original$wind)
summary(fit_wind_lnorm)
#==================================================================================================
#==================================================================================================
########## Bootstrap intervals for the original-data fits ##########
# Central 5th-95th percentile bootstrap interval of each fit's parameters (1000 resamples).
bootstrapml(fit_wind_gamma, probs = c(0.05, 0.95), reps = 1000)
bootstrapml(fit_wind_lnorm, probs = c(0.05, 0.95), reps = 1000)
#==================================================================================================
#==================================================================================================
############ Fresh wind draws from each fitted distribution ############
# Five simulated values per fit; the seed is reset so both draws share the RNG stream.
set.seed(1500)
rml(n = 5, obj = fit_wind_gamma)
set.seed(1500)
rml(n = 5, obj = fit_wind_lnorm)
#==================================================================================================
#==================================================================================================
#Se comparan únicamente las distribuciones con un dominio [0, +inf)
#### Comparacion de la variable "precip" de los Datos Originales, con la metrica de ajuste "AIC" ####
comparacion_aic <- AIC(
mlbetapr(Datos_original$precip),
mlexp(Datos_original$precip),
mlinvgamma(Datos_original$precip),
mlgamma(Datos_original$precip),
mllnorm(Datos_original$precip),
mlrayleigh(Datos_original$precip),
mlinvgauss(Datos_original$precip),
mlweibull(Datos_original$precip),
mlinvweibull(Datos_original$precip),
mllgamma(Datos_original$precip)
)
comparacion_aic %>% rownames_to_column(var = "distribucion") %>% arrange(AIC)
#### Comparacion de la variable "precip" de los Datos Originales, con la metrica de ajuste "BIC" ####
comparacion_bic <- BIC(
mlbetapr(Datos_original$precip),
mlexp(Datos_original$precip),
mlinvgamma(Datos_original$precip),
mlgamma(Datos_original$precip),
mllnorm(Datos_original$precip),
mlrayleigh(Datos_original$precip),
mlinvgauss(Datos_original$precip),
mlweibull(Datos_original$precip),
mlinvweibull(Datos_original$precip),
mllgamma(Datos_original$precip)
)
comparacion_bic %>% rownames_to_column(var = "distribucion") %>% arrange(BIC)
#==================================================================================================
## GRAPHICAL REPRESENTATION OF THE BEST-FITTING DISTRIBUTIONS FOR THE ORIGINAL DATA (precip) ##
#==================================================================================================
# Base-graphics histogram of annual mean precipitation with the two
# best-fitting densities overlaid (weibull in blue, gamma in red).
hist(Datos_original$precip,
     main = "Distribución de la precipitacion media anual",
     freq = FALSE,
     ylim = c(0, 0.00025))
lines(mlweibull(Datos_original$precip), lwd = 2, lty = 1, col = "blue")
lines(mlgamma(Datos_original$precip), lwd = 2, lty = 2, col = "red")
legend(x = 15000, y = 0.0001, legend = c("weibull", "gamma"),
       col = c("blue", "red"), lty = 1:2)
rug(Datos_original$precip)
##################################################################################################
# Same comparison drawn with ggplot2.
# NOTE(review): here weibull is mapped to red and gamma to blue -- the
# opposite of the base plot above; confirm which colour mapping is intended.
# NOTE(review): the `size` aesthetic for lines is superseded by `linewidth`
# in ggplot2 >= 3.4.0; kept as-is for compatibility with older versions.
ggplot(data = Datos_original) +
  geom_histogram(aes(x = precip, y = after_stat(density)),
                 bins = 40,
                 alpha = 0.3, color = "black") +
  geom_rug(aes(x = precip)) +
  stat_function(fun = function(.x){dml(x = .x, obj = mlweibull(Datos_original$precip))},
                aes(color = "weibull"),
                size = 1) +
  stat_function(fun = function(.x){dml(x = .x, obj = mlgamma(Datos_original$precip))},
                aes(color = "gamma"),
                size = 1) +
  scale_color_manual(breaks = c("weibull", "gamma"),
                     values = c("weibull" = "red", "gamma" = "blue")) +
  labs(title = "Distribución de la precipitacion media anual",
       color = "Distribución") +
  theme_bw() +
  theme(legend.position = "bottom")
#==================================================================================================
#==================================================================================================
############### Fitting the selected distributions to the original data ###############
# Fit a weibull distribution to the precip data (chosen from the AIC/BIC
# comparison above).
distribucion <- mlweibull(x = Datos_original$precip)
summary(distribucion)
# Fit a gamma distribution to the precip data (the other candidate kept).
distribucion1 <- mlgamma(x = Datos_original$precip)
summary(distribucion1)
#==================================================================================================
#==================================================================================================
########## Bootstrap confidence intervals from the original data ##########
# NOTE(review): probs = c(0.05, 0.95) yields a 90% interval, although the
# original comment called it 95%.
bootstrapml(distribucion, probs = c(0.05, 0.95), reps = 1000)
# Same bootstrap interval for the gamma fit.
bootstrapml(distribucion1, probs = c(0.05, 0.95), reps = 1000)
#==================================================================================================
#==================================================================================================
############ New samples of precip drawn from the fitted distributions ############
# Samples from the fitted weibull distribution (seed fixed for reproducibility).
set.seed(1500)
rml(n = 5, obj = distribucion)
# Samples from the fitted gamma distribution (same seed, so comparable).
set.seed(1500)
rml(n = 5, obj = distribucion1)
#==================================================================================================
#==================================================================================================
# Only distributions with support on [0, +inf) are compared.
# NOTE(review): predays is a count of days (integer-valued); fitting
# continuous distributions is a pragmatic approximation -- confirm this is
# acceptable for the analysis.
#### Comparison for the "predays" variable of the original data, using AIC ####
comparacion_aic <- AIC(
  mlbetapr(Datos_original$predays),
  mlexp(Datos_original$predays),
  mlinvgamma(Datos_original$predays),
  mlgamma(Datos_original$predays),
  mllnorm(Datos_original$predays),
  mlrayleigh(Datos_original$predays),
  mlinvgauss(Datos_original$predays),
  mlweibull(Datos_original$predays),
  mlinvweibull(Datos_original$predays),
  mllgamma(Datos_original$predays)
)
# Rank the candidate distributions from best (lowest AIC) to worst.
comparacion_aic %>% rownames_to_column(var = "distribucion") %>% arrange(AIC)
#### Comparison for the "predays" variable of the original data, using BIC ####
comparacion_bic <- BIC(
  mlbetapr(Datos_original$predays),
  mlexp(Datos_original$predays),
  mlinvgamma(Datos_original$predays),
  mlgamma(Datos_original$predays),
  mllnorm(Datos_original$predays),
  mlrayleigh(Datos_original$predays),
  mlinvgauss(Datos_original$predays),
  mlweibull(Datos_original$predays),
  mlinvweibull(Datos_original$predays),
  mllgamma(Datos_original$predays)
)
# Rank the candidate distributions from best (lowest BIC) to worst.
comparacion_bic %>% rownames_to_column(var = "distribucion") %>% arrange(BIC)
#==================================================================================================
## GRAPHICAL REPRESENTATION OF THE BEST-FITTING DISTRIBUTIONS FOR THE ORIGINAL DATA (predays) ##
#==================================================================================================
# Base-graphics histogram of the mean number of days with precipitation, with
# the two best-fitting densities overlaid (weibull in blue, gamma in red).
#
# BUG FIX: this plot previously reused ylim = c(0, 0.00025) and
# legend(x = 15000, y = 0.0001) copy-pasted from the precip section.  predays
# is a count of days (roughly 0-366), so its density is orders of magnitude
# larger than 0.00025 (the histogram was squashed flat) and x = 15000 placed
# the legend far outside the plotting region.  The y-axis now auto-scales and
# the legend is positioned by keyword.
hist(Datos_original$predays,
     main = "Distribución del numero medio de dias con precipitacion",
     freq = FALSE)
lines(mlweibull(Datos_original$predays), lwd = 2, lty = 1, col = "blue")
lines(mlgamma(Datos_original$predays), lwd = 2, lty = 2, col = "red")
legend("topright", legend = c("weibull", "gamma"),
       col = c("blue", "red"), lty = 1:2)
rug(Datos_original$predays)
##################################################################################################
# Same comparison drawn with ggplot2.
# NOTE(review): here weibull is mapped to red and gamma to blue -- the
# opposite of the base plot above; confirm which colour mapping is intended.
# NOTE(review): the `size` aesthetic for lines is superseded by `linewidth`
# in ggplot2 >= 3.4.0; kept as-is for compatibility with older versions.
ggplot(data = Datos_original) +
  geom_histogram(aes(x = predays, y = after_stat(density)),
                 bins = 40,
                 alpha = 0.3, color = "black") +
  geom_rug(aes(x = predays)) +
  stat_function(fun = function(.x){dml(x = .x, obj = mlweibull(Datos_original$predays))},
                aes(color = "weibull"),
                size = 1) +
  stat_function(fun = function(.x){dml(x = .x, obj = mlgamma(Datos_original$predays))},
                aes(color = "gamma"),
                size = 1) +
  scale_color_manual(breaks = c("weibull", "gamma"),
                     values = c("weibull" = "red", "gamma" = "blue")) +
  labs(title = "Distribución del numero medio de dias con precipitacion",
       color = "Distribución") +
  theme_bw() +
  theme(legend.position = "bottom")
#==================================================================================================
#==================================================================================================
############### Fitting the selected distributions to the original data ###############
# Fit a weibull distribution to the predays data (chosen from the AIC/BIC
# comparison above).
distribucion <- mlweibull(x = Datos_original$predays)
summary(distribucion)
# Fit a gamma distribution to the predays data (the other candidate kept).
distribucion1 <- mlgamma(x = Datos_original$predays)
summary(distribucion1)
#==================================================================================================
#==================================================================================================
########## Bootstrap confidence intervals from the original data ##########
# NOTE(review): probs = c(0.05, 0.95) yields a 90% interval (5th-95th
# percentiles), although the original comment called it 95%.
bootstrapml(distribucion, probs = c(0.05, 0.95), reps = 1000)
# Same bootstrap interval for the gamma fit.
bootstrapml(distribucion1, probs = c(0.05, 0.95), reps = 1000)
#==================================================================================================
#==================================================================================================
############ New samples of predays drawn from the fitted distributions ############
# Samples from the fitted weibull distribution (seed fixed for reproducibility).
set.seed(1500)
rml(n = 5, obj = distribucion)
# Samples from the fitted gamma distribution (same seed, so comparable).
set.seed(1500)
rml(n = 5, obj = distribucion1)
#==================================================================================================
|
54dec448f2c1e83fa45286fc0072eb29c3f6e499 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/4696_0/rinput.R | 60cfd769e77b84c8bca875bc8ce2175905c934ae | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the CODEML gene tree, strip its root, and write the unrooted tree out.
# read.tree()/unroot()/write.tree() come from the ape package loaded above.
phy <- read.tree("4696_0.txt")
phy_unrooted <- unroot(phy)
write.tree(phy_unrooted, file = "4696_0_unrooted.txt")
352605ddf2244c73422a67b79406a3e42faad4a8 | e250086535e666710c037e50cbfa322aa5e18b55 | /R/dtiReconstruction.R | 1c1fa1d942f794dcd8176f3a14eca015b8894329 | [] | no_license | jeffduda/DANTsR | 2efe5776eea2e462ae84eef054cff7495b6cae08 | e220a37051c95527b0f1fc13a14eda6f7898a4f5 | refs/heads/master | 2022-10-05T03:10:25.461452 | 2022-09-19T19:17:14 | 2022-09-19T19:17:14 | 102,148,794 | 1 | 3 | null | 2022-09-19T19:17:15 | 2017-09-01T20:01:50 | C++ | UTF-8 | R | false | false | 1,853 | r | dtiReconstruction.R | dtiReconstruction.r.svd <- function(x, basis, bMat) {
if ( dim(bMat)[2] > 6 ) {
x = bMat %*% x
}
solve(basis, x)
}
#' @title dtiReconstruction
#' @description reconstruct diffusion tensors from a DWI image and gradient table
#' @param dwi an N-channel antsImage
#' @param gradients Nx4 matrix of gradient directions and b-values
#' @param method the reconstruction algorithm to use (default = "itk-svd")
#' \itemize{
#' \item{itk-svd}{uses the itk::DiffusionTensor3DReconstructionImageFilter filter}
#' \item{r-svd} uses the r 'solve' function
#' }
#' @param mask 'antsImage' mask indicating where to perform reconstruction
#' @export
dtiReconstruction = function(dwi, gradients, method, mask=NULL)
{
  # Reconstruct diffusion tensors from a DWI image.
  #
  # dwi       : N-channel 'antsImage' of diffusion-weighted volumes
  # gradients : N x 4 matrix; columns 1:3 are gradient directions, column 4
  #             is the b-value (0 for the b0 reference volumes)
  # method    : "r-svd" solves the log-linear tensor model in R; anything
  #             else is dispatched to the compiled implementation via .Call
  # mask      : optional 'antsImage' restricting the reconstruction; defaults
  #             to an all-ones mask covering one 3-D volume
  #
  # For "r-svd" the return value is a matrix of the 6 unique tensor
  # components per masked voxel; otherwise whatever the compiled routine
  # returns.
  method = tolower(method)

  if ( method=="r-svd" ) {
    dat = channelToDimension(dwi)
    if ( is.null(mask) ) {
      # All-ones mask with the geometry of the first 3-D volume.
      mask = extractSlice(dat, 1, 4)*0+1
    }
    # rows = acquisitions, columns = voxels inside the mask
    mat = timeseries2matrix(dat, mask)

    # Diffusion-weighted acquisitions are the gradient rows with b != 0.
    dwiRows = which(gradients[, 4] != 0)
    if (length(dwiRows) == 0) {
      stop("dtiReconstruction: 'gradients' has no rows with a non-zero b-value")
    }

    # Build the 6-column b-matrix design (xx, 2xy, 2xz, yy, 2yz, zz).
    g = gradients[dwiRows, 1:3, drop = FALSE]
    bmat = cbind( g[,1]*g[,1],
                  2*g[,1]*g[,2],
                  2*g[,1]*g[,3],
                  g[,2]*g[,2],
                  2*g[,2]*g[,3],
                  g[,3]*g[,3])

    # BUG FIX: the b-value must be read from column 4.  The previous code
    # used gradients[which(gradients[,4]!=0)] -- a single-subscript (linear)
    # index into the matrix, which silently returned column-1 (x-direction)
    # entries instead of b-values.
    bValue = gradients[dwiRows[1], 4]

    # Normal-equations basis: overdetermined when more than 6 directions.
    if (nrow(bmat) > 6) {
      tensorBasis = t(bmat) %*% bmat
    }
    else {
      tensorBasis = bmat
    }
    bmat = t(bmat)

    # Average the b0 volumes (reference signal), then drop them from 'mat'.
    b0ids = which(gradients[, 4] == 0)
    if (length(b0ids) == 0) {
      stop("dtiReconstruction: 'gradients' has no b0 (b-value == 0) rows")
    }
    b0 = mat[b0ids,]
    if ( length(b0ids) > 1 ) {
      b0 = colMeans(b0)
    }
    mat = mat[-b0ids,]

    # Guard against division by zero in voxels with no b0 signal.
    b0[b0 == 0] = 1

    # Log-linearised signal model (-log(S/S0)/b), then per-voxel least
    # squares via the helper defined above.
    mat = apply(mat, 1, function(x) ( -log(x/b0)/bValue ) )
    mat = apply(mat, 1, dtiReconstruction.r.svd, basis=tensorBasis, bMat=bmat)
    mat
  }
  else {
    .Call( "dtiReconstruction", dwi, gradients, method, PACKAGE="DANTsR")
  }
}
|
d66bfcd29c219012adc925c1ea9188ae6f214677 | cea3466a2947e429a4f4fff5a65df740241c8190 | /R/tindex.R | 7033516b8e1a0528be7e659f5e7b50e26c7f7368 | [
"MIT"
] | permissive | cran/term | 73420e2257b6a0ed12a063a0133ec1e25ba4aa3d | 167b06989f44e37d0dd592b0a7ff5470edd91b65 | refs/heads/master | 2022-10-08T15:13:04.078807 | 2022-09-29T15:20:11 | 2022-09-29T15:20:11 | 236,950,026 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 803 | r | tindex.R | #' Term Index
#'
#' Gets the index for each term of an term or term_rcrd object.
#'
#' For example the index of `beta[2,1]` is `c(2L, 1L)`
#' while the index for `sigma` is `1L`.
#' It is useful for extracting the values of individual terms.
#'
#' @inheritParams params
#' @return A named list of integer vectors of the index for each term.
#' @seealso [dims()], [ndims()], [npdims()] and [pdims()]
#' @family term
#' @export
#'
#' @examples
#' tindex(term("alpha", "alpha[2]", "beta[1,1]", "beta[2 ,1 ]"))
tindex <- function(x) {
  # Coerce legacy (non-term) input with a deprecation signal; the documented
  # contract requires a term or term_rcrd object.
  needs_coercion <- !is_term(x) && !is_term_rcrd(x)
  if (needs_coercion) {
    lifecycle::deprecate_soft(
      "0.2.1", "term::tindex(x = 'must be a term or term_rcrd object')"
    )
    x <- as_term(x)
  }
  # Per-term integer index vectors, labelled with the term names.
  stats::setNames(field(as_term_rcrd(x), "dim"), as.character(as_term(x)))
}
|
5fee96953b0a26ff22b53a61be98a7b7ff88f8c2 | f0e1cac1696064c9dd26a0b428c82279bec98442 | /Getting and Cleaning Data Course Project/run_analysis.R | 1b60a96f5e1d887a48fd7622ab25ccf0adb8a68d | [] | no_license | oscaramtz/DataScience-JHSPH | 6b813285444ceab4510289bc1a670ec35839d8c1 | c30cd4ac6c7536a2a917caba9f7f85e71584ce9e | refs/heads/master | 2020-04-18T00:19:29.051254 | 2019-02-18T21:19:18 | 2019-02-18T21:19:18 | 167,072,554 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,167 | r | run_analysis.R | #Assignment __Getting and Cleaning Data
## Keeping just the variables of interest mean() and std() variables
# NOTE(review): install.packages() inside an analysis script re-installs on
# every run; consider guarding with requireNamespace() instead.
install.packages("dplyr")
install.packages("tidyr")
library(dplyr)
library(tidyr)
# features.txt maps each of the 561 feature columns to its name (column 2).
var_labels <- read.table("./data/UCI HAR Dataset/features.txt")
# Build a colClasses vector that reads only the mean()/std() columns as
# numeric and skips ("NULL") everything else.
kept <- c(rep("NULL", 561)); kept_var <- grep(var_labels[,2], pattern = "std[()]|mean[()]" ); kept[kept_var] <- "numeric"
## Structuring the data frame and setting names for all variables
raw_set_train <- read.table("./data/UCI HAR Dataset/train/x_train.txt", colClasses = kept)
raw_set_test <- read.table("./data/UCI HAR Dataset/test/x_test.txt", colClasses = kept)
### Setting up var names
colnames(raw_set_train) <- var_labels[kept_var,2]
colnames(raw_set_test) <- var_labels[kept_var,2]
## Combine the activities classification and subject ID
activities_train <- read.table("./data/UCI HAR Dataset/train/y_train.txt", col.names = "activity.code")
activities_test <- read.table("./data/UCI HAR Dataset/test/y_test.txt", col.names = "activity.code")
setactivity_train <- cbind(raw_set_train, activities_train)
setactivity_test <- cbind(raw_set_test, activities_test)
subject_train <- read.table("./data/UCI HAR Dataset/train/subject_train.txt", col.names = "user.code", colClasses = "numeric" )
subject_test <- read.table("./data/UCI HAR Dataset/test/subject_test.txt", col.names = "user.code", colClasses = "numeric")
set_train <- cbind(subject_train, setactivity_train)
set_test <- cbind(subject_test, setactivity_test)
# Stack train and test observations into one data set.
binded_set <- rbind(set_train, set_test)
## reading activities labels
activity_labels <- read.table("./data/UCI HAR Dataset/activity_labels.txt", col.names = c("activity.code", "activity"))
# Lower-case the labels and strip the underscore.
# NOTE(review): sub() replaces only the FIRST "_" -- fine for the standard
# single-underscore labels, but use gsub() if labels can carry more than one.
activity_labels$activity <- tolower(sub(activity_labels$activity, pattern = "_", replacement = ""))
## labeling activities
complete_set1 <- merge(activity_labels ,binded_set, by.x = "activity.code",by.y = "activity.code")
### Tidying the table_DataFrame summary
library(tidyr)
complete_set <- complete_set1[,-1] ## Drop the numeric activity.code column (label kept instead)
## Summarise: mean of every kept feature per (activity, user)
# Long format -> group -> mean -> back to wide.
df_summary <- complete_set %>%
  gather(variables, value, -c(user.code, activity)) %>%
  group_by(activity, user.code, variables) %>%
  summarize(avg_variables = mean(value)) %>%
  spread(variables, avg_variables)
## Cleaning column names
# NOTE(review): the pattern "^[ft]BodyAccJer$" is anchored with "$" and so
# can never match the longer feature names (and the preceding substitution
# has already rewritten the "^[ft]BodyAcc" prefix); likely dead code --
# confirm intent.
names(df_summary) <- sub(names(df_summary), pattern = "^[ft]BodyAcc", replacement = "Body.Acceleration")
names(df_summary) <- sub(names(df_summary), pattern = "^[ft]BodyAccJer$", replacement = "Body.Acceleration.Jerk.Sig")
names(df_summary) <- sub(names(df_summary), pattern = "^[ft]BodyGyro", replacement = "Body.Gyroscope")
names(df_summary) <- sub(names(df_summary), pattern = "^[ft]GravityAcc", replacement = "Gravity.Acceleration")
names(df_summary) <- sub(names(df_summary), pattern = "^[ft]BodyGyroJerk", replacement = "Body.Gyroscope.Jerk.sig")
names(df_summary) <- sub(names(df_summary), pattern = "[(]", replacement = "")
names(df_summary) <- sub(names(df_summary), pattern = "[)]", replacement = "")
## Writing the tidy summary to disk (user.code first, then activity)
df_summary <- df_summary[, c(2,1,3:68)]
write.table(df_summary, file = "Activity_Recognition_df_summary.txt", row.names = FALSE)
|
e4cca658067ce68e30a2f799c76c39e6a1976b8c | e0d5bf73eeb38770651573ae9cfed2ed6f591695 | /R/Fx_survival.R | d7864b8d09ab409f83067b80c15c7aad7ff90705 | [] | no_license | cran/OptimalDesign | c6ebd669045f8fee81528b806d676d80d6bdaff6 | 93b9ad670b23a164d0a2a8122505bdaefbacbde9 | refs/heads/master | 2020-04-01T21:03:30.348007 | 2019-12-02T07:50:07 | 2019-12-02T07:50:07 | 64,430,702 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 998 | r | Fx_survival.R | Fx_survival <- function(formula, theta0, censor.time, survival.model="phI",
lower=NULL, upper=NULL, n.levels=NULL, echo=TRUE) {
# Generate Fx for a survival model
cl <- match.call()
verify(cl, formula = formula, survival.model = survival.model, theta0 = theta0,
censor.time = censor.time, lower = lower, upper = upper,
n.levels = n.levels, echo = echo)
if (survival.model == "phI")
u <- function(f) 1 - exp(-censor.time * exp(t(f) %*% theta0))
if (survival.model == "phrand")
u <- function(f) 1 - (1 - exp(-censor.time * exp(t(f) %*% theta0))) /
(censor.time * exp(t(f) %*% theta0))
F.lin <- Fx_cube(formula, lower, upper, n.levels, echo = FALSE)
n <- nrow(F.lin); m <- ncol(F.lin)
Fx <- matrix(0, nrow = n, ncol = m)
for (i in 1:n) Fx[i, ] <- sqrt(u(F.lin[i, ])) %*% F.lin[i, ]
cnms <- rep("", m)
for (j in 1:m) cnms[j] <- paste("S", j, sep = "")
colnames(Fx) <- cnms
return(Fx)
}
|
ead368f75fc16361383bf13b1e27650b2da35266 | b417642e09478a3a6441054a3539a82cc09f3bff | /man/nyt_cg_memberappear.Rd | 00f70e49f23d5188226806d1e290d6d3a6bc73f2 | [
"MIT"
] | permissive | leeper/rtimes | 77f15c4e64204be96c043687edaf509f4a98902b | 96020842e54271bc2bd907ff3059093d8f754682 | refs/heads/master | 2021-01-20T08:09:46.958943 | 2014-01-14T19:37:09 | 2014-01-14T19:37:09 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,177 | rd | nyt_cg_memberappear.Rd | \name{nyt_cg_memberappear}
\alias{nyt_cg_memberappear}
\title{Get information about a particular member's appearances on the House or
Senate floor.}
\usage{
nyt_cg_memberappear(memberid = NULL, key = getOption("NYTCongressKey",
stop("need an API key for the NYT Congress API")), callopts = list())
}
\arguments{
\item{key}{your SunlightLabs API key; loads from
.Rprofile}
\item{callopts}{Optional additional curl options
(debugging tools mostly)}
\item{memberid}{The member's unique ID number
(alphanumeric). To find a member's ID number, get the
list of members for the appropriate House or Senate. You
can also use the Biographical Directory of the United
States Congress to get a member's ID. In search results,
each member's name is linked to a record by index ID
(e.g.,
http://bioguide.congress.gov/scripts/biodisplay.pl?index=C001041).
Use the index ID as member-id in your request.}
}
\value{
Get information about a particular member's appearances on
the House or Senate floor.
}
\description{
Get information about a particular member's appearances on
the House or Senate floor.
}
\examples{
\dontrun{
nyt_cg_memberappear('S001181')
}
}
|
717f3367a4ced04c934ab5436c5edc6c9c7f3e10 | b4dd54123785b310d03a88835a19fcee77b38b65 | /R/zzz.R | c836c60f1c82055b607690632acc2201dbffb890 | [
"MIT"
] | permissive | tidyverse/readr | 5b8a49899586ab0a7a28108b9a0ef634e60940d5 | 80e4dc1a8e48571323cdec8703d31eb87308eb01 | refs/heads/main | 2023-08-31T01:10:09.896284 | 2023-08-01T20:02:52 | 2023-08-01T20:02:52 | 11,663,980 | 631 | 271 | NOASSERTION | 2023-09-03T11:49:42 | 2013-07-25T15:28:22 | R | UTF-8 | R | false | false | 1,261 | r | zzz.R | # nocov start
.onLoad <- function(libname, pkgname) {
  # Initialise the bundled timezone database, then register S3 methods for
  # the suggested packages (soft registration, so they stay optional).
  tzdb::tzdb_initialize()
  register_s3_method("testthat", "compare", "col_spec")
  register_s3_method("testthat", "compare", "tbl_df")
  register_s3_method("waldo", "compare_proxy", "spec_tbl_df")
  # Install package option defaults, but only those the user has not set.
  defaults <- list(
    readr.show_progress = TRUE
  )
  unset <- setdiff(names(defaults), names(options()))
  if (length(unset) > 0) {
    options(defaults[unset])
  }
  invisible()
}
release_questions <- function() {
  # Extra questions devtools::release() asks before a CRAN submission.
  "Have checked with the IDE team?"
}
register_s3_method <- function(pkg, generic, class, fun = NULL) {
  # Register an S3 method for a generic owned by another (possibly not yet
  # loaded) package, without taking a hard dependency on that package.
  #
  # pkg     : package that owns the generic (e.g. "testthat")
  # generic : name of the generic function (e.g. "compare")
  # class   : S3 class the method dispatches on
  # fun     : the method implementation; when NULL it is looked up in the
  #           caller's environment under the name "<generic>.<class>"
  check_string(pkg)
  check_string(generic)
  check_string(class)
  if (is.null(fun)) {
    # Resolve "<generic>.<class>" in the caller's environment.
    fun <- get(paste0(generic, ".", class), envir = parent.frame())
  } else {
    stopifnot(is.function(fun))
  }
  # Register immediately if the owning package is already loaded.
  if (pkg %in% loadedNamespaces()) {
    registerS3method(generic, class, fun, envir = asNamespace(pkg))
  }
  # Always register an onLoad hook as well, so the method is re-registered
  # if the owning package is loaded later (or unloaded and reloaded).
  setHook(
    packageEvent(pkg, "onLoad"),
    function(...) {
      registerS3method(generic, class, fun, envir = asNamespace(pkg))
    }
  )
}
is_testing <- function() {
  # TRUE only when running under testthat for the readr package itself.
  # Sys.getenv() returns "" for unset variables, so both comparisons are
  # safely FALSE outside a test run.
  Sys.getenv("TESTTHAT") == "true" && Sys.getenv("TESTTHAT_PKG") == "readr"
}
# nocov end
|
3cf50cde02596d3825cdc09470afd3a69850ff02 | 6b50cc48b9da0ff22e16e5939aaa4bee2954369c | /05-results/flhm-compare/compare-catches.R | 442c661a3f10b088e3e8874a122bb08e409d5d6e | [
"MIT"
] | permissive | hennip/WGBAST | 129e3e7c99a2874d0c51ef2db00733029416f959 | b934eaddcecb6c639b0c923413b3a40e223e46ff | refs/heads/main | 2023-06-23T11:42:35.271725 | 2023-06-21T11:01:07 | 2023-06-21T11:01:07 | 99,205,026 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,514 | r | compare-catches.R | # Compare BUGS/JAGS results
#source("models-select.R")
## ---- load-catches
# unrep coefs (needed to adjust BUGS but not JAGS)
# =================
# # coef_r<-c(rep(NA,5),rep(1.24,9), rep(1.22,7), rep(1.23,(length(YearsB)-5)-16))
# # coef_c<-c(rep(NA,5),rep(1.33,9), rep(1.21,7),rep(1.2,5), rep(1.11,(length(YearsB)-5)-21))
# # coef_o<-c(rep(NA,5),rep(1.18,9), rep(1.15,7),rep(1.16,5), rep(1.12,(length(YearsB)-5)-21))
# # cbind(YearsB,coef_r,coef_c,coef_o)
#
# ureport_r=c(rep(NA,times=5),rep(1.24,times=9),rep(1.22,times=7),rep(1.23,times=11))
# ureport_c=c(rep(NA,times=5),rep(1.33,times=9),rep(1.21,times=7),rep(1.20,times=5),rep(1.11,times=6))
# ureport_o=c(rep(NA,times=5),rep(1.18,times=9),rep(1.15,times=7),rep(1.16,times=4),rep(1.12,times=7))
# cbind(YearsB,ureport_r,ureport_c,ureport_o)
# Catch data (reported catches)
# =================
# Note! If trolling is included as a separate fishery, estimates of nct_ObsTotX needs to be added
# as a separate graph. Be sure to use corresponding Catch.txt file
# Read the reported catch observations; when trolling is modelled as its own
# fishery (trolling2 == TRUE) the input file carries a fourth column.
if(trolling2==T){
  tmp<-read_tsv(str_c(PathData_FLHM, "Catch_TrollingSeparated.txt"), show_col_types = FALSE)
  colnames(tmp)<-c("river", "coast", "offs", "trolling")
}else{
  tmp<-read_tsv(str_c(PathData_FLHM, "Catch.txt"), show_col_types = FALSE)
  colnames(tmp)<-c("river", "coast", "offs")
}
# Reshape each fishery column into long format: one row per year with the
# observed catch and a Type label.
# NOTE(review): Years[1:length(Years)] is equivalent to just Years.
obs_r<-tmp[,1]%>%
  mutate(Type="River", Year=Years[1:length(Years)], obs_catch=river)%>%select(-river)
obs_c<-tmp[,2]%>%
  mutate(Type="Coast", Year=Years[1:length(Years)], obs_catch=coast)%>%select(-coast)
obs_o<-tmp[,3]%>%
  mutate(Type="Offshore", Year=Years[1:length(Years)], obs_catch=offs)%>%select(-offs)
# Stack the fisheries into a single long observation table.
obs<-full_join(obs_r,obs_c, by=NULL)
obs<-full_join(obs,obs_o, by=NULL)
if(trolling2==T){
  obs_tr<-tmp[,4]%>%
    mutate(Type="Trolling", Year=Years[1:length(Years)], obs_catch=trolling)%>%select(-trolling)
  obs<-full_join(obs,obs_tr, by=NULL)
}
# Total catch, including trolling if separated
obs_tot<-obs%>%group_by(Year)%>%
  summarise(obs_catch=sum(obs_catch))%>%
  mutate(Type="Total")
obs<-full_join(obs, obs_tot, by=NULL)
# Keep a copy to join against the Model 2 summaries below.
obs2<-obs
#View(obs)
# Model 1:
# =================
# Build the posterior of the total observed catch by summing the per-fishery
# "ObsTotX" nodes (river + coast + offshore + trolling) year by year.
# NOTE(review): this first allocation (and the commented loop below) is
# immediately superseded by the nchains1 branches; kept only for reference.
catch_tot<-array(NA, dim=c(length(chains1[,"ncr_ObsTotX[1]"][[1]]),length(YearsB)-0))
dim(catch_tot)
# for(y in 1:length(YearsB)){
#   catch_tot[,y]<-chains1[,str_c("ncr_ObsTotX[",y,"]")][[1]]+
#     chains1[,str_c("ncc_ObsTotX[",y,"]")][[1]]+
#     chains1[,str_c("nco_ObsTotX[",y,"]")][[1]]+
#     ifelse(trolling1==T,
#            chains[,str_c("nct_ObsTotX[",y,"]")][[1]],0)
#
# }
# Single-chain case: chains1 can be indexed like a plain matrix.
if(nchains1==1){
  catch_tot<-array(NA, dim=c(nsims1,length(YearsB)))
  dim(catch_tot)
  for(y in 1:(length(YearsB))){
    catch_tot[,y]<-chains1[,str_c("ncr_ObsTotX[",y,"]")]+
      chains1[,str_c("ncc_ObsTotX[",y,"]")]+
      chains1[,str_c("nco_ObsTotX[",y,"]")]+
      chains1[,str_c("nct_ObsTotX[",y,"]")]
    # ifelse(trolling1==T,
    #        chains1[,str_c("nct_ObsTotX[",y,"]")],0)
  }
}
# Multi-chain case: each column extraction yields a list, so take the first
# chain with [[1]]; note the last fix1 years are excluded here.
if(nchains1==2){
  catch_tot<-array(NA, dim=c(nsims1,length(YearsB)-fix1))
  dim(catch_tot)
  for(y in 1:(length(YearsB)-fix1)){
    catch_tot[,y]<-chains1[,str_c("ncr_ObsTotX[",y,"]")][[1]]+
      chains1[,str_c("ncc_ObsTotX[",y,"]")][[1]]+
      chains1[,str_c("nco_ObsTotX[",y,"]")][[1]]+
      chains1[,str_c("nct_ObsTotX[",y,"]")][[1]]
    # ifelse(trolling1==T,
    #        chains1[,str_c("nct_ObsTotX[",y,"]")][[1]],0)
  }
}
# Posterior quantile summaries (5/25/50/75/95%) per fishery, then stacked.
dfr<-boxplot.jags.df(chains1, "ncr_ObsTotX[", 1:(length(YearsB)-fix1))%>%
  mutate(Type="River")
dfc<-boxplot.jags.df(chains1, "ncc_ObsTotX[", 1:(length(YearsB)-fix1))%>%
  mutate(Type="Coast")
dfo<-boxplot.jags.df(chains1, "nco_ObsTotX[", 1:(length(YearsB)-fix1))%>%
  mutate(Type="Offshore")
if(trolling1==T){
  dftr<-boxplot.jags.df(chains1, "nct_ObsTotX[", 1:(length(YearsB)-fix1))%>%
    mutate(Type="Trolling")
}
dft<-boxplot.bugs.df(catch_tot, 1:(length(YearsB)-fix1))%>%
  mutate(Type="Total", x=y)%>%select(-y)
# NOTE(review): dftr only exists when trolling1 == TRUE, yet it is joined
# unconditionally below -- this errors for trolling1 == FALSE; confirm.
df<-full_join(dfr,dfc,by=NULL)%>%
  full_join(dfo,by=NULL)%>%
  #if(trolling1==T){df<-full_join(df,dftr,by=NULL)}
  full_join(dftr,by=NULL)%>%
  full_join(dft,by=NULL)
# Convert the model's year index (1, 2, ...) to calendar years (1987 = 1).
df.1<-as_tibble(setNames(df,c("Year","q5","q25","q50","q75","q95","Type")))%>%
  mutate(Year=Year+1986)
df.1
# Attach the reported catches for plotting alongside the posteriors.
df.1<-full_join(df.1,obs,by=NULL)
# Model 2:
# =================
# Same total-catch construction as for Model 1, but for the 'chains' object
# and over the full year range (no fix1 truncation).
#summary(chains[ ,regexpr("ncr_ObsTotX",varnames(chains))>0])
#summary(chains[ ,regexpr("nct_ObsTotX",varnames(chains))>0])
# Single-chain case: plain matrix indexing.
if(nchains2==1){
  catch_tot<-array(NA, dim=c(nsims2,length(Years)))
  dim(catch_tot)
  for(y in 1:(length(Years))){
    catch_tot[,y]<-chains[,str_c("ncr_ObsTotX[",y,"]")]+
      chains[,str_c("ncc_ObsTotX[",y,"]")]+
      chains[,str_c("nco_ObsTotX[",y,"]")]+
      chains[,str_c("nct_ObsTotX[",y,"]")]
    # ifelse(trolling2==T,
    #        chains[,str_c("nct_ObsTotX[",y,"]")],0)
  }
}
# Multi-chain case: extractions are lists, so take the first chain with [[1]].
if(nchains2==2){
  catch_tot<-array(NA, dim=c(nsims2,length(Years)))
  dim(catch_tot)
  for(y in 1:(length(Years))){
    catch_tot[,y]<-chains[,str_c("ncr_ObsTotX[",y,"]")][[1]]+
      chains[,str_c("ncc_ObsTotX[",y,"]")][[1]]+
      chains[,str_c("nco_ObsTotX[",y,"]")][[1]]+
      chains[,str_c("nct_ObsTotX[",y,"]")][[1]]
    # ifelse(trolling2==T,
    #        chains[,str_c("nct_ObsTotX[",y,"]")][[1]],0)
  }
}
# Posterior quantile summaries per fishery (trolling always included here).
dfr<-boxplot.jags.df(chains, "ncr_ObsTotX[", 1:(length(Years)))%>%
  mutate(Type="River")
dfc<-boxplot.jags.df(chains, "ncc_ObsTotX[", 1:(length(Years)))%>%
  mutate(Type="Coast")
dfo<-boxplot.jags.df(chains, "nco_ObsTotX[", 1:(length(Years)))%>%
  mutate(Type="Offshore")
#if(trolling2==T){
dftr<-boxplot.jags.df(chains, "nct_ObsTotX[", 1:(length(Years)))%>%
  mutate(Type="Trolling")
#}
dft<-boxplot.bugs.df(catch_tot, 1:length(Years))%>%
  mutate(Type="Total", x=y)%>%select(-y)
df<-full_join(dfr,dfc,by=NULL)%>%
  full_join(dfo,by=NULL)%>%
  full_join(dftr,by=NULL)%>%
  full_join(dft,by=NULL)
# Convert the model's year index to calendar years (1987 = 1).
df.2<-as_tibble(setNames(df,c("Year","q5","q25","q50","q75","q95","Type")))%>%
  mutate(Year=Year+1986)
df.2
# Attach the reported catches for plotting alongside the posteriors.
df.2<-full_join(df.2,obs2,by=NULL)
#View(df.2)
#View(df.1)
# Draw boxplots to compare
# ==========================
## ---- graphs-catches
# Restrict both model summaries to the assessment years.
df.1<-filter(df.1, Year>1991)
df.2<-filter(df.2, Year>1991)
# One panel per fishery: grey boxes/line = Model 1 (df1), open boxes/black
# line = Model 2 (df2), red dots = observations joined to Model 1, blue dots
# = observations joined to Model 2.
# NOTE(review): the variable 'plot' shadows base::plot inside this scope;
# harmless here but easy to trip over.
for(i in 1:4){
  #i<-3
  if(i==1){ df1<-filter(df.1, Type=="River");df2<-filter(df.2, Type=="River")}
  if(i==2){ df1<-filter(df.1, Type=="Coast");df2<-filter(df.2, Type=="Coast")}
  if(i==3){ df1<-filter(df.1, Type=="Offshore");df2<-filter(df.2, Type=="Offshore")}
  if(i==4){ df1<-filter(df.1, Type=="Total");df2<-filter(df.2, Type=="Total")}

  plot<-
    ggplot(df2, aes(Year, group=Year))+
    theme_bw()+
    geom_boxplot(
      data=df1,
      mapping= aes(ymin = q5, lower = q25, middle = q50, upper = q75, ymax = q95),
      stat = "identity",
      colour="grey", fill="grey95")+
    geom_boxplot(
      aes(ymin = q5, lower = q25, middle = q50, upper = q75, ymax = q95),
      stat = "identity",fill=rgb(1,1,1,0.1))+
    labs(x="Year", y="Catch (in thousands)", title="")+
    geom_line(aes(Year,q50))+
    geom_line(data=df1,aes(Year,q50),col="grey")+
    geom_point(data=df1,aes(Year,obs_catch), col="red")+
    geom_point(data=df2,aes(Year,obs_catch), col="blue")+
    scale_x_continuous(breaks = scales::pretty_breaks(n = 5))+
    facet_grid(Type~.)
  #print(plot)
  if(i==1){plot1<-plot}
  if(i==2){plot2<-plot}
  if(i==3){plot3<-plot}
  if(i==4){plot4<-plot}
}
#windows()
#par(mfrow=c(3,1))
# 2x2 panel of the four fisheries.
grid.arrange(plot1, plot2, plot3, plot4, nrow=2, ncol=2)
# Trolling is plotted separately; the Model 1 (df1) layers are commented out,
# so only the Model 2 posterior and its observations are drawn.
df1<-filter(df.1, Type=="Trolling")
df2<-filter(df.2, Type=="Trolling")
ggplot(df2, aes(Year, group=Year))+
  theme_bw()+
  geom_boxplot(
    data=df1,
    mapping= aes(ymin = q5, lower = q25, middle = q50, upper = q75, ymax = q95),
    stat = "identity",
    colour="grey", fill="grey95")+
  geom_boxplot(
    aes(ymin = q5, lower = q25, middle = q50, upper = q75, ymax = q95),
    stat = "identity",fill=rgb(1,1,1,0.1))+
  labs(x="Year", y="Catch (in thousands)", title="Offshore trolling")+
  geom_line(aes(Year,q50))+
  # geom_line(data=df1,aes(Year,q50),col="grey")+
  # geom_point(data=df1,aes(Year,obs_catch), col="red")+
  geom_point(data=df2,aes(Year,obs_catch), col="blue")+
  scale_x_continuous(breaks = scales::pretty_breaks(n = 5))
#summary(chains[ ,regexpr("nct",varnames(chains))>0])
# Convergence check: traceplots for any trolling-catch node whose upper
# Gelman-Rubin interval (psrf[2]) exceeds 1.2.
par(mfrow=c(2,3))
for(i in 6:(length(Years)-1)){
  gd<-gelman.diag(chains[,str_c("nct_ObsTotX[",i,"]")])
  #print(gd)
  if(gd$psrf[2]>1.2){
    #print(c(i, gd$psrf))
    traceplot(chainsGR[,str_c("nct_ObsTotX[",i,"]")], main=str_c("nct_ObsTotX ",df.2$Year[i]))
  }
  # }
}
9b0ee86fbbfb02638c9d608e0944d20f03a3c595 | ce0b9d348b82b861090fa3fd9d9cc7a06485b9c9 | /r_exercises/exercise2.R | 059ede80830ed35a0b12e12c162e340d788eb9cb | [] | no_license | HHS-AHRQ/MEPS-workshop | 0573ef4513947375211e0e2a1ef7afe92f5e438b | c69c4471ff1290be9c8cfacf3b74da49edf687f4 | refs/heads/master | 2023-04-14T02:51:26.693511 | 2023-03-29T13:17:07 | 2023-03-29T13:17:07 | 147,705,872 | 25 | 15 | null | null | null | null | UTF-8 | R | false | false | 6,287 | r | exercise2.R | # -----------------------------------------------------------------------------
# This program illustrates how to pool MEPS data files from different years.
# It demonstrates use of the Pooled Variance file (h36u19) to pool data years
# before and after 2019.
#
#
# The program pools 2018, 2019, and 2020 data and calculates:
# - Percentage of people with Bladder Cancer (CABLADDR)
# - Average expenditures per person with Bladder Cancer (TOTEXP, TOTSLF)
#
# Notes:
# - Variables with year-specific names must be renamed before combining files
# (e.g. 'TOTEXP19' and 'TOTEXP20' renamed to 'totexp')
#
# - When pooling data years before and after 2002 or 2019, the Pooled Variance
# file (h36u20) must be used for correct variance estimation
#
#
# Input files:
# - C:/MEPS/h224.dta (2020 Full-year file)
# - C:/MEPS/h216.dta (2019 Full-year file)
# - C:/MEPS/h209.dta (2018 Full-year file)
# - C:/MEPS/h36u20.dta (Pooled Variance Linkage file)
#
# -----------------------------------------------------------------------------
# Install/load packages and set global options --------------------------------
# Can skip this part if already installed
# install.packages("survey") # for survey analysis
# install.packages("foreign") # for loading SAS transport (.ssp) files
# install.packages("haven") # for loading Stata (.dta) files
# install.packages("dplyr") # for data manipulation
# install.packages("devtools") # for loading "MEPS" package from GitHub
#
# devtools::install_github("e-mitchell/meps_r_pkg/MEPS") # easier file import
# Load libraries (run this part each time you re-start R)
library(survey)
library(foreign)
library(haven)
library(dplyr)
library(MEPS)
# Set survey option for lonely PSUs
options(survey.lonely.psu='adjust')
options(survey.adjust.domain.lonely = TRUE)
# Load datasets ---------------------------------------------------------------
# Option 1 - load data files using read_MEPS from the MEPS package
fyc20 = read_MEPS(year = 2020, type = "FYC") # 2020 FYC
fyc19 = read_MEPS(year = 2019, type = "FYC") # 2019 FYC
fyc18 = read_MEPS(year = 2018, type = "FYC") # 2018 FYC
linkage = read_MEPS(type = "Pooled linkage") # Pooled Linkage file
# Option 2 - load Stata data files using read_dta from the haven package
# >> Replace "C:/MEPS" below with the directory you saved the files to.
# fyc20 = read_dta("C:/MEPS/h224.dta") # 2020 FYC
# fyc19 = read_dta("C:/MEPS/h216.dta") # 2019 FYC
# fyc18 = read_dta("C:/MEPS/h209.dta") # 2018 FYC
# >> Note: File name for linkage file will change every year!!
# linkage = read_dta("C:/MEPS/h36u20.dta") # Pooled Linkage file
# View data -------------------------------------------------------------------
# From the documentation:
# - Questions about cancer were asked only of persons aged 18 or older.
# - CANCERDX asks whether person ever diagnosed with cancer
# - If YES, then asked what type (CABLADDR, CABLOOD, CABREAST...)
# Raw frequency of the bladder-cancer item (includes skip/inapplicable codes)
fyc20 %>% count(CABLADDR)
# Cross-tab by adult status: CABLADDR should only be answered for age 18+
fyc20 %>%
mutate(AGEgt18 = ifelse(AGELAST >= 18, "18+", "AGE < 18")) %>%
count(AGEgt18, CANCERDX, CABLADDR)
# Create variables ------------------------------------------------------------
# - bladder_cancer = "1 Yes"   if CABLADDR = 1
# - bladder_cancer = "2 No"    if CABLADDR = 2 or CANCERDX = 2
# - bladder_cancer = "Missing" otherwise
#
# The recode is identical for every data year, so define it once instead of
# repeating the same mutate()/case_when() for 2018, 2019 and 2020.
add_bladder_cancer = function(df) {
  df %>%
    mutate(bladder_cancer = case_when(
      CABLADDR == 1 ~ "1 Yes",
      CABLADDR == 2 ~ "2 No",
      CANCERDX == 2 ~ "2 No",
      TRUE ~ "Missing"))
}

fyc20x = add_bladder_cancer(fyc20)
fyc19x = add_bladder_cancer(fyc19)
fyc18x = add_bladder_cancer(fyc18)

# QC variables:
fyc20x %>% count(CANCERDX, CABLADDR, bladder_cancer)
fyc19x %>% count(CANCERDX, CABLADDR, bladder_cancer)
fyc18x %>% count(CANCERDX, CABLADDR, bladder_cancer)
# Rename year-specific variables prior to combining ---------------------------
# MEPS suffixes weight/expenditure columns with the year (e.g. PERWT20F,
# TOTEXP20); give them year-neutral names so the files can be stacked.
# One helper replaces three copy-pasted rename/select pipelines.
prep_pool_year = function(df, yr) {
  year_vars = c(perwt  = paste0("PERWT",  yr, "F"),
                totslf = paste0("TOTSLF", yr),
                totexp = paste0("TOTEXP", yr))
  # base-R rename keeps the helper independent of tidyselect versions
  names(df)[match(year_vars, names(df))] = names(year_vars)
  df %>%
    select(DUPERSID, PANEL, VARSTR, VARPSU, perwt, totslf, totexp, AGELAST,
           CANCERDX, CABLADDR, bladder_cancer)
}

fyc20p = prep_pool_year(fyc20x, 20)
fyc19p = prep_pool_year(fyc19x, 19)
fyc18p = prep_pool_year(fyc18x, 18)

head(fyc20p)
head(fyc19p)
head(fyc18p)
# Stack data and define pooled weight variable ---------------------------------
# - for poolwt, divide perwt by number of years (3):
#   (pooled estimates then represent an annual average over 2018-2020)
pool = bind_rows(fyc20p, fyc19p, fyc18p) %>%
mutate(poolwt = perwt / 3)
# Merge the Pooled Linkage Variance file (since pooling before and after 2019 data)
# Notes:
# - DUPERSIDs are recycled, so must join by DUPERSID AND PANEL
# - File name will change every year!! (e.g. 'h36u21' once 2021 data is added)
head(pool)
head(linkage)
# Keep only the join keys plus the pooled variance strata/PSU columns
linkage_sub = linkage %>%
select(DUPERSID, PANEL, STRA9620, PSU9620)
# left_join keeps every person in `pool`
pool_linked = left_join(pool, linkage_sub, by = c("DUPERSID", "PANEL"))
# QC: row counts per PANEL must be identical before and after the join
pool %>% count(PANEL)
pool_linked %>% count(PANEL)
# Define the survey design ----------------------------------------------------
# - Use PSU9620 and STRA9620 variables, since pooling before and after 2019
pool_dsgn = svydesign(
id = ~PSU9620,
strata = ~STRA9620,
weights = ~poolwt,
data = pool_linked,
nest = TRUE)
# Calculate survey estimates ---------------------------------------------------
# - Percentage of adults with Bladder Cancer
# - Average expenditures per person, by bladder cancer status (totexp, totslf)
# Percent with bladder cancer
# (subsetting the *design* object keeps full design information for correct
#  variance estimation, unlike subsetting the data first)
svymean(~bladder_cancer, design = subset(pool_dsgn, bladder_cancer != "Missing"))
# Avg. expenditures per person
svyby(~totslf + totexp, by = ~bladder_cancer, FUN = svymean,
design = subset(pool_dsgn, bladder_cancer != "Missing"))
|
b20c688ae08bfd531380a9641bb7125d7a6a9ba3 | 0b5a37105142146695fa263c6c484459ef922d58 | /man/nveg.Rd | 7840036f015551d4d51cd3974552352fcfa16ed1 | [] | no_license | cran/dave | ce802a9247698b66fefef3e44e9216d9a7d3db02 | fee91f2d2cda746367d8eee613e134b060d081e5 | refs/heads/master | 2021-03-12T23:42:13.149391 | 2017-10-13T20:06:14 | 2017-10-13T20:06:14 | 17,695,397 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,778 | rd | nveg.Rd | \name{nveg}
\alias{nveg}
\docType{data}
\title{
European beach forest data, vegetation
}
\description{
European beach forest data, vegetation. Site factors are in data frame \code{\link{nsit}}.
}
\usage{data(nveg)}
\format{
A data frame with 11 observations on the following 21 species (the variables), recorded on a 0 to 6 scale.
\describe{
\item{\code{Fagus.silvatica}}{a numeric vector}
\item{\code{Quercus.petraea}}{a numeric vector}
\item{\code{Acer.pseudoplatanus}}{a numeric vector}
\item{\code{Fraxinus.excelsior}}{a numeric vector}
\item{\code{Lonicera.xylosteum}}{a numeric vector}
\item{\code{Sambucus.racemosa}}{a numeric vector}
\item{\code{Sambucus.nigra}}{a numeric vector}
\item{\code{Vaccinium.myrtillus}}{a numeric vector}
\item{\code{Carex.silvatica}}{a numeric vector}
\item{\code{Oxalis.acetosella}}{a numeric vector}
\item{\code{Viola.silvestris}}{a numeric vector}
\item{\code{Luzula.nemorosa}}{a numeric vector}
\item{\code{Veronica.officinalis}}{a numeric vector}
\item{\code{Galium.odoratum}}{a numeric vector}
\item{\code{Lamium.galeobdolon}}{a numeric vector}
\item{\code{Primula.elatior}}{a numeric vector}
\item{\code{Allium.ursinum}}{a numeric vector}
\item{\code{Arum.maculatum}}{a numeric vector}
\item{\code{Ranunculus.ficaria}}{a numeric vector}
\item{\code{Eurhynchium.striatum}}{a numeric vector}
\item{\code{Polytrichum.formosum}}{a numeric vector}
}
}
\details{
Artificial data
}
\source{
Wildi, O. & Orloci, L. 1996. Numerical Exploration
of Community Patterns. 2nd ed. SPB Academic Publishing, The Hague.
}
\references{
Wildi, O. 2017. Data Analysis in Vegetation Ecology. 3rd ed. CABI, Oxfordshire, Boston.
}
\examples{
summary(nveg)
}
\keyword{datasets}
|
2954379ad3d8525019bd3864e385b6a20ac0bbe0 | e03daf38d0e6b8755a28b321bd3f9102c47a409e | /man/PBMDesign.Rd | 084a2d5fb1ac402bdb68e290afa5f16410634fdd | [
"MIT"
] | permissive | pkimes/upbm | 9709b857c5bb9e5b468785d567728094a30eccba | 019977afd48c75534e5ce5e87e9d2fcfe46da53e | refs/heads/master | 2021-03-22T03:02:25.365308 | 2020-10-16T15:23:24 | 2020-10-16T15:23:24 | 118,683,405 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,948 | rd | PBMDesign.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/AllGenerics.R, R/PBMDesign-constructor.R
\name{PBMDesign}
\alias{PBMDesign}
\alias{PBMDesign,data.frame-method}
\alias{PBMDesign,DataFrame-method}
\alias{PBMDesign,PBMExperiment-method}
\title{Create a new PBMDesign object}
\usage{
PBMDesign(object, ...)
\S4method{PBMDesign}{data.frame}(object, ...)
\S4method{PBMDesign}{DataFrame}(object, ...)
\S4method{PBMDesign}{PBMExperiment}(object)
}
\arguments{
\item{object}{a data.frame with each row corresponding to a probe on the array.
Must include `Sequence' and (unique) `probeID' columns, along with any
other metadata for probes, e.g. array `Row' or `Column' spatial coordinates.
Alternatively, a \code{\link[=PBMExperiment-class]{PBMExperiment}} object to
return the associated \code{PBMDesign} object.}
\item{...}{optional probe design parameters to be defined as part of the \code{PBMDesign}
object. See the \code{\link[=PBMDesign-class]{PBMDesign}} class definition for
a list of probe design parameters. Important parameters are as described in
the Details section below.}
}
\value{
\code{PBMDesign} object.
}
\description{
Create a new PBMDesign object of protein binding microarray probe design information.
Alternatively, the function can be called on a PBMExperiment to extract the
probe design information associated with experimental data.
}
\details{
Probe design parameters can be specified by name. The following are a couple important
parameters which are defined by default for universal PBM designs in the \pkg{upbmAux}
package.
\enumerate{
\item \code{probeFilter}: optional named list of probe filters to be used to subset
probes during data analysis steps. List names must correspond to columns in `design'.
List entries must be single-parameter functions to be called on the corresponding column
to return a logical vector of probes to keep (TRUE) and drop (FALSE) during analysis.
\item \code{probeTrim}: optional integer vector of length 2 specifying start and end
positions in probe `Sequence' to use in analysis steps.
}
}
\examples{
## Universal array designs included with the Universal PBM Analysis Suite software
## available at the referenced link can be read in as data frames or tibbles (here
## as an object 'mydesign') and converted to a PBMDesign object.
## The 'probeFilter=' and 'probeTrim=' settings here filter to de Bruijn sequences
## and use only the first 36 bases of each probe sequence for downstream analysis.
\dontrun{
PBMDesign(
object = mydesign,
probeFilter = list(probeID = function(x) { grepl("^dBr", x) }),
probeTrim = c(1, 36)
)
}
}
\references{
\itemize{
\item Berger, M. F., & Bulyk, M. L. (2017). Universal Protein Binding Microarray (PBM) Analysis Suite Homepage. Retrieved October 16, 2020, from \url{http://thebrain.bwh.harvard.edu/PBMAnalysisSuite/indexSep2017.html}
}
}
\seealso{
\code{\link{PBMDesign-class}}
}
\author{
Patrick Kimes
}
|
7d813eca51827614b52b6e95886844f500c14f68 | 67af503e72307e2171b9862b280f5f290f9384d0 | /clean_r_to_submit.R | 7f9261e21bb4a29672599398d9aa1e65701b1a9a | [] | no_license | pozsgaig/search_location | ba476076e26eaf28c35615341636adc7e0acf7ae | 9183a9e5990ac52349c115ef5ae3d40f2989888e | refs/heads/main | 2023-07-17T14:30:05.730418 | 2021-09-09T15:40:03 | 2021-09-09T15:40:03 | 404,402,319 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 13,605 | r | clean_r_to_submit.R | #### Loading necessary packages and functions ####
library(data.table) # for character issues and calculating AADP
library(scales) # for rescaling data
library(olsrr) # for testing heteroscedasticity
library(fitdistrplus) # for checking dsitribution
library(car) # for boxcox transformation
library(yarrr) # for pirate plots
library(BiodiversityR) # quick plotting and vegdist
library(reshape2) # for melt
library(ggplot2) # plotting
library(ggpubr) # plotting
library(sp) #for convex hull areas
library(welchADF) # for Welch ADF
### function replacing NAs with a fixed value (0 by default)
# Works on vectors and matrices/data frames (is.na() replacement preserves
# dimensions). `value` generalizes the original hard-coded 0 and defaults to
# it, so every existing NA20(x) call behaves exactly as before.
NA20 <- function(x, value = 0)
{
  x[is.na(x)] <- value
  x
}
#### Data loading and data preparation ####
search_hits<-read.table("ecol_search_hits.csv", header = T, quote = "\"")
# Centering data
# Convert to data.table so per-group summaries can be attached by reference.
search_hits_group_means<- data.table(search_hits)
# AAD: absolute deviation of each hit count from its full design-cell mean
# (keyword complexity x engine x browser x cache).
search_hits_group_means[, AAD:=abs(Number_hits-mean(Number_hits)),
by = c("Keyword_complexity", "Search_engine", "Browser", "Cache")]
search_hits_group_means[, meanHits:=mean(Number_hits),
by = c("Keyword_complexity", "Search_engine", "Browser", "Cache")]
search_hits_group_means[, sumHits:=sum(Number_hits),
by = c("Keyword_complexity", "Search_engine", "Browser", "Cache")]
# AADP: absolute deviation relative to the group mean; note the *coarser*
# grouping here (complexity x engine only), unlike AAD above.
search_hits_group_means[, AADP := abs(Number_hits-mean(Number_hits))/mean(Number_hits),
by = c("Keyword_complexity","Search_engine")]
# non-absolute average deviation
search_hits_group_means[, NAADP := (Number_hits-mean(Number_hits))/mean(Number_hits),
by = c("Keyword_complexity","Search_engine")]
# coefficient of variation of hits within each complexity x engine group
search_hits_group_means[, scaledHits_SD := sd(Number_hits)/mean(Number_hits),
by = c("Keyword_complexity","Search_engine")]
search_hits_group_means[, logscaledHits := log(1+abs(Number_hits-mean(Number_hits))/mean(Number_hits)),
by = c("Keyword_complexity","Search_engine")]
search_hits_group_means$loghits<-log(1+search_hits_group_means$Number_hits)
# The percentage one institution got from the max value of hits per search expressions
# NOTE(review): despite the name, gmedianhits stores the group *maximum*;
# maxperc below uses it as a max, so only the name is misleading.
search_hits_group_means[, gmedianhits := max(Number_hits),
by = c("Search_engine","Keyword_complexity", "Topic")]
search_hits_group_means$maxperc<-search_hits_group_means$Number_hits/search_hits_group_means$gmedianhits
search_hits_group_means$negAADP<-1-search_hits_group_means$AADP
# add AADP rescaled into (0,1) (could also use (0.00000001, 0.99999999) for betareg)
search_hits_group_means$SAADP<-rescale(search_hits_group_means$AADP, to=c(0.000001, 0.999999))
# add grouping factor: one level per full design cell
search_hits_group_means$gr_fact<-as.factor(paste0(search_hits_group_means$Search_engine,
search_hits_group_means$Keyword_complexity,
search_hits_group_means$Browser,
search_hits_group_means$Cache))
### Preparation for multivariate analysis
ecol_lines<-read.table("ecol_search_lines.csv", header = T, quote = "\"")
ecol_searches<-read.table("ecol_search_twenty.csv", header = T, quote = "\"")
# "Community" matrix: rows = one search (line x affiliation x computer x
# replica), columns = retrieved item ids (num_id), values = occurrence counts.
com_mat<-with(ecol_searches,
tapply(First_auth, list(paste("SL", Search_line, Affiliation, Computer, Replica, sep="_"), num_id), length)
)
# tapply leaves NA for absent combinations; treat those as zero counts
com_mat<-NA20(com_mat)
com_mat<-as.data.frame(com_mat)
# Interactive sanity checks on marginal totals (printed, not stored)
sort(rowSums(com_mat))
sort(colSums(com_mat))
# "Environment" matrix: one row per search with its design factors
env_mat<-with(ecol_searches,
aggregate(Author, list(Search_line=Search_line, Affiliation = Affiliation, Computer=Computer, Replica=Replica), length)
)
# Drop the aggregate count column; only the grouping factors are needed
env_mat[,5]<-NULL
nrow(env_mat)
nrow(com_mat)
env_mat<-merge(env_mat, ecol_lines, by= "Search_line")
# Align the two matrices via identical, order-matched rownames
rownames(env_mat)<-paste("SL", env_mat$Search_line, env_mat$Affiliation, env_mat$Computer, env_mat$Replica, sep="_")
com_mat<-com_mat[rownames(env_mat),]
rownames(env_mat)
rownames(com_mat)
####################################
#### Testing normality and homoscedasticity ####
### Checking distributions with fitdistrplus package
### https://stats.stackexchange.com/questions/132652/how-to-determine-which-distribution-fits-my-data-best
# ask=T pauses between the per-group Cullen-Frey plots
par(ask=T)
with(search_hits_group_means,
by(search_hits_group_means, gr_fact, function(x) descdist(x$AADP, discrete = F))
)
descdist(search_hits_group_means$AADP, discrete = F)
### Testing heteroscedasticity with olsrr package (Breusch Pagan Test)
### https://cran.r-project.org/web/packages/olsrr/vignettes/heteroskedasticity.html
lm1<-lm(AADP ~ Search_engine*(Keyword_complexity+Browser+Cache), data=search_hits_group_means)
ols_test_breusch_pagan(lm1, rhs = TRUE)
# Repeat the test separately within each keyword-complexity level,
# on log(0.11 + AADP) (offset keeps the argument positive)
with(search_hits_group_means,
by(search_hits_group_means, Keyword_complexity, function(x) {
lm1<-lm(log(0.11+AADP) ~ Search_engine*Browser*Cache, data=x)
m<-ols_test_breusch_pagan(lm1, rhs = TRUE)
return(m)}
)
)
### Trying log transformation
# NOTE(review): this overwrites the logscaledHits column created earlier
# with a version grouped at the finer Topic/Browser/Cache level.
search_hits_group_means[, logscaledHits := log(1+abs(Number_hits-mean(Number_hits))/mean(Number_hits)),
by = c("Keyword_complexity","Search_engine", "Topic", "Browser", "Cache")]
### Arcsin transformation does not work because of the values greater than 1
### Trying square root transformation
search_hits_group_means[, sqrtscaledHits := sqrt(AADP),
by = c("Keyword_complexity","Search_engine", "Topic", "Browser", "Cache")]
### Trying boxcox transformations
# with car package
# (small offset avoids boxCox's positivity requirement failing on zeros)
search_hits_group_means[, boxcoxgHits := boxCoxVariable(AADP+0.0000001),
by = c("Keyword_complexity","Search_engine", "Topic", "Browser", "Cache")]
#### Welch tests ####
# Omnibus Welch ADF test of 1 - AADP across the full between-subjects
# design, plus a trimmed-means variant and a bootstrapped version.
omnibus_LSM <- welchADF.test(search_hits_group_means, response = "negAADP", between.s =
c("Search_engine", "Keyword_complexity", "Browser", "Cache"), contrast = "omnibus")
omnibus_trimmed <- update(omnibus_LSM, trimming = TRUE)
omnibus_trimmed_boot <- update(omnibus_trimmed, bootstrap = TRUE, seed = 12345)
summary(omnibus_LSM)
summary(omnibus_trimmed_boot)
# Pairwise search-engine comparisons with trimming and effect sizes.
# NOTE(review): `temp` is not defined anywhere in this script, so this call
# fails as written -- it presumably should be search_hits_group_means.
pairwise_trimmed <- welchADF.test(AADP ~ Search_engine, data = temp, effect = "Search_engine",
contrast = "all.pairwise", trimming = TRUE, effect.size = TRUE)
pairwise_trimmed_boot <- update(pairwise_trimmed, bootstrap = TRUE, seed = 12345)
summary(pairwise_trimmed)
##################################
#### Basic stats and graphs ####
# Hit number means
# Coefficient of variation of raw hit counts per complexity x engine cell
tapply(search_hits$Number_hits, list(search_hits$Keyword_complexity,
search_hits$Search_engine), function(x) sd(x)/mean(x))
# Standard deviation of hit counts on the same grouping
tapply(search_hits_group_means$Number_hits,
list(search_hits_group_means$Keyword_complexity,
search_hits_group_means$Search_engine), sd)
### Tables for paper
# Mean and SD of hits: rows = engine/browser/cache combination,
# columns = keyword complexity
grouped_hits_mean<-with(search_hits,
tapply(Number_hits, list(paste(Search_engine, Browser, Cache, sep="_"), Keyword_complexity), mean))
grouped_hits_sd<-with(search_hits,
tapply(Number_hits, list(paste(Search_engine, Browser, Cache, sep="_"), Keyword_complexity), sd))
# AADP: mean and SD per keyword-complexity x search-engine cell.
# Fix: the original referenced `search_hits_group_means_` (note the trailing
# underscore), an object never defined in this script -- the call errored.
tapply(search_hits_group_means$AADP,
       list(search_hits_group_means$Keyword_complexity,
            search_hits_group_means$Search_engine), mean)
tapply(search_hits_group_means$AADP,
       list(search_hits_group_means$Keyword_complexity,
            search_hits_group_means$Search_engine), sd)
### Plotting
# Pirate plots of the deviation measures by search engine (all data pooled)
par(mfrow=c(1,1), las=1)
pirateplot(logscaledHits~Search_engine, data=search_hits_group_means)
par(mfrow=c(1,1), las=1)
pirateplot(AADP~Search_engine, data=search_hits_group_means)
par(mfrow=c(1,1), las=1)
pirateplot(NAADP~Search_engine, data=search_hits_group_means)
# One panel per topic x keyword-complexity combination, on a log scale
# (the 0.001 offset avoids log(0) for zero-deviation observations)
par(mfrow=c(2,2), las=1)
for (i in unique(search_hits_group_means$Topic))
{
for (j in unique(search_hits_group_means$Keyword_complexity))
{
x<- search_hits_group_means[search_hits_group_means$Topic==i & search_hits_group_means$Keyword_complexity==j,]
pirateplot(log(0.001+AADP) ~ Search_engine, data=x, main= paste(i, j))
}
}
### GGPLOT version
ggboxplot(data=search_hits_group_means, x="Search_engine", y="AADP", color="Search_engine",
palette = "jco", add = "jitter", add.params = list(size = 1, alpha = 0.2), lwd=0.7,
facet.by = "Keyword_complexity",
xlab = "Search engine", ylab = "AADP", legend.title="") +
rotate_x_text()
# Same per-topic panels, now split by engine x browser
par(mfrow=c(2,2), las=1)
for (i in unique(search_hits_group_means$Topic))
{
for (j in unique(search_hits_group_means$Keyword_complexity))
{
x<- search_hits_group_means[search_hits_group_means$Topic==i & search_hits_group_means$Keyword_complexity==j,]
pirateplot(AADP ~ Search_engine*Browser, data=x, main= paste(i, j))
}
}
# Engine x cache interaction on the pooled data
pirateplot(AADP ~ Search_engine*Cache, data=search_hits_group_means)
par(mfrow=c(2,1), las=2)
# For each keyword complexity, compare within-engine pairwise Jaccard
# distances of the retrieved-item sets between search engines.
for (n in unique(env_mat$Keyword_complexity))
{
sim_data<-NULL
for (k in unique(env_mat$Search_engine))
{
# Subset design rows and matching community rows for this engine/complexity
e_mat<-env_mat[as.character(env_mat$Keyword_complexity) == n & as.character(env_mat$Search_engine) == k,]
c_mat<-com_mat[rownames(com_mat) %in% rownames(e_mat),]
# Drop empty searches / never-retrieved items, then re-align row order
c_mat<-c_mat[rowSums(c_mat)>0,colSums(c_mat)>0]
e_mat<-e_mat[rownames(e_mat) %in% rownames(c_mat),]
c_mat<-c_mat[order(rownames(c_mat)),]
e_mat<-e_mat[order(rownames(e_mat)),]
# Pairwise Jaccard distance matrix between searches
c_mat <-as.matrix(vegdist(c_mat, "jaccard", binary=F))
# subset() without a condition is a no-op here (leftover from editing)
sim_df<-subset(melt(c_mat))
sim_df$Search_engine<-rep(k, nrow(sim_df))
# NOTE(review): rbind-in-loop growth; fine for a few engines, but bind
# once after the loop if this ever scales up
sim_data<-rbind(sim_data, sim_df)
}
# NOTE(review): vegdist() returns *dissimilarities*; the "Similarity" label
# (and the plot axis) may therefore be misleading -- confirm intent.
colnames(sim_data)[3]<-"Similarity"
# Strip the trailing "_KWE" suffix for the panel title
plottitle<-paste(substring(as.character(e_mat[1,"Keyword_complexity"]),
1, nchar(as.character(e_mat[1,"Keyword_complexity"]))-4))
pirateplot(Similarity~Search_engine,
data=sim_data, main = plottitle,
xlab = "")
}
#################################
#### Multivariate for unique papers ####
### Convex hulls
article_dist<-vegdist(com_mat, distance='jaccard')
# NOTE(review): this monoMDS fit is immediately overwritten by the capscale
# fit below and is never used.
Ordination.model1 <- monoMDS(article_dist,
data=env_mat)
# Distance-based RDA of retrieved items constrained by the design factors
Ordination.model1 <- capscale(com_mat ~ Search_engine*(Browser +Cache),
data=env_mat, distance='jaccard', sqrt.dist=F, add=F)
# NOTE(review): main=n reuses the loop variable left over from the previous
# section; the title is whatever complexity was processed last.
plot1 <- ordiplot(Ordination.model1, choices=c(1,2), main=n)
plot(Ordination.model1)
check.ordiscores(com_mat, Ordination.model1, check.species=T)
# Model summaries and fit diagnostics (printed interactively)
summary(Ordination.model1, scaling='sites')
eigenvals(Ordination.model1)
RsquareAdj(Ordination.model1)
deviance(Ordination.model1)
vif.cca(Ordination.model1)
# Permutation tests of the constrained ordination
permutest(Ordination.model1, permutations=100)
anova.cca(Ordination.model1, step=100, by='terms')
anova.cca(Ordination.model1, step=100, by='margin')
par(mfrow=c(1,1), cex=1)
plot1 <- ordiplot(Ordination.model1, type='none',choices=c(1,2),
scaling='sites')
# NOTE(review): attach() without a matching detach() leaves env_mat on the
# search path for the rest of the session -- prefer with()/data= arguments.
attach(env_mat)
summary(ordiellipse(plot1, groups=Search_engine, conf=0.9, kind='sd',
show.groups = env_mat[env_mat$Keyword_complexity=="simple_KWE",]))
par (mfrow=c(2,1))
all_hulls<-NULL
# Per keyword complexity: ordinate the searches, draw convex hulls per
# search engine, record hull areas, and run a PERMANOVA on the design.
for (n in unique(env_mat$Keyword_complexity))
{
cat(n)
cat("\n")
#for testing
# n="medicine_complex_KWE"
e_mat<-env_mat[env_mat$Keyword_complexity == n,]
# NOTE(review): leftover debug line -- on the first pass this prints the
# c_mat left over from the previous section, not this iteration's subset.
nrow(c_mat)
c_mat<-com_mat[rownames(com_mat) %in% rownames(e_mat),]
# c_mat<-c_mat[rowSums(c_mat)>0,colSums(c_mat)>0]
e_mat<-e_mat[rownames(e_mat) %in% rownames(c_mat),]
c_mat<-c_mat[order(rownames(c_mat)),]
e_mat<-e_mat[order(rownames(e_mat)),]
# Binary (presence/absence) Jaccard distances between searches
c_mat <- as.matrix(vegdist(c_mat, "jaccard", binary=T))
melted_c_mat <- melt(c_mat)
# NOTE(review): ggplot objects auto-print only at top level; inside a for
# loop this heatmap is built but never drawn -- wrap in print() to show it.
ggplot(data = melted_c_mat, aes(x=Var1, y=Var2, fill=value)) +
geom_tile()
Ordination.model1 <- capscale(c_mat ~ Search_engine*(Browser +Cache), data=e_mat)
# Rescale site scores so hull areas are comparable across complexities
Ordination.model1$points<-rescale(Ordination.model1$points, to=c(1, 100))
plot1 <- ordiplot(Ordination.model1, type='none',choices=c(1,2), main=n)
# Convex hull per search engine; NOTE(review): col=1:4 is an argument to
# with(), not to ordihull(), so it has no effect here.
env_hull<-with(e_mat, ordihull(plot1, groups=Search_engine), col=1:4)
# env_hull<-lapply(env_hull, FUN= function(x) rescale(x, to=c(1, 100)))
# PERMANOVA of the distance matrix against the full design
adonis_result<-with(e_mat,
adonis2(c_mat ~ Search_engine*Browser*Cache, data=e_mat, method='jaccard',
permutations=100)
)
# Area of each engine's convex hull via sp::Polygon
hullsizes<-sapply(names(env_hull), function(x) {chull.poly <- Polygon(env_hull[x], hole=F)
chull.area <- chull.poly@area
return(chull.area)})
hullsizes<-as.data.frame(t(hullsizes))
rownames(hullsizes)<-n
all_hulls<-rbind(all_hulls, hullsizes)
print(hullsizes)
cat("\n")
print(adonis_result)
cat("\n")
cat("******************")
}
|
546d550b85fba9d26cc91baca75ccaa9c9896fa0 | 6a477dfdb76af585f1760767053cabf724a341ce | /man/combine_plots.Rd | 842b58c71be45cdf88523d298b266b4adb6e8052 | [] | no_license | gmlang/ezplot | ba94bedae118e0f4ae7448e6dd11b3ec20a40ab4 | 9c48771a22d3f884d042a6185939765ae534cb84 | refs/heads/master | 2022-09-20T11:43:50.578034 | 2022-09-16T02:05:24 | 2022-09-16T02:05:24 | 34,095,132 | 6 | 2 | null | null | null | null | UTF-8 | R | false | true | 1,329 | rd | combine_plots.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/combine_plots.R
\name{combine_plots}
\alias{combine_plots}
\title{Combines several ggplot objects (plots) into one figure.}
\usage{
combine_plots(
...,
align = "v",
ncol = 2,
labels = "auto",
label_size = 10,
title = "Grand Title",
title_size = 12
)
}
\arguments{
\item{...}{Several ggplot objects. Pass in no more than 6 to avoid crowdness.}
\item{align}{Specifies whether the plots should be horizontally ("h") or
vertically ("v") aligned. Options are "none", "h", "v" (default), and "hv"
(align in both directions).}
\item{ncol}{Number of columns in the plot grid. Default = 2.}
\item{labels}{List of labels to be added to the plots. You can also set
\code{labels="auto"} (default) to auto-generate lower-case labels (a. b. c. d. ...),
\code{labels="AUTO"} for upper-case labels (A. B. C. D. ...), or
\code{labels='autonum'} for integer labels (1. 2. 3. 4. ...).}
}
\value{
A ggplot object that shows multiple graphs in one figure.
}
\description{
Sometimes we want to put several (ggplot) plots together in one figure for
publication. \code{combine_plots} does exactly that, allowing additional
parameters to control the alignment and index labels of the individual plots,
and etc.
}
\examples{
inst/examples/ex-combine_plots.R
}
|
b37e3450ce18dea57b1778175c68eae37ed44dbd | 4f00cefef94157300a498ecaa4ee354df1a2615f | /binomial/man/bin_skewness.Rd | 6a812564f4277197261a36b97cb1015680173406 | [] | no_license | stat133-sp19/hw-stat133-feng1128 | 288b2101de9338b6384e3c23a084f6531affd0e3 | 9412f3a4ca07a9e1448cd6ab38580c71d4a0ed82 | refs/heads/master | 2020-04-28T19:02:29.233673 | 2019-05-03T20:10:38 | 2019-05-03T20:10:38 | 175,498,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 528 | rd | bin_skewness.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/function.R
\name{bin_skewness}
\alias{bin_skewness}
\title{bin_skewness}
\usage{
bin_skewness(trials, prob)
}
\arguments{
\item{trials}{the number of trials}
\item{prob}{the probability of success on each trial}
}
\value{
the skewness of a random binomial variable
}
\description{
calculate the skewness of a random binomial variable
}
\examples{
# the skewness of 5 trials with the probability of success in each trial = 0.5
bin_skewness(5, 0.5)
}
|
a2791034d6854347a2adc7f01861d088372caad2 | 80f6a2f941ce640a5a3281c0203a5bebc631143b | /R/publish.R | d758dc4f1f0834bc3254538dc9ec61220c145767 | [] | no_license | Yixf-Self/bookdown | d9d0237d41c8ef3a3e3ac7a32723352b8e840b24 | c9b8fee5e345c06b9ad748d7cc69244e66fb25ff | refs/heads/master | 2021-01-19T11:53:03.519825 | 2016-03-27T05:18:06 | 2016-03-27T05:18:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,554 | r | publish.R | #' Publish a book to the web
#'
#' Publish a book to the web. Note that you should be sure to render all
#' versions of the book before publishing, unless you have specified
#' \code{render = TRUE}.
#'
#' @param name Name of the book (this will be used in the URL path of the
#' published book). Defaults to the \code{book_filename} in
#' \code{_bookdown.yml} if not specified.
#' @param account Account name to publish to. Will default to any previously
#' published to account or any single account already associated with
#' \code{server}.
#' @param server Server to publish to (by default beta.rstudioconnect.com but
#' any RStudio Connect server can be published to).
#' @param sourceCode Should the book's source code be included in the
#' upload?
#' @param render \code{TRUE} to render all formats prior to publishing (defaults
#' to \code{FALSE}, however, this can be modified via the
#' \code{bookdown.render_on_publish} option). Note that this requires the use
#' of either an R script \file{_render.R} (or a \file{Makefile} if
#' \file{_render.R} is not found) to provide the implementaiton of rendering
#' all formats that you want to publish. If neither \code{_render.R} nor
#' \code{Makefile} exists, it falls back to \code{render_book()}.
#'
#' @export
publish_book = function(
  name = NULL, account = NULL, server = NULL, sourceCode = FALSE,
  render = getOption('bookdown.render_on_publish', FALSE)
) {
  # Offer to set up a bookdown.org account when no usable RStudio Connect
  # account is configured on this machine (shinyapps.io cannot host books).
  usable = subset(rsconnect::accounts(), server != "shinyapps.io")
  if (nrow(usable) == 0) {
    # register the bookdown.org server first, if rsconnect does not know it
    known = rsconnect::servers()
    if (nrow(subset(known, name == 'bookdown.org')) == 0) {
      rsconnect::addServer("https://bookdown.org/__api__", 'bookdown.org')
    }
    # ask before configuring an account; bail out quietly on 'n'
    message('You do not currently have a bookdown.org publishing account ',
            'configured on this system.')
    answer = readline('Would you like to configure one now? [Y/n]: ')
    if (tolower(answer) == 'n') return(invisible())
    rsconnect::connectUser(server = 'bookdown.org')
  }
  # hand the actual upload off to rsconnect
  rsconnect::deploySite(
    siteDir = getwd(), siteName = name, account = account, server = server,
    sourceCode = sourceCode, render = render
  )
}
|
b5b9100ef7bcc82182a51027b20377e45b4534d5 | 719dcbe5e619a1b6d08db0ccda1f1a788b161b6c | /R/6_fig4bFlippedBoxplots.R | 18a08fa26a30e5b1cdace2a032dc2cb237a4021a | [
"MIT"
] | permissive | blmoore/3dgenome | f19d3b1ba0e8d2f4a25b83b81ad45537d5e67fbe | 1fa7523bd512cb0d853ea90f3c566a08aee01097 | refs/heads/master | 2020-12-25T19:14:46.458280 | 2015-06-29T16:14:02 | 2015-06-29T16:14:02 | 16,910,905 | 6 | 0 | null | null | null | null | UTF-8 | R | false | false | 17,435 | r | 6_fig4bFlippedBoxplots.R | ######### Cell-type specific / shared enrichments ###########
# Look at chromHMM / SegWay combined annotations in flipped #
# open, flipped closed compartments and test for enrichment #
# or depletion. For some states compare regions that are #
# shared between cell types and those that are specific to #
# one (cell type-specfic vs. shared; e.g. enhancers). #
#############################################################
# notes = {
# runtime: 5-10 mins,
# external deps: [bedtools, chromhmm+segway files]
# };
library("data.table")
library("dplyr")
library("GenomicRanges")
library("ggplot2")
library("gridExtra")
library("blmR")
# To run this script you must download the following files:
# * chromhmm.segway.gm12878.comb11.concord4.bed
# * chromhmm.segway.h1hesc.comb11.concord4.bed
# * chromhmm.segway.k562.comb11.concord4.bed
# and place them under bedfiles/
# These are the ENCODE combined chromatin state predictions
# for ChromHMM + Segway, from the Jan 2011 data freeze. At
# the time of writing, these are a default track in UCSC genome
# browser, and can also be downloaded from:
# http://ebi.edu.au/ftp/software/software/ensembl/encode/integration_data_jan2011/byDataType/segmentations/jan2011/Combined_7_state/
# Fail fast if the required ENCODE chromatin-state annotation is missing.
if(!file.exists("data/bedfiles/chromhmm.segway.gm12878.comb11.concord4.bed"))
stop("No chromatin state file found at:
\tdata/bedfiles/chromhmm.segway.gm12878.comb11.concord4.bed \nCannot run.")
# Per-cell-type data frames of genomic bins (rownames "chr-start"), each
# with a compartment eigenvector in column `eigen`.
g.dat <- readRDS("data/rds/Gm12878_35Vars.rds")
h.dat <- readRDS("data/rds/H1hesc_35Vars.rds")
k.dat <- readRDS("data/rds/K562_35Vars.rds")
## 1) Calculate intersections between flipped regions
## and chromHMM / SegWay chromatin states
# Eigenvectors for the three cell types, one row per bin
eigs <- data.frame(g=g.dat$eigen, h=h.dat$eigen, k=k.dat$eigen)
rownames(eigs) <- rownames(g.dat)
# callStates (blmR) discretises each eigenvector into a 1/2 state code;
# per the labels below, 1 corresponds to closed and 2 to open.
eigs <- transform(eigs, g.state = callStates(g)[,2],
h.state = callStates(h)[,2],
k.state = callStates(k)[,2])
# Sum of the three codes: 3 = all closed, 6 = all open, 4/5 = exactly one
# cell type disagrees with the other two (a "flipped" bin).
eigs$sum <- rowSums(eigs[,4:6])
eigs$flip <- NA
eigs$flip <- with(eigs, {
# if all 2 or all 1
ifelse(sum == 6 | sum == 3, "none",
ifelse(h.state == 1 & sum == 5, "h.closed",
ifelse(g.state == 1 & sum == 5, "g.closed",
ifelse(k.state == 1 & sum ==5, "k.closed",
ifelse(h.state == 2 & sum == 4, "h.open",
ifelse(g.state == 2 & sum == 4, "g.open",
ifelse(k.state == 2 & sum == 4, "k.open", "NA")))))))
})
# NOTE(review): the fall-through value is the *string* "NA", not a real NA;
# table()/unique() downstream treat it as its own category.
# Optional: look at distribution of open, closed:
# pie(table(eigs$flip), col=brewer.pal(7, "Reds"))
# Write the 1 Mb blocks of one flip category (from the global `eigs`) to
# data/bedfiles/<flip>.bed as chr / start / end / id.  Row names of `eigs`
# encode the coordinates as "<chr>-<start>".
writeBed <- function(flip=c("h.open", "h.closed",
                            "g.open", "g.closed",
                            "k.open", "k.closed",
                            "none")){
  # Avoid scientific notation in coordinates; restore the option on exit
  # rather than leaking it into the session.
  old <- options(scipen = 99)
  on.exit(options(old), add = TRUE)
  rns <- rownames(eigs[eigs$flip == flip, ])
  starts <- as.numeric(gsub(".*-", "", rns))  # numeric, not character
  df <- data.frame(chr   = gsub("-.*", "", rns),
                   start = starts,
                   end   = starts + 1e6,      # fixed 1 Mb block size
                   id    = paste0(flip, ".", seq_along(rns)))
  write.table(df, file = paste0("data/bedfiles/", flip, ".bed"),
              quote = FALSE, row.names = FALSE, col.names = FALSE, sep = "\t")
}
### As-is, none.bed:
# h g k
# 1 2 1 <- g.open
# 2 2 2 <- none
# 1 2 2 <- h.closed
# 2 1 2
# write all types of flip (+none) to bed files for intersection
# (Vectorize lets writeBed take the whole vector of flip categories)
writeBed <- Vectorize(writeBed)
writeBed(unique(eigs$flip))
# All six cell-type x direction combinations plus the unflipped background.
# NB: paste0's recycling scrambles the order, but every combination appears.
fnames <- c(paste0(c("h", "g", "k"), rep(c(".open.bed", ".closed.bed"), 3)), "none.bed")
for(f in fnames){
  # Choose the matching cell type's segmentation from the bed file's
  # first letter (h = H1 hESC, g = GM12878, otherwise K562).
  state.file <- ifelse(substr(f, 1, 1) == "h",
                       "chromhmm.segway.h1hesc.comb11.concord4.bed",
                       ifelse(substr(f, 1, 1) == "g",
                              "chromhmm.segway.gm12878.comb11.concord4.bed",
                              "chromhmm.segway.k562.comb11.concord4.bed"))
  # -wao writes both A and B records plus the overlap width (0 if none);
  # requires bedtools on the PATH.
  cmd <- paste0("bedtools intersect -a data/bedfiles/", f, " -b data/bedfiles/",
                state.file, " -wao > data/text/", f, "_isect.out")
  cat("Running: ", cmd, "\n")
  res <- system(cmd, intern=F)
}
### END 1 ###
## 2) Aggregate and analyse intersection results (no cell type specific / shared distinction)
# Per-block chromatin-state overlap totals for one cell type.
# Reads the bedtools -wao outputs for the flipped-open, flipped-closed and
# unflipped ("none") blocks, sums the overlapping base pairs per
# (flip, block id, feature), drops the "." no-overlap rows and tags the
# result with the cell type code.
ctFeats <- function(ct=c("h", "g", "k")){
  isectCols <- c("chr", "start", "end", "id",
                 "chr.b", "start.b", "end.b", "feature",
                 "1k", "dot", "start.o", "end.o", "col", "overlap")
  # Read one intersection file and label its flip category.
  readIsect <- function(path, flip){
    d <- read.table(path, stringsAsFactors=F)
    colnames(d) <- isectCols
    d$flip <- flip
    d
  }
  combined <- rbind(
    readIsect(paste0("data/text/", ct, ".open.bed_isect.out"), "open"),
    readIsect(paste0("data/text/", ct, ".closed.bed_isect.out"), "closed"),
    readIsect("data/text/none.bed_isect.out", "none")
  )
  perFeat <- group_by(combined, flip, id, feature) %>% summarise(olap = sum(overlap))
  perFeat <- subset(perFeat, feature != ".")  # "." marks blocks with no overlap
  perFeat$ct <- ct
  return(perFeat)
}
# Per-block feature totals for all three cell types (section 2 output).
hf1 <- ctFeats("h")
gf1 <- ctFeats("g")
kf1 <- ctFeats("k")
f1 <- rbind(hf1, gf1, kf1)
## 3) Build subset of annotations that are preserved in all cell types (?)
# Column classes of the 9-column combined-segmentation bed files.
class.vec <- c("character", "numeric", "numeric", "character", "numeric", "character",
               "numeric", "numeric", "character")
# NB: colClasses makes stringsAsFactors moot, but the argument is spelled
# out in full here -- the old code misspelled it ("stringsAsFactor"),
# silently relying on R's partial argument matching.
chrom.gm <- read.table("data/bedfiles/chromhmm.segway.gm12878.comb11.concord4.bed",
                       stringsAsFactors = FALSE, colClasses = class.vec)
chrom.h1 <- read.table("data/bedfiles/chromhmm.segway.h1hesc.comb11.concord4.bed",
                       stringsAsFactors = FALSE, colClasses = class.vec)
chrom.k5 <- read.table("data/bedfiles/chromhmm.segway.k562.comb11.concord4.bed",
                       stringsAsFactors = FALSE, colClasses = class.vec)
# GRanges per cell type: V1 = chr, V2/V3 = start/end, V4 = state label.
c.gm <- with(chrom.gm, GRanges(V1, IRanges(start=V2, end=V3), "+", feat=V4))
c.h1 <- with(chrom.h1, GRanges(V1, IRanges(start=V2, end=V3), "+", feat=V4))
c.k5 <- with(chrom.k5, GRanges(V1, IRanges(start=V2, end=V3), "+", feat=V4))
# For chromatin state `f` in focal cell type `c1`, classify each element
# by how widely it is shared across the three cell types:
#   "in:one"  -- present only in `c1`
#   "in:all"  -- overlaps the same state in both other cell types
#   "in:some" -- overlaps the same state in exactly one other cell type
# Returns the focal cell type's elements as a data frame with added
# `id` and `status` columns.
featSubset <- function(f=unique(c.k5$feat), c1=c("g", "h", "k")){
  # f = "E"
  # c1 = "h"
  # f1 = the focal cell type's ranges; f2/f3 = the other two cell types.
  f1 <- if(c1 == "g") c.gm else if(c1 == "h") c.h1 else c.k5
  f2 <- if(c1 == "g") c.h1 else if(c1 == "h") c.k5 else c.gm
  f3 <- if(c1 == "g") c.k5 else if(c1 == "h") c.gm else c.h1
  f1 <- subset(f1, feat == f)
  f2 <- subset(f2, feat == f)
  f3 <- subset(f3, feat == f)
  ## Find overlapping regions:
  f4 <- subsetByOverlaps(f1, f2)    # focal elements also present in f2
  f4.2 <- subsetByOverlaps(f1, f3)  # focal elements also present in f3
  f5 <- subsetByOverlaps(f4, f3)    # focal elements present in both others
  cat("%", f, "shared in all cell types", 100* length(f5) / length(f1), "\n")
  # unique ID for each element (chromosome.start.state)
  id.assign <- function(df) return(as.vector(with(df, paste(seqnames, start, feat, sep="."))))
  # Get elements in all cell types
  shared <- as.data.frame(f5)
  shared$id <- id.assign(shared)
  # Get those in any two (single shared)
  single <- rbind(as.data.frame(f4), as.data.frame(f4.2))
  single$id <- id.assign(single)
  #cat(length(shared$id))
  orig <- as.data.frame(f1)
  orig$id <- id.assign(orig)
  # Not matched by either pairwise overlap => unique to this cell type;
  # matched and in the three-way overlap => in all; otherwise in two.
  orig$status <- ifelse(!orig$id %in% single$id, "in:one",
                        ifelse(orig$id %in% shared$id, "in:all", "in:some"))
  return(orig)
}
# Classify every chromatin state's elements for GM12878 and stack the
# per-state results into one data frame.
gm.feats <- sapply(unique(c.k5$feat), featSubset, c1="g", simplify=F)
gm.feats <- do.call(rbind, gm.feats)
# unused:
# (exploratory plots of sharing status and element sizes per state)
par(mfrow=c(4,4), mar=c(2,4,3,1.5))
options(scipen=99)
# Exploratory plots for one chromatin state: a pie chart of the
# sharing-status breakdown and an ECDF of element widths (in kb),
# both drawn from the global `gm.feats` table.
pie.dens <- function(feat){
  elems <- gm.feats[gm.feats$feat == feat, ]
  pie(table(elems$status), main = feat)
  plot(ecdf(elems$width / 1000), main = feat, verticals = TRUE)
}
sapply(unique(gm.feats$feat), pie.dens)  # exploratory only
dev.off()
# Same sharing classification for the other two cell types.
h1.feats <- sapply(unique(c.k5$feat), featSubset, c1="h", simplify=F)
h1.feats <- do.call(rbind, h1.feats)
k5.feats <- sapply(unique(c.k5$feat), featSubset, c1="k", simplify=F)
k5.feats <- do.call(rbind, k5.feats)
## then write each of these three types to separate files for
## bedtools intersect (i.e. step 1)
## Write a cell type's classified chromatin-state elements to bed files,
## one per sharing status, plus a "shared2plus" union (in:all + in:some).
##
## feats: data frame from featSubset() (cols 1:3 = chr/start/end,
##        col 6 = the state label, plus a `status` column).
## ct:    short cell type code used in the output file names.
splitWrite <- function(feats, ct=c("g", "h", "k")){
  # Avoid scientific notation in coordinates; restore the option on exit
  # rather than leaking it into the session.
  old <- options(scipen = 99)
  on.exit(options(old), add = TRUE)
  # One bed file (chr, start, end, state) for a subset of rows.
  writeSubset <- function(d, suffix){
    write.table(d[, c(1:3, 6)],
                file = paste0("data/bedfiles/", ct, "_", suffix, "_chromstates.bed"),
                row.names = FALSE, col.names = FALSE, quote = FALSE, sep = "\t")
  }
  writeSubset(subset(feats, status == "in:all"), "shared")
  writeSubset(subset(feats, status == "in:one"), "celltypespecific")
  writeSubset(subset(feats, status == "in:some"), "partshared")
  writeSubset(subset(feats, status %in% c("in:all", "in:some")), "shared2plus")
}
splitWrite(gm.feats, ct="g")
splitWrite(h1.feats, ct="h")
splitWrite(k5.feats, ct="k")
# Intersect each flipped-compartment bed (none.bed is handled in the next
# loop) with the matching cell type's specific and shared annotation beds.
for(f in fnames[-length(fnames)]){
  ct <- substr(f, 1, 1)
  # NOTE(review): `flip` is computed here but never used in this loop.
  flip <- ifelse(grepl("closed", f), "closed",
                 ifelse(grepl("open", f), "open", "none"))
  cmd1 <- paste0("bedtools intersect -a data/bedfiles/", f, " -b data/bedfiles/",
                 ct, "_celltypespecific_chromstates.bed -wao > data/bedfiles/",
                 f, "_cts.out")
  ## This command treats "shared" as present in ALL THREE only:
  # cmd2 <- paste0("bedtools intersect -a bedfiles/", f, " -b bedfiles/",
  #               ct, "_shared_chromstates.bed -wao > bedfiles/",
  #               f, "_shared.out")
  ## This one considers "shared" as anything not cell type specific:
  cmd2 <- paste0("bedtools intersect -a data/bedfiles/", f, " -b data/bedfiles/",
                 ct, "_shared2plus_chromstates.bed -wao > data/bedfiles/",
                 f, "_shared.out")
  cat("Running: ", cmd1, "\n\n")
  system(cmd1, intern=F)
  cat("Running: ", cmd2, "\n\n")
  system(cmd2, intern=F)
}
## process the blocks with no flip for comparison:
for(ct in c("h", "g", "k")){
  ## Same as above, rm "2plus" for shared == ALL 3 cell type overlap
  cmd3 <- paste0("bedtools intersect -a data/bedfiles/none.bed -b data/bedfiles/",
                 ct, "_shared2plus_chromstates.bed -wao > data/bedfiles/",
                 ct, ".none_shared.out")
  cmd4 <- paste0("bedtools intersect -a data/bedfiles/none.bed -b data/bedfiles/",
                 ct, "_celltypespecific_chromstates.bed -wao > data/bedfiles/",
                 ct, ".none_cts.out")
  cat("Running: ", cmd3, "\n\n")
  system(cmd3, intern=F)
  cat("Running: ", cmd4, "\n\n")
  system(cmd4, intern=F)
}
## 4) Read results and analyse, as per #2
## Aggregate per-block chromatin-state overlaps for one cell type, split
## by whether the annotation is cell type specific ("cts") or shared.
##
## ct:      cell type prefix of the intersection files ("h", "g" or "k").
## overlap: TRUE  -> sum overlapping base pairs per feature;
##          FALSE -> count the number of overlapping annotations instead.
## Returns a data frame with one row per (block id, type, flip, feature),
## zero-filled for combinations with no overlap, plus a `ct` column.
##
## NOTE: the deprecated dplyr chain operator `%.%` has been replaced with
## `%>%` (consistent with the rest of this script); behaviour is unchanged.
ctFeats.p2 <- function(ct=c("h", "g", "k"), overlap=TRUE){
  cnames <- c("chr", "start", "end", "id",
              "chr.b", "start.b", "end.b", "feature", "overlap")
  # Read one bedtools -wao output; flip and type labels are encoded in
  # the file name.
  read.feat <- function(file){
    f <- read.table(paste0("data/bedfiles/", file), stringsAsFactors=FALSE, col.names=cnames)
    f$flip <- ifelse(grepl("open", file), "open",
                     ifelse(grepl("closed", file), "closed", "none"))
    f$type <- ifelse(grepl("shared", file), "shared",
                     ifelse(grepl("cts", file), "cts", "ERROR"))
    return(f)
  }
  oc <- read.feat(paste0(ct, ".open.bed_cts.out"))
  cc <- read.feat(paste0(ct, ".closed.bed_cts.out"))
  os <- read.feat(paste0(ct, ".open.bed_shared.out"))
  cs <- read.feat(paste0(ct, ".closed.bed_shared.out"))
  ns <- read.feat(paste0(ct, ".none_shared.out"))
  nc <- read.feat(paste0(ct, ".none_cts.out"))
  i <- rbind(oc, cc, os, cs, ns, nc)
  perFeat <- if(overlap){
    ## sum base pairs (summarise's `overlap` is the data column, not the arg)
    group_by(i, type, flip, id, feature) %>% dplyr::summarise(olap = sum(overlap))
  } else {
    ## count number
    group_by(i, type, flip, id, feature) %>% dplyr::summarise(olap = n())
  }
  ## nb do this after summarising ("." marks rows with no overlap at all)
  perFeat <- subset(perFeat, feature != ".")
  perFeat$feature <- factor(perFeat$feature)
  perFeat$id <- factor(perFeat$id)
  # Build the full (id x type x feature) grid so that combinations with
  # zero overlap appear explicitly instead of being silently absent.
  all.combs <- expand.grid(levels(perFeat$id), unique(perFeat$type), levels(perFeat$feature))
  pfdt <- data.table(perFeat)
  setkey(pfdt, id)
  all.combs <- data.table(all.combs)
  # nb id is tied to flip, so flip can be recovered from the id string
  setnames(all.combs, 1:3, c("id", "type", "feature"))
  setkey(all.combs, id)
  all.combs$flip <- ifelse(grepl("open", all.combs$id), "open",
                           ifelse(grepl("closed", all.combs$id), "closed", "none"))
  m <- merge(all.combs, pfdt, by=c("id", "type", "flip", "feature"), all.x=TRUE, allow.cartesian=TRUE)
  m$olap[is.na(m$olap)] <- 0  # zero-fill combinations with no overlap
  perFeat <- as.data.frame(m)
  perFeat$ct <- ct
  return(perFeat)
}
# Aggregate per-block feature COUNTS (overlap = FALSE) for all three cell
# types, with human-readable labels.  The deprecated dplyr `%.%` operator
# has been replaced by `%>%`, consistent with the rest of this script.
pF.g <- ctFeats.p2("g", overlap=FALSE)
pF.k <- ctFeats.p2("k", overlap=FALSE)
pF.h <- ctFeats.p2("h", overlap=FALSE)
pF <- rbind(pF.g, pF.h, pF.k)
pF$ct <- ifelse(pF$ct == "h", "H1 hESC",
                ifelse(pF$ct == "g", "GM12878", "K562"))
stopifnot(length(unique(pF$type)) == 2)
pF$type <- ifelse(pF$type == "cts", "Cell type specific", "Shared")
saveRDS(pF, "data/rds/chromFeaturesFlipped.rds")
options(scipen=99)
pd <- position_dodge(width=.9)
rt <- subset(pF, feature %in% c("R", "T") & type == "Cell type specific")
rt.counts <- group_by(rt, ct, flip, feature) %>%
  summarise(top=max(olap)*1.05, count=paste0("n = ", n()))
pf.i <- subset(pF, feature %in% c("R", "T") & type == "Cell type specific")
# NOTE(review): `feature` appears twice in this group_by() (one of them
# was probably meant to be `ct`), and wilcox.test() is run one-sample.
group_by(pf.i, feature, flip, feature) %>% summarise(p=wilcox.test(olap)$p.value)
# Figure 4: for one chromatin state, boxplots of per-Mb annotation counts
# in flipped-open / flipped-closed / unflipped blocks, faceted by cell
# type; top panel = cell type specific annotations, bottom = shared.
# Reads the globals `pF` and `pd`.
f4.plot <- function(chrom.state){
  epf <- subset(pF, feature == chrom.state & type == "Cell type specific")
  epf.shared <- subset(pF, feature == chrom.state & type == "Shared")
  # NOTE(review): e.counts is computed but unused below, and still uses
  # the legacy dplyr `%.%` operator (needs `%>%` on modern dplyr).
  e.counts <- group_by(epf, ct, flip, feature) %.% summarise(top=max(olap)*.7, count=paste0("n = ", n()))
  grid.arrange(
    ggplot(epf, aes(x=flip, y=olap, fill=flip,
                    group=interaction(flip,type,feature))) +
      scale_fill_brewer(palette="Blues") + theme_bw() +
      coord_cartesian(ylim = quantile(epf$olap, c(0, 0.95))) +
      geom_boxplot(width=.8, notch=T, outlier.size=0, position=pd,
                   colour="black") +
      stat_summary(fun.y=mean, geom="point", position=pd,
                   colour="grey40", shape=18, size=3) +
      facet_grid(type~ct,shrink=T) +
      labs(list(y="Number of annotated features per Mb", x="",
                fill="Flipped state")) #+ theme(legend.position="none")
    ,
    ggplot(epf.shared, aes(x=flip, y=olap, fill=flip,
                           group=interaction(flip,type,feature))) +
      scale_fill_brewer(palette="Blues") + theme_bw() +
      coord_cartesian(ylim = quantile(epf.shared$olap, c(0, 0.95))) +
      geom_boxplot(width=.8, notch=T, outlier.size=0, position=pd, colour="black") +
      stat_summary(fun.y=mean, geom="point", position=pd,
                   colour="grey40", shape=18, size=3) +
      facet_grid(type~ct, shrink=T) +
      labs(list(y="Number of annotated features per Mb", x="",
                fill="Flipped state")) #+ theme(legend.position="none")
    , ncol=1)
}
# Figure 4b, flipped open compartments enriched for enhancers
pdf("figures/f4b_enhancerEnrichFlipped.pdf", 7, 5.5)
f4.plot("E")
dev.off()
pdf("figures/suppl/s6_transcribedFlipped.pdf", 7, 5.5)
f4.plot("T")
dev.off()
# others not used in manuscript:
#f4.plot("WE")
#f4.plot("CTCF")
#f4.plot("TSS")
#f4.plot("R")
#f4.plot("PF")
# Open/closed vs. None wilcox tests
# Enhancer ("E") counts: each flipped class against the unflipped blocks,
# per cell type and sharing class; prints a tab-separated table.
cat("Cell type\tFlip\tType\tSignif\tMedian(flipped)\tMedian(none)\n")
for(c in c("GM12878", "H1 hESC", "K562")){
  for(f in c("open", "closed")){
    for(t in c("Cell type specific", "Shared")){
      res <- with(pF, wilcox.test(pF[feature == "E" & flip == f & ct == c & type == t, "olap"],
                                  pF[feature == "E" & flip == "none" & ct == c & type == t, "olap"]))
      cat(c, "\t", f, "\t", t, "\t", res$p.value, "\t",
          with(pF, median(pF[feature == "E" & flip == f & ct == c & type == t, "olap"])),
          with(pF, median(pF[feature == "E" & flip == "none" & ct == c & type == t, "olap"])),
          "\n")
    }
  }
}
## Open vs. Closed wilcox tests (enhancer counts, flipped-open blocks
## against flipped-closed blocks, per cell type and sharing class).
## Fixed from a copy-paste of the previous loop, which printed the stale
## loop variable `f` and the flipped-vs-none medians instead of the open
## and closed medians actually being compared here.
cat("Cell type\tType\tSignif\tMedian(open)\tMedian(closed)\n")
for(c in c("GM12878", "H1 hESC", "K562")){
  for(t in c("Cell type specific", "Shared")){
    open.olap   <- pF[pF$feature == "E" & pF$flip == "open"   & pF$ct == c & pF$type == t, "olap"]
    closed.olap <- pF[pF$feature == "E" & pF$flip == "closed" & pF$ct == c & pF$type == t, "olap"]
    res <- wilcox.test(open.olap, closed.olap)
    cat(c, "\t", t, "\t", res$p.value, "\t",
        median(open.olap), median(closed.olap),
        "\n")
  }
}
## Supplementary figure: all tests:
# Violin + boxplot of per-Mb annotation counts for every chromatin state,
# faceted by cell type and sharing class, coloured by flip status.
pdf("figures/suppl/s7_allBeans.pdf", 9, 12)
ggplot(pF, aes(x=flip, y=olap, fill=flip,
               group=interaction(flip,type,feature,ct), ymax=0)) +
  scale_fill_brewer(palette="Blues") +
  geom_violin(scale="width", position=pd) +
  geom_boxplot(width=.1, outlier.size=0, position=pd, fill="black") +
  stat_summary(fun.y=median, geom="point", position=pd,
               fill="white", shape=21, size=3) +
  facet_grid(feature~ct+type, scales="free_y") + theme_bw() +
  labs(list(y="Annotation coverage per Mb (kb)", x="",
            fill="Flipped state")) + theme(legend.position="none")
dev.off()
########################################################################## |
279c568aad63308b1f9e0650f173ec5ff86fc8cd | 081c62f36f7703d7987218c1c22931e083198e73 | /myelo/inst/doc/papers/ELT/zNchemoG.R | 6d4e738c490596bcc9a22e777f92d0803bdbc864 | [] | no_license | radivot/myelo | be7ed23a6d1772e55310ced91270aa1d09da6735 | 2498bed404c98f096fcda4075c34a2881265e24b | refs/heads/master | 2022-12-15T00:11:22.751773 | 2022-12-04T14:24:36 | 2022-12-04T14:24:36 | 6,070,078 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,130 | r | zNchemoG.R | ############################ ZHUGE12 CHEMO+GCSF ##################
# Holding the chemo treatment cycle T fixed at 23 days (at resonance), change the time to
# GCSF in days after chemo last began. Compare T1 = 1 and 10 days
library(tidyverse)
library(deSolve)
library(myelo)
# Simulation settings (`zhugePars` is supplied by the myelo package).
zhugePars["T"]=23  # chemo cycle period in days; 23 d is at resonance (see header)
zhugePars["T1"]=10 # later do 1 day
zhugePars["Nss"]=639805537 #SS found in readme.md
Tf=200  # final simulation time (days)
# Build the deSolve `events` table for combined chemotherapy + G-CSF dosing.
#
# Chemo starts at t = 0, T, 2T, ... and its effect lasts 1 day; G-CSF is
# given T1 days after each chemo start and also lasts 1 day.  Each dose is
# a pair of "rep"(lace) events on the affected state variables:
#   chemo:  eta -> etaMinNP at dose start, back to etaNP one day later
#   G-CSF:  eta -> etaMaxNP, gam -> gamMin0, tau -> tauNMgcsf at dose
#           start; back to etaNP / gam0 / tauNM one day later
#
# zhugePars: named vector of model parameters (needs T, T1 and the rate /
#            delay values listed above).
# Tf:        final simulation time (days).
# Returns a data.frame(var, time, value, method) sorted by time, suitable
# for dede(..., events = list(data = ...)).
mkEventsCG=function(zhugePars,Tf) {
  # One block of replacement events: variable `var` is set to `value`
  # at each time in `times`.
  repEvents <- function(var, times, value) {
    data.frame(var = var, time = times, value = unname(value),
               method = "rep", stringsAsFactors = FALSE)
  }
  chemoOn  <- seq(0, Tf, zhugePars[["T"]])                  # chemo dose starts
  chemoOff <- seq(1, Tf, zhugePars[["T"]])                  # 1 day after each start
  gcsfOn   <- seq(zhugePars[["T1"]], Tf, zhugePars[["T"]])  # T1 days after chemo start
  gcsfOff  <- seq(zhugePars[["T1"]] + 1, Tf, zhugePars[["T"]])
  events <- rbind(
    repEvents("eta", chemoOn,  zhugePars[["etaMinNP"]]),
    repEvents("eta", chemoOff, zhugePars[["etaNP"]]),
    repEvents("eta", gcsfOn,   zhugePars[["etaMaxNP"]]),
    repEvents("eta", gcsfOff,  zhugePars[["etaNP"]]),
    repEvents("gam", gcsfOn,   zhugePars[["gamMin0"]]),
    repEvents("gam", gcsfOff,  zhugePars[["gam0"]]),
    repEvents("tau", gcsfOn,   zhugePars[["tauNMgcsf"]]),
    repEvents("tau", gcsfOff,  zhugePars[["tauNM"]])
  )
  # Stable sort by event time (base R; replaces the dplyr::arrange call,
  # so this function no longer needs dplyr).
  events[order(events$time), ]
}
# Build the event table and eyeball it (column classes + first rows).
eventDF=mkEventsCG(zhugePars,Tf)
sapply(eventDF,class)
head(eventDF,10)
# DDE right-hand side for the neutrophil model (Zhuge 2012 variant) with
# chemo + G-CSF handled via the event table from mkEventsCG().
# States: N (neutrophils), the piecewise-constant parameters eta/gam/tau
# (changed only by events), and their running integrals Eta/Gam, so that
# lagged differences of Eta/Gam give time integrals of eta/gam.
zhuge12NchemoG<-function(Time, State, Pars) { # model with stem cells Q treated as constant
  with(as.list(c(State, Pars)), {
    deta=0 #this is etaNP as a state
    dEta=eta   # Eta(t) = integral of eta up to t
    dgam=0 #this is gam0 as a state
    dGam=gam   # Gam(t) = integral of gam up to t
    dtau=0 # this is tauNM as a state. Go from 6 days of maturation to 2 while on Gcsf
    tauN=tauNP+tau #total tauN changes with maturation time changes
    if (Time < 0) {
      An=exp(etaNP*tauNP-gam0*tauNM) # no gcsf or chemo perturbations for negative times
      dN=-gamN*N + An*f0/(1+(Nss/the1)^s1)*Qss
    }
    else{# quotients and remaninders: 21.4%/%10 21.4%%10
      # if (Time%%T < 1) dEta=etaMinNP # in chemo period
      # if ( (Time%%T -T1 > 0)&(Time%%T -T1 < 1) ) { # in G-CSF exposure period
      #   dEta=etaMaxNP
      #   dGam=gamMin0
      # }
      # Lagged differences of the cumulative states: state 3 is Eta and
      # state 5 is Gam, so delEta appears to be the integral of eta over
      # [t - tauN, t - tau] and delGam that of gam over [t - tau, t].
      delEta=lagvalue(Time - tau,3)-lagvalue(Time - tauN,3)
      delGam=Gam -lagvalue(Time - tau,5)
      An=exp(delEta - delGam)  # amplification over proliferation/maturation
      # lagvalue(Time - tauN)[1] is N delayed by the total delay tauN.
      dN=-gamN*N + An*f0/(1+(lagvalue(Time - tauN)[1]/the1)^s1)*Qss
    }
    list(c(dN,deta,dEta,dgam,dGam,dtau))
  })
}
# Integrate the DDE twice: once with G-CSF T1 = 10 days after chemo start
# (current zhugePars) and once with T1 = 1 day, then compare.
times= seq(-zhugePars[["tauN"]],Tf,by=0.01)
yout=dede(c(N=zhugePars[["Nss"]],eta=zhugePars[["etaNP"]], Eta=0,
            gam=zhugePars[["gam0"]], Gam=0,
            tau=zhugePars[["tauNM"]] ),
          times=times,func=zhuge12NchemoG,
          parms=zhugePars,events=list(data=mkEventsCG(zhugePars,Tf)),method="lsodar")
D10=data.frame(yout)
zhugePars["T1"]=1  # re-run with G-CSF one day after chemo begins
yout=dede(c(N=zhugePars[["Nss"]],eta=zhugePars[["etaNP"]], Eta=0,
            gam=zhugePars[["gam0"]], Gam=0,
            tau=zhugePars[["tauNM"]] ),
          times=times,func=zhuge12NchemoG,
          parms=zhugePars,events=list(data=mkEventsCG(zhugePars,Tf)),method="lsodar")
D1=data.frame(yout)
D1$Tg="1 Day"
D10$Tg="10 Days"
# Combine runs and rescale N (units of 1e8 cells) for plotting.
D=bind_rows(D1,D10)%>%mutate(N=N/1e8)
# ggplot shorthand helpers
tc=function(sz) theme_classic(base_size=sz)
gy=ylab("Neutrophil Counts")
gx=xlab("Days")
sy=scale_y_log10()
ltp=theme(legend.position="top")
# cc=coord_cartesian(ylim=c(1e6,1e12))
cc=coord_cartesian(ylim=c(1e-2,1e4))
gh=geom_hline(yintercept=0.63)  # steady state in units of 1e8 (Nss/1e8)
D%>%ggplot(aes(x=time,y=N,col=Tg))+geom_line(size=1)+gx+gy+tc(14)+sy+ltp+cc+gh
ggsave("~/Results/myelo/zhugeNchemoGeventsfig3B.png",height=6,width=6.5)
# ggsave("~/Results/myelo/zhugeNchemoGeventsfig3Bplus1.png",height=6,width=6.5)
# two prongs of peaks with T1=10 match paper, as do troughs (as without events [in pdf])
# T1=1 does not match paper, neither counting 1 from beginning or end (plus1) of chemo interval
|
e464e9059ae3d3ad6c50897183b2a6534c7b986c | 18127f37d3e7eecce292311938b886c866113941 | /man/migrate_cdf_to_2015_std.Rd | 5cb019772960acf8c598c8aa0274279bae0c182f | [] | no_license | charkins24/mapvizieR | 637460ae72b5b03163d8c6e9fa5a1ba2fb2a4cc9 | c2f8f41892ff118fc1d066d140dc3d238d908316 | refs/heads/master | 2021-01-11T03:02:07.093609 | 2016-06-28T20:09:20 | 2016-06-28T20:09:20 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 561 | rd | migrate_cdf_to_2015_std.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cdf_prep.R
\name{migrate_cdf_to_2015_std}
\alias{migrate_cdf_to_2015_std}
\title{Migrate pre-2015 CDFs (both client-server and WBM) to post-2015 specification,}
\usage{
migrate_cdf_to_2015_std(cdf)
}
\arguments{
\item{cdf}{the Assessment Results table from a Comprehensive Data file as a data.frame}
}
\value{
a data.frame of the Assessment Results table in the post-2015 CDF format
}
\description{
Migrate pre-2015 CDFs (both client-server and WBM) to post-2015 specification,
}
|
4ef8a5c9f26e3b531f131ba8f104bd3b8ee62642 | fc2a934c0d7ad541897e826afb143b9ef0c0e6a4 | /code/functions.R | 6f389bcb86f8e062c9984edfd8ca124398e59c32 | [
"MIT"
] | permissive | jhelvy/splitKbCompare | c2a6b36737f35b20fc541b17f08e3cceb5233023 | 6ec32d4512c116be68a36b5fff4bcee7e823b6bd | refs/heads/master | 2023-04-15T22:18:28.605121 | 2022-09-26T11:18:08 | 2022-09-26T11:18:08 | 241,105,575 | 205 | 34 | MIT | 2023-04-08T05:04:12 | 2020-02-17T12:45:17 | R | UTF-8 | R | false | false | 8,787 | r | functions.R | # Loads the keyboards.csv file as a data frame
loadKeyboards <- function() {
keyboards <- readr::read_csv(file.path("data", "keyboards.csv")) %>%
filter(include == 1) %>%
mutate(
nameKeys = ifelse(
nKeysMin == nKeysMax,
paste0(name, " (", nKeysMin, ")"),
paste0(name, " (", nKeysMin, " - ", nKeysMax, ")")
),
openSource = ifelse(openSource == 1, TRUE, FALSE)
) %>%
arrange(id, desc(nKeysMax), desc(nKeysMin))
return(keyboards)
}
# Create DT of keyboard table for "Keyboards" page
# Turns the keyboards data frame into the display columns used by the
# "Keyboards" table: boolean features become check marks, the key range is
# formatted as "min - max", and the Links / PDF columns hold raw HTML
# (GitHub / store links and A4 / Letter print-out PDFs).
loadKeyboardsDT <- function(keyboards) {
  check <- "✓"  # rendered in boolean columns when a feature is present
  keyboardsDT <- keyboards %>%
    rename("Name" = name) %>%
    mutate(
      `# of keys` = ifelse(
        nKeysMin == nKeysMax,
        nKeysMin,
        paste(nKeysMin, nKeysMax, sep = " - ")
      ),
      `# of rows` = numRows,
      `Column stagger` = colStagger,
      `Row stagger?` = ifelse(rowStagger == 1, check, ""),
      `Number row?` = ifelse(hasNumRow == 1, check, ""),
      `Available DIY?` = ifelse(diy == 1, check, ""),
      `Available pre-built?` = ifelse(prebuilt == 1, check, ""),
      `Rotary encoder?` = ifelse(rotaryEncoder == 1, check, ""),
      `Wireless?` = ifelse(wireless == 1, check, ""),
      `One-piece board?` = ifelse(onePiece == 1, check, ""),
      `Cherry MX?` = ifelse(mxCompatible == 1, check, ""),
      `Kailh Choc V1?` = ifelse(chocV1 == 1, check, ""),
      `Kailh Choc V2?` = ifelse(chocV2 == 1, check, ""),
      # Source / store links as HTML anchors; empty string when missing.
      url_source = ifelse(
        is.na(url_source),
        "",
        paste0(
          '<a href="', url_source,
          '" target="_blank"><i class="fa fa-github"></i></a> '
        )
      ),
      url_store = ifelse(
        is.na(url_store),
        "",
        paste0(
          '<a href="', url_store,
          '" target="_blank"><i class="fa fa-shopping-cart"></i></a> '
        )
      ),
      # Print-out PDFs live under pdf/a4/<id>.pdf and pdf/letter/<id>.pdf.
      pdf_path_a4 = paste0(
        '<a href="pdf/a4/', id,
        '.pdf" target="_blank" title="A4"><i class="fa fa-file-pdf-o"></i></a> '
      ),
      pdf_path_letter = paste0(
        '<a href="pdf/letter/', id,
        '.pdf" target="_blank" title="Letter"><i class="fa fa-file-pdf-o"></i></a> '
      ),
      Links = paste0(url_source, url_store),
      PDF = paste(pdf_path_a4, pdf_path_letter)
    ) %>%
    select(
      Name, `# of keys`, `# of rows`, `Column stagger`, `Row stagger?`,
      `Number row?`, `Available DIY?`, `Available pre-built?`,
      `Rotary encoder?`, `Wireless?`, `One-piece board?`, `Cherry MX?`,
      `Kailh Choc V1?`, `Kailh Choc V2?`, Links, PDF
    )
  return(keyboardsDT)
}
# Read every PNG under images/png into a named list of magick images,
# named by file name without extension.  Also derives a "scale_black"
# entry: the scale image recoloured white and flattened onto a black
# background (base layer for the colour overlays).
loadImages <- function() {
  pngDir <- file.path("images", "png")
  fileNames <- list.files(pngDir)
  images <- as.list(image_read(file.path(pngDir, fileNames)))
  names(images) <- fs::path_ext_remove(fileNames)
  # White-on-black version of the scale image
  whiteScale <- image_colorize(images$scale, opacity = 100, color = "#fff")
  images[["scale_black"]] <- image_background(whiteScale, color = "#000", flatten = TRUE)
  return(images)
}
# Colour palette for the keyboard overlays: 20 visually distinct hex
# colours, first entry white (reserved for the base layer).
# Source: https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/
#
# The old version also built two alternative palettes on every call (one
# via RColorBrewer) and threw them away; those are kept below as comments
# so the options remain documented without the dead work / dependency.
loadColorPalette <- function() {
  # OPTION 1 (unused):
  # c("white", RColorBrewer::brewer.pal(n = 8, name = "Dark2"))
  # OPTION 3 (unused): pals::polychrome() with a handful of colours
  # removed ('#5A5156', '#325A9B', '#B00068', '#85660D', '#1C8356',
  # '#B10DA1').
  # OPTION 2 (in use):
  palette <- c(
    "#FFFFFF", "#E6194B", "#FFE119", "#3CB44B", "#4363D8", "#F58231",
    "#911EB4", "#46F0F0", "#BCF60C", "#FABEBE", "#008080", "#E6BEFF",
    "#9A6324", "#FFFAC8", "#AAFFC3", "#808000", "#FFD8B1", "#F032E6",
    "#800000", "#000075"
  )
  return(palette)
}
# Names (with key counts) of the keyboards passing the current filter
# controls, ordered according to the "sort by" input ("Name" or "# Keys").
getFilteredKeyboardNames <- function(input, keyboards) {
  kept <- keyboards[getFilteredRows(input, keyboards), ]
  sortMode <- input$sortKeyboards
  if (sortMode == "Name") {
    kept <- arrange(kept, id, desc(nKeysMax), desc(nKeysMin))
  } else if (sortMode == "# Keys") {
    kept <- arrange(kept, desc(nKeysMax), desc(nKeysMin), id)
  }
  return(kept$nameKeys)
}
# Build logical row masks for the single-column filters.  Each filter id
# is simultaneously the shiny input name and the keyboards column it
# filters on (e.g. "wireless" filters on the wireless 0/1 column).
# Returns a named list of logical vectors, one per active filter, or NULL
# when no single-column filter is set.
oneVarFilters <- function(input, keyboards) {
  filterIds <- c(
    "hasNumRow", "colStagger", "rowStagger", "rotaryEncoder", "wireless",
    "onePiece", "openSource"
  )
  selections <- lapply(filterIds, function(id) input[[id]])
  names(selections) <- filterIds
  # Keep only filters the user actually set (isTruthy drops NULL / "" / FALSE)
  active <- selections[vapply(selections, isTruthy, logical(1))]
  if (length(active) == 0) {
    return(NULL)
  }
  # A row passes a filter when its column value is among the selected ones.
  masks <- lapply(names(active), function(id) keyboards[[id]] %in% active[[id]])
  names(masks) <- names(active)
  return(masks)
}
# Build a logical row mask for the multi-column filters ("availability",
# "switchType"), whose selected input values are themselves column names
# in `keyboards`.  A row passes when every selected column equals 1.
# Returns a one-element list with the mask, or NULL when neither filter
# is set.
multiVarFilters <- function(input, keyboards) {
  multiVarFilterIds <- c("availability", "switchType")
  multiVarInputs <- sapply(multiVarFilterIds, function(id) input[[id]])
  activeMultiVar <- multiVarInputs[sapply(multiVarInputs, isTruthy)]
  if (length(activeMultiVar) > 0) {
    # drop = FALSE keeps a data frame even when only one column is
    # selected; without it a single column collapses to a vector and
    # apply() errors with "dim(X) must have a positive length".
    selected <- keyboards[, unlist(activeMultiVar), drop = FALSE]
    multiVarLogical <- list(
      apply(
        X = selected == 1,
        MARGIN = 1,
        FUN = all
      )
    )
  } else {
    multiVarLogical <- NULL
  }
  return(multiVarLogical)
}
# Logical row mask for the always-active range sliders: a keyboard passes
# when its key-count range lies within input$numKeys and its row count
# lies within input$numRows.  Returned as a one-element list so it can be
# combined with the other filter builders.
rangeFilters <- function(input, keyboards) {
  keyRange <- input$numKeys
  rowRange <- input$numRows
  inKeyRange <- keyboards$nKeysMin >= keyRange[[1]] &
    keyboards$nKeysMax <= keyRange[[2]]
  inRowRange <- keyboards$numRows >= rowRange[[1]] &
    keyboards$numRows <= rowRange[[2]]
  list(inKeyRange & inRowRange)
}
# Row indices of `keyboards` passing every active filter.  The individual
# builders return lists of logical masks (or NULL when inactive); c()
# drops the NULLs and Reduce("&") intersects whatever remains.
getFilteredRows <- function(input, keyboards) {
  masks <- c(
    rangeFilters(input, keyboards),
    oneVarFilters(input, keyboards),
    multiVarFilters(input, keyboards)
  )
  passesAll <- Reduce(`&`, masks)
  return(which(passesAll))
}
# Functions for creating the merged image overlays
# Build a coloured overlay: each keyboard outline in `ids` is recoloured
# with a palette colour (palette recycled when there are more ids than
# colours), the border layer is drawn white, and everything is merged
# onto the black scale background image.
getImageOverlayColor <- function(ids, images, palette) {
  # Don't want to select an index past palette's length
  # So modulo the required indices around palette's length
  # (Note this will select the same color multiple times but it gets so hectic
  # it's unnoticable)
  colors <- c(
    palette[
      # Modulo works nicely with 0-indexed not 1 indexed indices, so
      # convert -> 0-indexed and then back
      ((seq_along(ids) - 1) %% length(palette)) + 1
    ],
    "white"  # final colour is for the "border" layer appended below
  )
  ids <- c(ids, "border")
  i <- 1
  overlay <- images$scale_black  # base layer: white scale on black
  # Recolour each layer and append it to the image stack
  for (id in ids) {
    image <- image_colorize(images[[id]], opacity = 100, color = colors[i])
    overlay <- c(overlay, image)
    i <- i + 1
  }
  # Flatten the stack into a single image
  return(image_mosaic(image_join(overlay)))
}
# Overlay the outline images for `ids` on top of the scale image and
# flatten them into a single magick image (uncoloured variant).
getImageOverlay <- function(ids, images) {
  layers <- images[c("scale", ids)]
  return(image_mosaic(image_join(layers)))
}
|
1c8e49a7e5cfa37c037559d4b0a1edb0b30f785f | 3d3200873a757bf5142a185a51589ed1c70365ce | /time_delay_ecdf/combined_ecdf.R | c9db55867127e0452e64cb71f1a0bcd7ed6c7a18 | [
"Apache-2.0"
] | permissive | fvalka/r_estimate | 3ab0738940751526f1719004512b2f13219a42b0 | 91f7e528eb687b98aa4709da334a58f0c0925f68 | refs/heads/master | 2023-01-04T11:13:09.500411 | 2020-10-24T21:57:36 | 2020-10-24T21:57:36 | 259,440,541 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,215 | r | combined_ecdf.R | library(EpiEstim)
library(ggplot2)
library(incidence)
library(lubridate)
library(utils)
library("ggpubr")
library(latex2exp)
library(foreach)
library(RColorBrewer)
library(shiny)
library(EnvStats)
library(pracma)
library(deSolve)
require(scales)
require(Cairo)
require(RcppRoll)
# Precomputed CDF of the delay from infection to case reporting, given on
# a fixed grid of times `t` (columns t / infection_reporting_cdf).
ecdf_incubation_reporting_precalculated <- readRDS("r_estimate/data/time-delay/infection_reporting_cdf.rds")
# Step-function lookup into the precomputed CDF: for each time t, return
# the CDF value of the grid interval containing t.  (findInterval is
# itself vectorised; the Vectorize wrapper is kept for compatibility.)
ecdf_incubation_reporting <- Vectorize(function(t) {
  idx <- findInterval(t, ecdf_incubation_reporting_precalculated$t, all.inside=TRUE)
  return(ecdf_incubation_reporting_precalculated$infection_reporting_cdf[idx])
})
# Combined delay CDF for a given estimation window size.  Loads the
# precomputed estimation-delay CDF for that window and combines it with
# the infection-to-reporting CDF, evaluated at lags 0..40 days.
# Returns a numeric vector of length 41 (index i+1 = lag of i days).
combined_ecdf <- function(window_size) {
  estimation_delay <- readRDS(paste0("time_delay_ecdf/out/time_response_tau_",window_size,".rds"))
  result <- rep(0, 41)
  for(i in 0:40) {
    # Weight by the estimation-delay probability mass (diff of its CDF,
    # with an implicit 0 prepended) times the reporting CDF at the
    # matching offsets across the window.
    result[i+1] <- sum(diff(append(c(0), estimation_delay))*ecdf_incubation_reporting(seq(i + 0.5,i + 0.5 - window_size+1)))
  }
  return(result)
}
# Tabulate the combined delay CDF for window sizes 3..20 (one column per
# tau) and cache the table for later use.
combined_ecdfs <- data.frame("t"=seq(0,40))
for (tau in seq(3,20)) {
  combined_ecdfs[[sprintf("tau_%d", tau)]] <- combined_ecdf(tau)
}
saveRDS(combined_ecdfs, "r_estimate/data/time-delay/infection_estimation_cdf.rds")
|
0aae2d2f018ff6d76312df0a1c06742b8574c319 | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Modern_Physical_Chemistry_A_Molecular_Approach_by_George_H_Duffey/CH17/EX17.15/Ex17_15.R | c729814cf82716f3a4772c5681339c62cd143cc4 | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 110 | r | Ex17_15.R | # Page 482
h <- 6.626 * 10^-34
pi <- 3.1415
m <- 9.1094 * 10^-31
c <- (4 * pi * (2 * m)^1.5) / h^3
print(c) |
d73ea90b4efdca202d3a1ea0d9399e88f7a04eba | 40f4cb44ab742a168ca3f82d36a3e38dcaa6f844 | /man/dumpUniprotDb.Rd | 6374a581546ff4895dda1658d538872157bc72a4 | [] | no_license | sankleta/BED | 34e3f91fceffbb1164e65ab8a4cb24e6431b898b | 85c5c5ba4bbc927155d454dc6612512c7b197805 | refs/heads/master | 2021-04-30T05:55:28.535605 | 2018-02-06T11:18:59 | 2018-02-06T11:18:59 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 616 | rd | dumpUniprotDb.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/dumpUniprotDb.R
\name{dumpUniprotDb}
\alias{dumpUniprotDb}
\title{Feeding BED: Dump and preprocess flat dat files from Uniprot}
\usage{
dumpUniprotDb(taxOfInt, release, env = parent.frame(n = 1))
}
\arguments{
\item{taxOfInt}{the organism of interest. Only human ("9606"),
mouse ("10090") and rat ("10116") are supported}
\item{release}{the release of interest (check if already downloaded)}
\item{env}{the R environment in which to load the tables when built}
}
\description{
Not exported to avoid unintended modifications of the DB.
}
|
13f27c7cb7224c3907054bd27d972b5dc62c20eb | c31e98ce2a7a380038dac8a19ccd03c5711e7c81 | /packages.R | dc7681bf0bc692a1c3145107eb083d3f48da3a88 | [] | no_license | aleholthuma/quantile_ensemble_talk | 76743f49a68ad3448d7200c1175ebf9a47b6731b | cb05c0d8921b086e8f901a168da042e57866226c | refs/heads/master | 2023-03-18T17:33:11.146262 | 2020-08-15T02:05:40 | 2020-08-15T02:05:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 143 | r | packages.R | install.packages(c("transformr", "latex2exp", "magick","remotes","styler","binb","codetools","tidyverse","fpp3","distributional","gganimate"))
|
ae68a93596fedeb01ec62802507be4cad675e2a9 | 3f43f522bc8b4527470c27ea48250f7120c81a8e | /nectar analysis/analysis/Balsamroot models/ModswoPlot/ModBalsMassBoth.R | d689b3e1a99dc069ebe372f2672af0e6871f30c9 | [] | no_license | EEB590/MAL | 0d23dadce6407ec7bc70a1a73eead12ca35c1ec3 | 84f0410687c7d693620e48b2f3b3354150fefb74 | refs/heads/master | 2020-12-01T08:40:28.856034 | 2019-05-20T03:53:42 | 2019-05-20T03:53:42 | 67,459,164 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,185 | r | ModBalsMassBoth.R | library(ggplot2)
library(lme4)
library(nlme)
library(lsmeans)
library(lubridate)
library(multcompView)
library(car)
# NOTE(review): hard-coded machine-specific working directory; prefer
# project-relative paths (e.g. via here::here()) so the script is portable.
setwd("D:/Iowa State University/Debinski Lab/Nectar data/MAL")
# Nectar sugar data for 2015 and 2016, combined with a year factor
# derived from the date column.
balssug15 <- read.csv("nectar analysis/data files/balssugar15.csv", header = T)
balssug16 <- read.csv("nectar analysis/data files/balssugar16.csv", header = T)
balssugboth <- rbind(balssug15,balssug16)
balssugboth$year <- as.factor(year(balssugboth$date))
# Cell sample sizes and raw means per treatment x year.
cellN <- with(balssugboth, table(treatment, year))
cellN
cellMean <- with(balssugboth, tapply(mass, list(treatment, year), mean))
cellMean
# Mixed model: mass ~ treatment * year with plant as a random intercept.
modmass <- lmer(mass ~ treatment * year + (1|plant), data = balssugboth)
mass.grid <- ref.grid(modmass)
summary(mass.grid)
# Least-squares means and pairwise contrasts for each main effect.
lsmeans(mass.grid, "treatment")
lsmeans(mass.grid, "year")
mass.treat <- lsmeans(mass.grid, "treatment")
pairs(mass.treat)
pairs.treat <- pairs(mass.treat)
test(pairs.treat, joint = T)  # joint test across treatment contrasts
mass.year <- lsmeans(mass.grid, "year")
pairs(mass.year)
pairs.year <- pairs(mass.year)
test(pairs.year, joint = T)
# Interaction contrasts (treatment differences within year), then the
# joint test of the interaction, plus a Type III ANOVA for comparison.
int.mass <- pairs(mass.grid, by = "year")
int.mass
int.masstable <- update(int.mass, by = NULL)
int.masstable
test(pairs(int.masstable), joint = T)
Anova(modmass, type = 3)
0bc4a7ecabbfa9fe307a3da63356a74fb56cad69 | dab05df8a6ddf8947638c2bc2c3b5946d13771e2 | /R/download.R | 0bf765fb47c3ede43cb617fc797c2a0331e16e8d | [
"MIT"
] | permissive | tpemartin/econR | 2011047b7ef100b27fffd99148a7698ce7f99930 | 5df4fd5bf61b417b9860b3efc7ff20339e694fe4 | refs/heads/master | 2023-09-05T03:34:20.354596 | 2021-11-23T12:22:42 | 2021-11-23T12:22:42 | 335,521,237 | 0 | 4 | null | 2021-03-17T07:18:16 | 2021-02-03T05:48:23 | HTML | UTF-8 | R | false | false | 1,051 | r | download.R | internalData <- function(){
Download = list(
googleShareLink=googleLink_download
# dropboxLink=NA
)
}
# helpers -----------------------------------------------------------------
#' Download Google drive shared link to a destination folder
#'
#' If the link resolves to a single file, just that file is downloaded; if it
#' resolves to a folder, every file listed in it is downloaded.
#'
#' @param googleSharedLink A shared link
#' @param destfolder A destination folder path; when `NULL`, the RStudio
#'   project root is used. The folder is created if it does not exist.
#'
#' @return Invisibly returns the vector of file indices walked over
#'   (from [purrr::walk()]); called for its side effect of writing the
#'   downloaded file(s) into `destfolder`.
#' @export
#'
#' @examples none
googleLink_download <- function(googleSharedLink,
                                destfolder = NULL) {
  drb <- googledrive::as_dribble(googleSharedLink)
  # A "drive#file" resource is a single file; anything else is treated as a
  # folder whose contents must be listed first.
  if (drb$drive_resource[[1]]$kind == "drive#file") {
    allFiles <- drb
  } else {
    allFiles <- googledrive::drive_ls(drb)
  }
  if (is.null(destfolder)) {
    .root <- rprojroot::is_rstudio_project$make_fix_file()
    destfolder <- .root()
  }
  if (!dir.exists(destfolder)) dir.create(destfolder)
  purrr::walk(
    seq_len(nrow(allFiles)),  # was 1:nrow(), which yields c(1, 0) on an empty listing
    ~{
      fileX <- allFiles[.x, ]
      googledrive::drive_download(
        file = fileX,
        path = file.path(destfolder, fileX$name),
        overwrite = TRUE  # was T, which is a reassignable alias
      )
    }
  )
}
|
2f965df7bd90d2d96bec02af95962d48d1453c29 | b42355f824cc5777d016380bd7cdc9e352f2179e | /R/postal-package.R | 69d4a7c28923332573893dcee6a1264cc2fa6878 | [
"MIT"
] | permissive | aedobbyn/postal | 541932150555d0894fcf1b3f0868e71db5ba0a0e | cac6491034514fb17dc78ff2e8e2582a1d0802a4 | refs/heads/master | 2020-03-20T03:30:42.266004 | 2019-02-02T17:23:01 | 2019-02-02T17:23:01 | 137,148,777 | 21 | 2 | null | 2018-08-20T03:11:56 | 2018-06-13T01:54:46 | R | UTF-8 | R | false | false | 507 | r | postal-package.R | #' Fetch mail information and zones from USPS
#'
#' Calculate shipping rates and times for packages and get the USPS zones corresponding to 3-digit and 5-digit zip code pairs.
#'
#' Contributors:
#' \itemize{
#' \item Amanda Dobbyn
#' }
#'
#'
#' To get postage information, use \code{fetch_mail}.
#' To get zones, use \code{get_zone_three_digit} or \code{get_zone_five_digit}.
#'
#' The zones vignette can be found with \code{browseVignettes(package = "postal")}.
#' @name postal
#' @docType package
NULL
|
1920a4383790f88641ccb1f34904daf5107e64a6 | 49ff0bc7c07087584b907d08e68d398e7293d910 | /mbg/mbg_core_code/mbg_central/LBDCore/R/compile_polygons.R | 5993a779028450db27d45a731a059f8a3b45f9bc | [] | no_license | The-Oxford-GBD-group/typhi_paratyphi_modelling_code | db7963836c9ce9cec3ca8da3a4645c4203bf1352 | 4219ee6b1fb122c9706078e03dd1831f24bdaa04 | refs/heads/master | 2023-07-30T07:05:28.802523 | 2021-09-27T12:11:17 | 2021-09-27T12:11:17 | 297,317,048 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,018 | r | compile_polygons.R | #' @title FUNCTION_TITLE
#' @description Extract model estimates and population over the comparison
#'   polygons belonging to one survey (NID) and compute a population-weighted
#'   mean estimate per polygon.
#' @param this_nid survey NID used to subset rows of the global `compare_data`
#' @param estimates_raster multi-band raster of model estimates, one band per
#'   modelling period (band 1 corresponds to year 2000)
#' @return a data.table of the survey's polygons with a `geo_mean` column
#'   holding the population-weighted mean estimate for each polygon
#' @details Relies on objects from the calling environment: `compare_data`,
#'   `compare_spdf`, `master_list` and `shapefile_version`.
#' @examples
#' \dontrun{
#' if (interactive()) {
#' # EXAMPLE1
#' }
#' }
#' @rdname compile_polygons
#' @export
compile_polygons <- function(this_nid, estimates_raster) {
  ## Extract preds/pops over comparison polygons, and calculate pop-weighted mean outcome over polygons.
  message(this_nid)
  nid_results <- compare_data[nid == this_nid, ]
  gaul <- gaul_convert(nid_results[, iso3][1], shapefile_version = shapefile_version)
  message(paste0("gaul", gaul))
  # Align the country's population raster to the "simple" polygon raster grid.
  country_pops <- master_list[[paste0("list_", gaul, ".pops_", gaul)]]
  country_pops <- crop(country_pops, extent(master_list[[paste0("list_", gaul, ".simple_", gaul)]]))
  country_pops <- setExtent(country_pops, master_list[[paste0("list_", gaul, ".simple_", gaul)]])
  country_pops <- mask(country_pops, master_list[[paste0("list_", gaul, ".simple_", gaul)]])
  message("crop")
  # Same alignment for the model-estimate raster.
  country_estimates <- crop(estimates_raster, extent(master_list[[paste0("list_", gaul, ".simple_", gaul)]]))
  country_estimates <- setExtent(country_estimates, master_list[[paste0("list_", gaul, ".simple_", gaul)]])
  country_estimates <- mask(country_estimates, master_list[[paste0("list_", gaul, ".simple_", gaul)]])
  # Merge survey outcomes onto the polygon layer and drop missing outcomes.
  all_data <- merge(compare_spdf, nid_results, by = c("location_code", "shapefile"))
  all_data <- all_data[!is.na(all_data@data$outcome), ]
  all_data$geo_mean <- 0
  for (shape_x in unique(all_data$location_code)) {
    message(shape_x)
    test_poly <- all_data[all_data$location_code == shape_x, ]
    period <- test_poly$year[1] - 2000 + 1  # raster band index; band 1 = year 2000
    preds <- extract(country_estimates[[period]], test_poly)
    pops <- extract(country_pops[[period]], test_poly)
    # Population-weighted mean of the extracted pixel estimates (was na.rm = T).
    all_data$geo_mean[all_data$location_code == shape_x] <- weighted.mean(preds[[1]], pops[[1]], na.rm = TRUE)
  }
  this_data <- as.data.table(all_data)
  return(this_data)
}
|
1ed2044f559d599b85fb3de36ea695b9fe3ae27d | 0732213947df3a08c33d7414ea1e1dd461c142e7 | /ch10_tibbles/tibbles.r | 19c4ec8af3aeee6f600eec34437489fbae7405c8 | [] | no_license | arpitag1/R-for-data-science | ea3c580dd8288a1bb368c5aba14911785bec387b | f09fb8ebb9f44a0bec242126de8b3cdfe9ef5bc1 | refs/heads/master | 2022-02-08T23:37:06.525461 | 2017-09-29T06:51:16 | 2017-09-29T06:51:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,308 | r | tibbles.r | # tibbles
# Tutorial notes (R for Data Science ch. 10): use tibbles instead of
# traditional data.frame in R.
vignette("tibble")
library(tidyverse)
as_tibble(iris)
# 10.2 creating tibble ----------------------------------------------------
df1 = tibble(x=1:3,y=list(1:5,1:10,1:20))
df1$y
# Columns are built sequentially, so z can refer to x and y.
tibble(
  x = 1:5,
  y = 1,
  z = x^2 + y
)
# Non-syntactic names are allowed when backquoted.
tb <- tibble(
  `:)` = "smile",
  ` ` = "space",
  `2000` = "number"
)
# tribble: transposed, row-wise tibble construction
tribble(
  ~x , ~y, ~z,
  # --|--| --
  "a",2,3.6,
  "b",1,8.5
)
# 10.3 tibbles vs data.frame ---------------------------------------------
tibble(
  a = lubridate::now() + runif(1e3) * 86400,
  b = lubridate::today() + runif(1e3) * 30,
  c = 1:1e3,
  d = runif(1e3),
  e = sample(letters,1e3,replace=T)
)
# print: control rows shown (n) and width
nycflights13::flights %>%
  print(n=10,width=Inf)
as_tibble(mtcars) %>%
  print(n=3)
# 10.3.2 subsetting -------------------------------------------------------
df <- tibble(
  x= runif(5),
  y = rnorm(5)
)
# extract by name
df$x
df[['x']]
# extract by position
df[[1]]
# . in pipeline (placeholder for the piped object)
df %>% .$x
df %>% .[["x"]]
# 10.4. interacting with data.frame(older code) ----------------------------
class(as.data.frame(tb))
# 10.5 ex -----------------------------------------------------------------
# ex1:
class(mtcars)
class(as_tibble(mtcars))
# ex2 :
df <-data.frame(abc=1,xyz='a')
df[,"xyz"]
df[,c("abc","xyz")]
df_tbl <- as_tibble(df)
df_tbl
df_tbl[,"xyz"]
df_tbl[,c("abc","xyz")]
# vignette ----------------------------------------------------------------
vignette("tibble")
# no more stringsAsFactors = FALSE needed with tibble()
df <- data.frame(letters,stringsAsFactors = FALSE)
class(df$letters) # character (not factor)
df_tbl <- tibble(x = letters)
class(df_tbl$x) # character
# list column
data.frame(x=1:3,y=list(1:5,1:10,1:20)) # errors: data.frame() cannot take a plain list column
tibble(x=1:3,y=list(1:5,1:10,1:20))
# tibble never adjusts the names of variables
names(data.frame(`crazy name` = 1)) # "crazy.name"
names(tibble(`crazy name`=1))# "crazy name"
# evaluates args lazily and sequentially
tibble(x=1:30,y=x^2)
options(tibble.print_min=15)
tibble(x=1:30,y=x^2)
# dataframe vs tibble: [ on a data.frame may drop to a vector, a tibble never does
df1 <-data.frame(x=1:3,y=3:1)
class(df1[,1:2]) # data.frame
class(df1[,1]) # integer
df2 <- tibble(x=1:3,y=3:1)
class(df2[,1:2])
class(df2[,1])
df2[[1]]
class(df2$x)
# stricter: $ on a tibble does no partial matching
df <-data.frame(abc=1)
df$a
df2 <-tibble(abc=1)
df2$a # NULL, unknown column 'a'
|
34d484ae74168615c21ebd7a606463522893b412 | 44670272d87d705f461abcf365c88847e7b1b69c | /R/listing.R | 6995e18a7a05db3801cc0ff32ee4987e190b221b | [
"MIT"
] | permissive | npechl/empdata | a4473bea060ab749a6909487c9535f17186f1cdf | d924d0a171ce1bb8f2fc5ff971e71816458a8da5 | refs/heads/master | 2023-08-31T17:50:14.777493 | 2021-10-12T10:13:55 | 2021-10-12T10:13:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,127 | r | listing.R |
#' List the datasets bundled with the package
#'
#' Emits one message per bundled object, pairing each dataset name with a
#' short description: the three OTU score matrices, the three observation
#' metadata tables and the sample metadata table.
#'
#' @return `NULL`, invisibly; called for its side effect of printing messages.
#' @export
#'
#' @examples
#' list_datasets()
list_datasets <- function() {
    # message() concatenates its arguments, so pass the name/description as
    # separate arguments instead of wrapping them in c().
    message("emp.data.2k90bp:\t", "a dgCMatrix object containing 90-bp microbial sequences and their corresponding score values across 2,000 samples")
    message("emp.data.2k100bp:\t", "a dgCMatrix object containing 100-bp microbial sequences and their corresponding score values across 1,856 samples")
    message("emp.data.2k150bp:\t", "a dgCMatrix object containing 150-bp microbial sequences and their corresponding score values across 975 samples")
    message("obs.metadata.2k90bp:\t", "a data.table object containing 90-bp microbial sequences and their corresponding taxonomy")
    message("obs.metadata.2k100bp:\t", "a data.table object containing 100-bp microbial sequences and their corresponding taxonomy")
    message("obs.metadata.2k150bp:\t", "a data.table object containing 150-bp microbial sequences and their corresponding taxonomy")
    message("sample.metadata.sub2k:\t", "a data.table class containing sample metadata of 2,000 samples")
    invisible(NULL)
}
#' List the bundled OTU score tables
#'
#' Emits one message per bundled OTU matrix (90-, 100- and 150-bp reads),
#' pairing each object name with a short description.
#'
#' @return `NULL`, invisibly; called for its side effect of printing messages.
#' @export
#'
#' @examples
#' list_otu_tables()
list_otu_tables <- function() {
    # message() concatenates its arguments; no need to wrap them in c().
    message("emp.data.2k90bp:\t", "a dgCMatrix object containing 90-bp microbial sequences and their corresponding score values across 2,000 samples")
    message("emp.data.2k100bp:\t", "a dgCMatrix object containing 100-bp microbial sequences and their corresponding score values across 1,856 samples")
    message("emp.data.2k150bp:\t", "a dgCMatrix object containing 150-bp microbial sequences and their corresponding score values across 975 samples")
    invisible(NULL)
}
#' List the bundled taxonomy (observation metadata) tables
#'
#' Emits one message per bundled observation metadata table (90-, 100- and
#' 150-bp reads), pairing each object name with a short description.
#'
#' @return `NULL`, invisibly; called for its side effect of printing messages.
#' @export
#'
#' @examples
#' list_taxonomy_tables()
list_taxonomy_tables <- function() {
    # message() concatenates its arguments; no need to wrap them in c().
    message("obs.metadata.2k90bp:\t", "a data.table object containing 90-bp microbial sequences and their corresponding taxonomy")
    message("obs.metadata.2k100bp:\t", "a data.table object containing 100-bp microbial sequences and their corresponding taxonomy")
    message("obs.metadata.2k150bp:\t", "a data.table object containing 150-bp microbial sequences and their corresponding taxonomy")
    invisible(NULL)
}
|
dcb2c36fe741bc0cf5f7791bfd674265f05076b6 | 8822932e19293117812ee9826a87b1a26111fe2a | /R/paged_spectro.R | 87661248baf2486462d2e0e4aa44da7e0241944a | [] | no_license | Louis-Backstrom/dynaSpec | 98732f5af01bcb29570b18b9c3f188635612cea9 | 0ad679fb9c753ea64167b1f14949e393273cea8e | refs/heads/master | 2023-09-04T00:11:17.564530 | 2021-10-26T09:28:45 | 2021-10-26T09:28:45 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,723 | r | paged_spectro.R | #' Make a paged dynamic spectrogram similar to spectral display in Adobe Audition
#'
#' This function works on an object generated with \code{\link{prep_static_ggspectro}}, an alias for prepStaticSpec().
#' Video generation is very time consuming, and all the desired spectrogram parameters should be set
#' in the prep step. The output is an mp4 video of a dynamic spectrogram video. If the input sound file was
#' segmented in the prep step, the resulting video will be a concatenation of multiple dynamic spectrogram "pages."
#' Each page has a sliding window revealing the part of the static spectrogram being played. Temporal width of each page
#' is defined by the xLim parameter in \code{\link{prep_static_ggspectro}}. You can also output temporary segmented files, if desired.
#'
#' @aliases pagedSpectro pagedSpec
#' @usage paged_spectro(specParams,destFolder,vidName,framerate=30,highlightCol="#4B0C6BFF",
#' highlightAlpha=.6,cursorCol="#4B0C6BFF",delTemps=TRUE)
#' @param specParams an object returned from \code{\link{prep_static_ggspectro}}
#' @param destFolder destination of output video; this setting overwrites setting from specParams object
#' @param vidName expects "FileName", .mp4 not necessary; if not supplied, will be named after the file you used in prep_static_ggspectro()
#' @param highlightCol default "#4B0C6BFF" (a purple color to match the default viridis 'inferno' palette)
#' @param highlightAlpha opacity of the highlight box; default is 0.6
#' @param cursorCol Color of the leading edge of the highlight box; default "#4B0C6BFF"
#' @param delTemps Default= TRUE, deletes temporary files (specs & WAV files used to create concatenated video)
#' @param framerate by default, set to 30 (currently this is not supported, as animate doesn't honor the setting)
#' @return Nothing is returned, though progress and file save locations are output to user. Video should play after rendering.
#' @seealso \code{\link{prep_static_ggspectro}}
#' @author Matthew R Wilkins (\email{matt@@galacticpolymath.com})
#' @references {
#' Araya-Salas M & Wilkins M R. (2020). *dynaSpec: dynamic spectrogram visualizations in R*. R package version 1.0.0.
#' }
#' @export
#' @examples \dontrun{
#' #show wav files included with dynaSpec
#' f <- list.files(pattern=".wav", full.names = TRUE,
#' path = system.file(package="dynaSpec"))
#'
#' femaleBarnSwallow<-prep_static_ggspectro(f[1],destFolder=tempdir(),
#' onlyPlotSpec = FALSE, bgFlood= TRUE)
#' paged_spectro(femaleBarnSwallow,destFolder=tempdir())
#'
#' maleBarnSwallow<-prep_static_ggspectro(f[2],destFolder=tempdir(),
#' onlyPlotSpec = FALSE, bgFlood= TRUE,min_dB=-40)
#'
#' paged_spectro(femaleBarnSwallow,destFolder=tempdir())
#'
#' # Make a multipage dynamic spec of a humpback whale song
#' # Note, we're saving PNGs of our specs in the working directory; to add
#' # axis labels, we set onlyPlotSpec to F, and to make the same background
#' # color for the entire figure, we set bgFlood= TRUE;
#' # The yLim is set to only go to 0.7kHz, where the sounds are for these big whales;
#' #also applying an amplitude transform to boost signal.
#' #This is a longer file, so we're taking the first 12 seconds with crop=12
#' #xLim=3 means each "page" will be 3 seconds, so we'll have 4 dynamic spec pages that get combined
#'
#' humpback <- prep_static_ggspectro(
#' "http://www.oceanmammalinst.org/songs/hmpback3.wav",destFolder=tempdir(),savePNG= FALSE,
#' onlyPlotSpec=FALSE,bgFlood= TRUE,yLim=c(0,.7),crop=12,xLim=3,ampTrans=3)
#'
#' #to generate multipage dynamic spec (movie), run the following
#' paged_spectro(humpback,destFolder=tempdir())
#'
#' # see more examples at https://marce10.github.io/dynaSpec/
#' }
paged_spectro <-function(specParams,destFolder,vidName,framerate=30,highlightCol="#4B0C6BFF",highlightAlpha=.6,cursorCol="#4B0C6BFF",delTemps=TRUE)
{
  # Renders each spectrogram "page" to a PNG, overlays an animated highlight
  # box + cursor with gganimate, exports each page as an mp4 with its audio,
  # then (if the wav was segmented) concatenates the pages with ffmpeg.
  xmin<-ymin <- xmax <- ymax <- NULL
  #This ^^ suppresses note about "no visible binding for global variable ‘xmax’"
  if(!ari::have_ffmpeg_exec()){
    cat("\n*****This script needs ffmpeg to work*****\n")
    cat("If you have a mac, with HomeBrew installed, you can fix this easily
    in terminal with:\n")
    cat("\n>\tbrew install ffmpeg\n")
    cat("\nIf not, download and install it from ffmpeg.org")
  }else{
    if(missing(destFolder)){destFolder <- specParams$destFolder}
    if(!missing(vidName)){
      iName0=tools::file_path_sans_ext(vidName)
      vidName=paste0(destFolder,iName0,".mp4")
    }else{
      iName0<-tools::file_path_sans_ext(specParams$outFilename)
      vidName=paste0(destFolder,iName0,".mp4")
    }#base name for output, sans extension
    #To avoid probs if a file contains '
    vidName<-gsub("'",".",vidName)
    iName0<-gsub("'",".",iName0)
    tempdir<-paste0(destFolder,"temp/")
    dir.create(tempdir,showWarnings=FALSE)
    #always export the newWav version that has been cropped/padded according to user parameters
    cat(paste0("Temporary files saved at: ",tempdir))
    newWavOut=paste0(tempdir,iName0,"_forVideo.wav")
    tuneR::writeWave(specParams$newWav,filename=newWavOut)
    #export wav files if spec is to be segmented; not necessary if wav is unaltered
    if(length(specParams$segWavs)>1){
      #create list of names for WAV audio segments
      #NOTE(review): 1:length(x) misbehaves on empty input; seq_along() is safer.
      outWAV<-lapply(1:length(specParams$segWavs),function(x) {paste0(tempdir,iName0,"_",x,"_.wav")})
      invisible(
        lapply(1:length(specParams$segWavs), function(x){fn=outWAV[[x]]
        tuneR::writeWave(specParams$segWavs[[x]],file=fn)
        cat(paste0("\nSaved temp wav segment: ",fn))}))
    }
    #One iteration per page: render background spec, animate, export.
    for(i in 1:length(specParams$segWavs))
    {
      #Address missing variables
      iName<-paste0(iName0,ifelse(length(specParams$segWavs)==1,"",paste0("_",i,"_")))
      #Save background spectrogram PNG to temp directory using tested parameters
      outPNG<-paste0(tempdir,paste0(iName,".png"))
      outTmpVid<-paste0(tempdir,paste0(iName,".mp4"))
      #output spec without axes, b/c we'll have to
      ggplot2::ggsave(filename=outPNG,plot=specParams$spec[[i]]+ggplot2::theme_void()+ggplot2::theme(panel.background=ggplot2::element_rect(fill=specParams$bg),legend.position = 'none'),dpi=300,width=specParams$specWidth,height=specParams$specHeight,units="in")
      print(paste0("Spec saved @ ",outPNG))
      #Read PNG bitmap back in
      spec_PNG<-png::readPNG(outPNG)
      spec_width_px<-attributes(spec_PNG)$dim[2]
      spec_height_px<-attributes(spec_PNG)$dim[1]
      #Create data frame for highlighting box animation for i^th wav segment
      #(one row per frame: the box shrinks as the cursor advances)
      range_i<-c((i-1)*specParams$xLim[2],(i-1)*specParams$xLim[2]+specParams$xLim[2])
      cursor<-seq(range_i[1],range_i[2],specParams$xLim[2]/framerate)
      played<-data.frame(xmin=cursor,xmax=rep(range_i[2],length(cursor)),ymin=rep(specParams$yLim[1],length(cursor)),ymax=rep(specParams$yLim[2], length(cursor)))
      #Make ggplot overlay of highlight box on spectrogram
      vidSegment<-{
        ggplot2::ggplot(played)+ggplot2::xlim(range_i)+ggplot2::ylim(specParams$yLim)+
          #Labels
          ggplot2::labs(x="Time (s)",y="Frequency (kHz)",fill="Amplitude\n(dB)\n")+
          ##Animate() seems to shrink font size a bit
          mytheme_lg(specParams$bg)+
          #Conditional theming based on user prefs (note, legend not currently supported)
          #Since I'm reimporting spec as a raster, legend would need to rebuilt manually...gets a little
          #warped if I embed it in the raster...doesn't look good.
          {
            #If user supplied fontAndAxisCol, change those settings (regardless of whether bg is flooded or not)
            if(!specParams$autoFontCol){
              ggplot2::theme(axis.text=ggplot2::element_text(colour=specParams$fontAndAxisCol),text=ggplot2::element_text(colour=specParams$fontAndAxisCol),axis.line = ggplot2::element_line(colour=specParams$fontAndAxisCol),axis.ticks=ggplot2::element_line(colour=specParams$fontAndAxisCol))
            }else{}
          }+{
            #get rid of axes & legend if requested
            if(specParams$onlyPlotSpec){ggplot2::theme_void()+ ggplot2::theme(plot.background=ggplot2::element_rect(fill=specParams$bg),text=ggplot2::element_text(colour=specParams$fontAndAxisCol))
            }else{
              #For cases where axes are plotted
              #if axes to be plotted, flood panel bg color over axis area?
              if(specParams$bgFlood){ggplot2::theme(plot.background=ggplot2::element_rect(fill=specParams$bg),axis.text=ggplot2::element_text(colour=specParams$fontAndAxisCol),text=ggplot2::element_text(colour=specParams$fontAndAxisCol),axis.line = ggplot2::element_line(colour=specParams$fontAndAxisCol),axis.ticks=ggplot2::element_line(colour=specParams$fontAndAxisCol),legend.background=ggplot2::element_rect(fill=specParams$bg))}else{}
            }
          }+
          #Add spectrogram
          ggplot2::annotation_custom(grid::rasterGrob(spec_PNG,width = ggplot2::unit(1,"npc"), height = ggplot2::unit(1,"npc")),- Inf, Inf, -Inf, Inf)+
          #Add box highlights for playback reveal
          ggplot2::geom_rect(data=played,ggplot2::aes(xmin=xmin,ymin=ymin,xmax=xmax,ymax=ymax),fill=highlightCol,alpha=highlightAlpha)+
          #Add cursor
          ggplot2::geom_segment(data=played,ggplot2::aes(x=xmin,xend=xmin,y=ymin,yend=ymax),col=cursorCol,size=2) +
          #Add animation
          #**** Time consuming animation stage *****
          gganimate::transition_reveal(xmin)
      }#end GGPLOT stuffs
      # #Increase plot margin slightly b/c it gets changed when exporting to video for some reason
      # if(!specParams$onlyPlotSpec){axisMargin=40}else{axisMargin=0}
      #### Export animated ggplot specs
      #save Audio File with sound in 1 step only if not segmented
      if(length(specParams$segWavs)==1){
        #note, height is set to 500px due to an issue w/ output being garbled at some resolutions; width according to aspect ratio
        gganimate::animate(vidSegment,renderer=gganimate::av_renderer(vidName,audio=newWavOut),duration=specParams$xLim[2],width=500*(spec_width_px/spec_height_px),height=500,units="px") #Need to save audio for segments!!
      }else{
        gganimate::animate(vidSegment,renderer=gganimate::av_renderer(outTmpVid,audio=outWAV[[i]]),duration=specParams$xLim[2],width=500*(spec_width_px/spec_height_px),height=500,units="px") #Need to save audio for segments!!
      }
    }#end for loop extracting video pieces
    #if necessary, combine segments
    if(length(specParams$segWavs)>1){
      #NOTE(review): ".wav" is a regex here and the dot matches any character;
      #use fixed = TRUE (or "\\.wav") to be safe with unusual file names.
      tmpPaths<-paste0("file '",gsub(".wav","",unlist(outWAV)),".mp4' duration ",specParams$xLim[2])
      writeLines(tmpPaths,paste0(tempdir,"mp4Segments.txt"))
      #Turns out this was wrong or has been fixed!! MP4s CAN be combined!
      # #Unfortunately, can't just slap MP4 files together, so have to have an intermediate .ts file step
      # ffmpegTransCode<-paste0(ffmpeg_exec(),' -y -i "',unlist(file_path_sans_ext(outWAV)),'.mp4" -vsync 1 -c copy "',unlist(file_path_sans_ext(outWAV)),'.mkv"')
      # invisible(sapply(ffmpegTransCode,system))
      #now combine .ts files into .mp4
      #For matching audio & video lengths:
      cropSmplRt<-specParams$newWav@samp.rate
      cropFileDur<-max(length(specParams$newWav@left),length(specParams$newWav@right))/cropSmplRt
      # cropFileDur2<-seconds_to_period(cropFileDur)
      # cropFileDur3<-sprintf(fmt='%02d:%02d:%2.3f',hour(cropFileDur2),minute(cropFileDur2),second(cropFileDur2))
      #Concat Step 1
      #concatenate mp4 segments
      #slight stutter for continuous sounds across segments, but the alternative step below doesn't work quite right, so good enough
      system(paste0(ari::ffmpeg_exec(),' -f concat -ss 00:00:00.000 -safe 0 -i "',paste0(tempdir,"mp4Segments.txt"),'" -codec copy -y "',vidName,'"') )
      #Concat Step 2
      #Add audio track back in (couldn't figure how to combine these steps)
      #THIS STEP CURRENTLY DOESN'T WORK WELL (DROPS LAST FEW FRAMES B/C MISMATCH IN A/V LENGTHS)
      # system(paste0(ari::ffmpeg_exec(),' -ss 0 -i "',paste0(tempdir,"deleteme.mp4"),'" -i "',newWavOut,'" -c:v libx264 -map 0:v:0 -map 1:a:0 -c:a aac -ac 1 -b:a 192k -y -vsync 1 -t ',cropFileDur3,' "',vidName,'"'))
      #Old Concat Step 1 (when step 2 is implemented); results in deleteme.mp4 intermediate
      # system(paste0(ari::ffmpeg_exec(),' -f concat -safe 0 -i "',paste0(tempdir,"mp4Segments.txt"),'" -codec copy -y "',paste0(tempdir,"deleteme.mp4"),'"'))
    }
    cat("\n\nAll done!\n")
    cat(paste0("file saved @",vidName))
    #NOTE(review): `open` is a macOS command; this line is not portable to Linux/Windows.
    system(paste0('open "',vidName,'"'))
    if(delTemps){unlink(tempdir,recursive=TRUE);print(paste0("FYI temporary file directory deleted @ ",tempdir))}
  }#end else which passed FFMPEG check
}#end paged_spectro definition
#create alias
pagedSpec<-paged_spectro |
b3b7026cd59ebd5d4f2a9c34ed15bc31b389d061 | 9b05876e4c76fe8e6c165e04567d4837ec06afdd | /inst/scripts/scratch.R | 71024cef4a6bc8b5be1a8b3e056611ff6a3426a1 | [
"MIT"
] | permissive | epiforecasts/covid19.track.severity | fbb67fcafce8b9345f40d865a7a9653923e36c97 | 01616e64dd3dbb39916ed25f3a82b4dbc15c6420 | refs/heads/main | 2023-06-11T00:10:33.011609 | 2021-07-01T19:34:56 | 2021-07-01T19:34:56 | 337,206,377 | 0 | 0 | NOASSERTION | 2021-06-30T17:25:50 | 2021-02-08T20:52:24 | R | UTF-8 | R | false | false | 400 | r | scratch.R | # load observed data
# Join the two series on date/region. The bracket filtering below (no comma)
# is data.table syntax, so `primary`/`secondary` are presumably data.tables
# -- TODO confirm against the calling script.
observations <- merge(primary, secondary, by = c("date", "region"))
# Keep only rows observed on or before the forecast date.
observations <- observations[date <= forecast_date]
# Drop rows where either series is missing.
observations <- observations[!is.na(primary)][!is.na(secondary)]
# Optionally restrict each region to its most recent `obs_weeks` weeks of data.
if (!is.null(obs_weeks)) {
  observations <- observations[,
    .SD[date >= (max(date) - lubridate::weeks(obs_weeks))],
    by = region
  ]
}
# Sort chronologically in place (data.table::setorder).
setorder(observations, date)
|
3fe996431928caa596aa3d56d198117bbdb428c7 | 5188655b6d390ff0fd9c194e88599b959318bec1 | /models/seq.qdr.b.JAGS.R | 6fa1bd23122fc8237698528f6f0eb80f72060762 | [] | no_license | NErler/TimeVarImp | fadc768e7d884a1885b4c374dc7e22dda03733b4 | be8eec265088646d1cb473645d060e64400ce4b2 | refs/heads/master | 2021-01-22T03:49:37.025784 | 2017-06-20T13:51:28 | 2017-06-20T13:51:28 | 92,407,416 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,086 | r | seq.qdr.b.JAGS.R | model {
  # Likelihood: for each of the T longitudinal records j, jointly model the
  # outcome y[j, 1] and the time-varying covariate y[j, 2].
  # Note: JAGS parameterizes dnorm by (mean, precision), not variance.
  for(j in 1:T){
    # Linear mixed effects model for y (and x)
    y[j, 1] ~ dnorm(mu.y[j], tau.y)
    y[j, 2] ~ dnorm(mu.x[j], tau.x)
    # y-mean: subject-specific effects (columns 1:2 of b) plus a quadratic
    # effect of the covariate.
    mu.y[j] <- inprod(b[subj[j], 1:2], Z[j, ]) + beta[3]*y[j, 2] + beta[4] * pow(y[j, 2], 2)
    mu.x[j] <- inprod(b[subj[j], 3:4], Z[j, ])
  }
  # Priors for the covariance of y (and x); sig.* are the implied SDs.
  tau.y ~ dgamma(0.01, 0.01)
  sig.y <- sqrt(1/tau.y)
  tau.x ~ dgamma(0.01, 0.01)
  sig.x <- sqrt(1/tau.x)
  # Random effects: columns 1:2 belong to the y-model, columns 3:4 to the
  # x-model; the joint multivariate normal links the two sub-models.
  for(i in 1:N){
    # random effects of y
    b[i, 1:4] ~ dmnorm(mu.b[i, 1:4], inv.D[ , ])
    mu.b[i, 1] <- beta[1]
    mu.b[i, 2] <- beta[2]
    # random effects of x
    mu.b[i, 3] <- alpha[1]
    mu.b[i, 4] <- alpha[2]
  }
  # Priors for the regression coefficients
  for(k in 1:4){
    beta[k] ~ dnorm(0, 0.001)
  }
  for(k in 1:2){
    alpha[k] ~ dnorm(0, 0.001)
  }
  # Priors for random effects part
  #################################
  # Wishart prior on the random-effects precision matrix, with gamma
  # hyperpriors on the diagonal of its scale matrix.
  for(k in 1:4){
    priorR.invD[k,k] ~ dgamma(0.1,0.01)
  }
  inv.D[1:4, 1:4] ~ dwish(priorR.invD[1:4, 1:4], 4)
  D[1:4, 1:4] <- inverse(inv.D[, ])
}
|
f9de6392e9de87750efbe380790bea88bdcbfea7 | 0269891a7828ad264c515a679435f4dca723833e | /exploratory.R | 4418d0d36eedab66ca774163705e6e35163e514e | [] | no_license | lenkasaka/Rukol | bc3179bea56bb1b255354805ab5ca4774662be83 | 003a6808e2bf59ce9558b32b5e600a77dfaee6e1 | refs/heads/main | 2023-01-19T16:49:16.894325 | 2020-12-03T11:30:53 | 2020-12-03T11:30:53 | 302,658,928 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 706 | r | exploratory.R | ## Assignment 1
# basic exploratory analysis of the housing training data
# NOTE(review): library(xlsx) appears unused in this snippet; confirm before removing.
library(xlsx)
library(tidyverse)
# NOTE(review): the positional `1` matches read.csv's `header` argument;
# spelling it out as header = TRUE would be clearer.
traindata <- read.csv('train.csv',1)
View(traindata)
summary(traindata)
# Distribution of the response variable.
ggplot(traindata, aes(x=SalePrice)) +
  geom_histogram()
#correlation of each numeric variable with SalePrice (complete cases only)
x<- traindata$SalePrice
y<- select_if(traindata, is.numeric) %>% select(-SalePrice)
cor(x, y, use="complete.obs")
# boxplots for factor variables: reshape to long form, one facet per factor
# NOTE(review): gather() is superseded by tidyr::pivot_longer().
fact <- select_if(traindata, is.factor) %>% mutate(Id = traindata$Id, SalePrice = traindata$SalePrice)
fact_long<- fact %>%
  gather("var", "val", MSZoning:SaleCondition)
ggplot(fact_long, aes(x = val, y = SalePrice)) +
  geom_boxplot()+
  facet_wrap(~var)
|
f70100afcc529103dc15ad2af27bbd9c69574011 | 4378bae11ae1d6c7c319f61527bc90e3838b2f5f | /code/dia_28.R | 48f0aa712dec1a859d9d369118fe3f898fc235b6 | [] | no_license | Daniel795-lab/desafio_30_dias_de_graficos | 763859a8cbb93df3c03d1a2ab469541d8e18e71f | f6d7377c4ebff646a57c9daa117687a733c4d191 | refs/heads/master | 2022-10-25T17:59:50.028021 | 2020-06-10T08:42:50 | 2020-06-10T08:42:50 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,742 | r | dia_28.R | library(rjson)
library(tidyverse)
library(circlize)
# Parameters: figure dimensions (presumably sized for a Twitter image -- see variable name)
twitter_dim<-list(width=unit(13/2,"cm"),height=unit(6.5/2,"cm"))
# Load the 2018 migration data as a nested list (rjson::fromJSON), to be flattened into a data frame below
datos_json <- fromJSON(file = "../datos/flujo_migracion_2018.json")
# Flatten one parsed JSON record into a one-row data frame.
# MetaData[[3]] holds the origin name, MetaData[[2]] the destination name;
# the migration value comes from Data[[1]]$Valor, or NA when Data is empty.
json_parser <- function(x) {
  flow <- if (length(x$Data) > 0) x$Data[[1]]$Valor else NA
  data.frame(
    origen = x$MetaData[[3]]$Nombre,
    destino = x$MetaData[[2]]$Nombre,
    migracion = flow
  )
}
# Accumulate one row per JSON record.
# NOTE(review): growing datos_df with bind_rows inside a loop is O(n^2);
# purrr::map_dfr(datos_json, json_parser) would build it in one pass.
datos_df<-NULL
for (i in 1:length(datos_json)){datos_df<-datos_df %>% bind_rows(json_parser(datos_json[[i]]))}
# Drop missing flows and strip anything after a comma in the origin/destination names.
datos_df<-datos_df %>%
  filter(!is.na(migracion)) %>%
  separate(origen,into=c("origen","todelete1"),sep=",",fill = "right") %>%
  separate(destino,into=c("destino","todelete2"),sep=",",fill = "right") %>%
  select(-starts_with("todelete"))
# Plot: render the directional chord diagram of migration flows to a PNG.
png(filename = "../images/dia_28.png",res=300,width=13*2,height=6.5*2,units = "cm")
set.seed(1234)
chordDiagram(datos_df,
             directional = 1,
             direction.type = c("diffHeight","arrows"),
             diffHeight = -0.04,
             annotationTrack = "grid",
             link.arr.type = "big.arrow")
# Add rotated sector labels on the outer track.
circos.track(track.index = 1, panel.fun = function(x, y) {
  circos.text(CELL_META$xcenter, CELL_META$ylim[1], CELL_META$sector.index,facing = "clockwise", niceFacing = TRUE,cex=0.8, adj = c(1, 0.5))
},bg.border = NA)
title("Flujo de migración interautonómica en España (2018)", cex = 0.6)
mtext("twitter: @GuillemSalazar\nCódigo: https://github.com/GuillemSalazar/desafio_30_dias_de_graficos",side = 1,adj = 0,cex=0.5)
dev.off()
|
bdec91f828a59ae7d4e18ee44f7621b5d6419e5d | 1db5084d33ce23cfc7031509e5e9266b0d8ae07c | /vignettes/cell_segmentation/step9_clustering/fsom_clustering.R | c619ab684a6e962071d2a2851a704e003a4acd2d | [] | no_license | Kaestner-Lab/T2D_IMC | 6140fcf1d9ee0fd6aa5544253fb9171d77ebc478 | 3802926dd85a1f1cbbb91aec8bd616161311b211 | refs/heads/main | 2023-04-10T06:58:46.108000 | 2021-11-05T17:25:13 | 2021-11-05T17:25:13 | 375,136,919 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,733 | r | fsom_clustering.R | rm(list = ls())
# FlowSOM clustering of normalized single-cell intensities, followed by
# per-cluster QC boxplots and a spatial sanity-check plot.
library(ggplot2)
library(reshape2)
library(FlowSOM)
# NOTE(review): hard-coded user-specific paths; parameterize for portability.
home_dir <- "/Users/wuminghui/MYDATA/Data0803/"
setwd(home_dir)
marker_dir <- "Cluster_marker.csv"
input_dir <- "normalized/data_norm.rds"
output_dir<- "clustering/"
dir.create(output_dir)
output_filename <- "clustered"
# Normalized single-cell data plus the marker panel used for clustering.
mydata <- readRDS(input_dir)
marker_df <- read.csv(marker_dir)
marker_reclus <- marker_df$marker[1:27]
mydata_dm <- data.matrix(mydata)
# transform = F, scale = F: data were already normalized upstream
# (NOTE(review): prefer FALSE over the maskable alias F).
flowSOM.res <- ReadInput(mydata_dm, transform = F, scale = F)
# building the 15x15 (225 clusters) self-organizing map
fsom <- BuildSOM(flowSOM.res, colsToUse = marker_reclus, xdim = 15, ydim = 15, rlen = 10)
nmc <- 40
# combined into 40 groups through consensus clustering
metaClustering <- metaClustering_consensus(fsom$map$codes, k=nmc)
metaClustering_perCell <- GetMetaclusters(fsom, metaClustering)
# Attach the per-cell metacluster label and persist the result.
metafsom_df <- cbind(mydata, metaClustering_perCell)
saveRDS(metafsom_df, paste0(output_dir,output_filename,".rds"))
# keep a record of number of cells in each cluster for each image
fre <- table(metafsom_df$Image_name, metafsom_df$metaClustering_perCell)
# NOTE(review): this assignment shadows base::sum from here on; rename it.
sum <- apply(fre,2,function(x){sum(x)})
fre_df <- rbind(fre,sum)
write.csv(fre_df, paste0(output_dir,output_filename, "frequ.csv"))
## ====== clust end ======= ##
# plot boxplot showing distribution of mean-intensity per marker for each cluster
# this plot is used to guide the cluster annotation
## plot the boxplot
marker <- c("HLA.ABC","C.peptide","Nestin","Glucagon","pan.Keratin","CD11b","CD44","PDX.1",
            "CD45","CD56","beta.Actin","CD4","NKX6.1","CD68","Somatostatin","CD20","CD8",
            "CD99","CA2","NFkb","GnzB","Ki67","CD57", "CD31","CD14", "Foxp3","p16","CD3",
            "pS6","CD45RO","HLA.DR","PP","GHRL")
# Long format: one row per cell x marker for faceted plotting.
data_FlowSOM <- data.frame(metafsom_df)
melt_data <- melt(data_FlowSOM, measure.vars = marker,
                  variable.name = "target", value.name = "value")
melt_data$metaClustering_perCell <- as.factor(melt_data$metaClustering_perCell)
## plot the boxplot of each marker
ggplot(melt_data, aes( x = metaClustering_perCell, y = value, group = metaClustering_perCell)) +
  geom_boxplot( aes(color=metaClustering_perCell), alpha = 0.3, size = 0.3, show.legend = F)+
  facet_wrap(~ target, ncol = 4, scales = "free")+
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
save_name <- paste0(output_dir, output_filename, "boxreclumetaMar.png")
ggsave(save_name, width = 1000, height = 1000, units = 'mm')
## boxplot of each cluster
ggplot(melt_data, aes( x = target, y = value, group = target )) +
  geom_boxplot( aes(color=target), alpha = 0.3, size = 0.3, show.legend = F)+
  facet_wrap(~ metaClustering_perCell, ncol = 4, scales = "free")+
  theme(axis.text.x = element_text(angle = 90, vjust = 0.5, hjust=1))
save_name <- paste0(output_dir, output_filename, "boxreclumetaClu.png")
ggsave(save_name, width = 1000, height = 1000, units = 'mm')
## ========== ##
# Another validation method is to reconstruct the image by projecting the cells based on their x and y coordinate and see the spatial distribution of cells (and potentially compare with the raw images).
# select cells in an image
cluster_6009h2 <- subset(metafsom_df, metafsom_df$Image_name=="NPOD6444_Tail_ROI2")
# subset cells in a specific cluster
cluster_sel_6009h2 <- subset(cluster_6009h2, cluster_6009h2$metaClustering_perCell %in% c(4))
# Empty 1100x1100 canvas with a flipped y-axis (image coordinates), then mark
# each selected cell at its recorded centroid.
png("cell_spatial_dist.png")
plot(1:1100, 1:1100, ylim = rev(range(1:1100)),type = "n",
     main = paste(unique(cluster_sel_6009h2$metaClustering_perCell),
                  unique(cluster_6009h2$Image_name), nrow(cluster_sel_6009h2)))
text(x = cluster_sel_6009h2$Location_Center_X, y = cluster_sel_6009h2$Location_Center_Y,
     label = "o",col = "red", cex = 0.8)
dev.off()
|
ec96adeca032672d01b0aee90ff4bd7846ffec90 | 3b5965101b46f89d6317c986c0bf40688b33bf32 | /2009_R_pl_Silkworm/PCA/R_code/FIG-PCAfun-27-auto.R | 7b97be7b99167c3b2c044dbf6ab0a0c5ea2857aa | [] | no_license | celinesf/personal | e1ea370a1f5914b00ea32f78943d4f1b0512eaa3 | d844b3597636930ce255aca2901e95063139f42e | refs/heads/master | 2022-07-12T13:15:53.420456 | 2021-06-17T07:46:24 | 2021-06-17T07:46:24 | 13,597,376 | 0 | 0 | null | 2022-06-25T07:27:19 | 2013-10-15T17:49:08 | HTML | UTF-8 | R | false | false | 4,875 | r | FIG-PCAfun-27-auto.R | rm(list = ls())
#columns are snps (0,1,2)
#rows are individuals
# EigenStrat-style PCA for one chromosome tag.
#
# Reads the matrix from file "X-<chr>" and the SNP / individual counts from
# file "SNPIND-<chr>", eigen-decomposes the matrix, and attaches
# Tracy-Widom statistics for the leading eigenvalues.
#
# Args:
#   chr: character tag identifying the pair of input files (e.g. "sum-auto").
# Returns:
#   Object of class "eigenstratcel": the eigen() result plus $TW (numeric
#   vector of Tracy-Widom statistics), $mu, $sigma, $snp, $ind.
eigenstratcel <- function(chr) {
  X <- read.table(paste("X", chr, sep = "-"), stringsAsFactors = FALSE)
  E <- eigen(X)
  # One row, two columns: number of SNPs, number of individuals.
  info <- read.table(paste("SNPIND", chr, sep = "-"), stringsAsFactors = FALSE)
  # [[ ]] extracts plain numbers (the original kept 1-column data frames,
  # which made all the arithmetic below produce 1x1 data frames).
  snp <- info[[1]]
  ind <- info[[2]]
  # Tracy-Widom centering and scaling constants.
  mu <- (sqrt(snp - 1) + sqrt(ind))^2 / snp
  sigma <- (sqrt(snp - 1) + sqrt(ind)) / snp * (1 / sqrt(snp - 1) + 1 / sqrt(ind))^(1 / 3)
  # Vectorized replacement of the original element-by-element loop, which
  # grew E$TW one entry at a time via the 1:n pattern.
  idx <- seq_len(ind)
  E$TW <- (E$values[idx] * ind / sum(E$values) - mu) / sigma
  E$mu <- mu
  E$sigma <- sigma
  E$snp <- snp
  E$ind <- ind
  class(E) <- "eigenstratcel"
  E
}
# S3 plot method: scatter the first two eigenvectors; extra graphical
# parameters are forwarded to plot().
plot.eigenstratcel <- function(x, ...) {
  plot(x$vectors[, 1:2], ...)
}
# S3 print method: show the Tracy-Widom statistics.
print.eigenstratcel <- function(x) {
  print(x$TW)
}
# ---- Driver: PCA of the combined autosomal data set ----
chr="sum-auto"
file1=paste("Fig-PCA",chr,sep="-")
# Eigen-decomposition + Tracy-Widom statistics (function defined above);
# reads files "X-sum-auto" and "SNPIND-sum-auto" from the working directory.
E<-eigenstratcel(chr)
# Dump eigenvalues, one per line.
eig=paste("eigen",chr,sep="-")
write(E$values,eig,n=1)
# Dump the first six eigenvectors, tab-separated (t() so each output row
# corresponds to one individual's six loadings).
eig=paste("eigenvector",chr,sep="-")
write(t(E$vectors[,1:6]),eig,n=6,sep="\t")
# Phenotype table for the 40 individuals; column V is the voltinism class.
P=read.table("IndvPheno-Latlon",header=T)
# Build per-individual plotting colour (colo) and symbol (ch) vectors:
# V=1 -> cyan4/+, V=2 -> slateblue/x, V=3 -> darkblue/diamond, wild -> black/circle.
colo=c()
ch=c()
for(i in 1:40)
{
if(P$V[i]==1)
{colo=c(colo,"cyan4")
ch=c(ch,3) }
else if(P$V[i]==2)
{colo=c(colo,"slateblue")
ch=c(ch,4)}
else if(P$V[i]==3)
{colo=c(colo,"darkblue")
ch=c(ch,5)}
#else if(i==30 |i==34)
#{colo=c(colo,grey(.4))
#ch=c(ch,2)}
else
{colo=c(colo,"black")
ch=c(ch,1)}
}#japan
# Legend labels / colours / symbols for Figure 1.
leg=c("Domesticated-V=1","Domesticated-V=2","Domesticated-V>2","Wild")
cleg=c("cyan4","slateblue","darkblue","black")
pleg=c(3,4,5,1)
par(mfrow=c(1,1))
####### Figure 1: eigenvector 1 vs eigenvector 2.
postscript("Fig1.eps",paper="special",width = 8.0, height = 8.0,)
par(mar=c(5.1, 5.1, 4.1, 2.1))
plot(E$vectors[,1:2], col=colo, xlab="Eigenvector 1", ylab="Eigenvector 2",pch=ch,cex=1.5,cex.axis=1.5,cex.lab=1.5)
legend("topleft",leg,cex=1.5,col=cleg,pch=pleg, bty="n")
dev.off()
# Rebuild colour/symbol vectors, now also highlighting individuals 30 and 34
# (labelled "B.m. Ziyang & Pengshan") with symbol 6.
colo=c()
ch=c()
for(i in 1:40)
{
if(P$V[i]==1)
{colo=c(colo,"cyan4")
ch=c(ch,3) }
else if(P$V[i]==2)
{colo=c(colo,"slateblue")
ch=c(ch,4)
}
else if(P$V[i]==3)
{colo=c(colo,"darkblue")
ch=c(ch,5)}
else if(i==30 |i==34)
{colo=c(colo,"black")
ch=c(ch,6)}
else
{colo=c(colo,"black")
ch=c(ch,1)}
}#japan
# NOTE(review): the legend says "V>3" but the branch above tests V==3 —
# confirm which is intended.
leg=c("Domesticated-V=1","Domesticated-V=2","Domesticated-V>3","Wild","B.m. Ziyang & Pengshan ")
cleg=c("cyan4","slateblue","darkblue","black","black")
pleg=c(3,4,5,1,6)
######### Figure S2: eigenvector 1 vs eigenvector 4.
postscript("FigS2.eps",paper="special",width = 8.0, height = 8.0,)
par(mar=c(5.1, 5.1, 4.1, 2.1))
plot(E$vectors[,1],E$vectors[,4], col=colo,xlab="Eigenvector 1", ylab="Eigenvector 4",pch=ch,cex=1.5,cex.axis=1.5,cex.lab=1.5)
legend("bottomright",leg,cex=1.5,col=cleg,pch=pleg, bty="n")
dev.off()
########## Figure S4: eigenvector 2 vs eigenvector 4.
postscript("FigS4.eps",paper="special",width = 8.0, height = 8.0,)
par(mar=c(5.1, 5.1, 4.1, 2.1))
plot(E$vectors[,2], E$vectors[,4],col=colo,xlab="Eigenvector 2", ylab="Eigenvector 4",pch=ch,cex=1.5,cex.axis=1.5,cex.lab=1.5)
legend("right",leg,cex=1.5,col=cleg,pch=pleg, bty="p",box.lty=2)
dev.off()
# Rebuild colour/symbol vectors, this time highlighting individuals 1 and 3
# ("J7532 & J872") with symbol 2 inside the V=2 group.
colo=c()
ch=c()
for(i in 1:40)
{
if(P$V[i]==1)
{colo=c(colo,"cyan4")
ch=c(ch,3) }
else if(P$V[i]==2)
{colo=c(colo,"slateblue")
if(i==1 |i==3)
#{colo=c(colo,grey(.4))
{ch=c(ch,2)}
else{ch=c(ch,4)}
}
else if(P$V[i]==3)
{colo=c(colo,"darkblue")
ch=c(ch,5)}
#else if(i==1 |i==34)
#{colo=c(colo,grey(.4))
#ch=c(ch,2)}
else
{colo=c(colo,"black")
ch=c(ch,1)}
}#japan
# NOTE(review): legend label "V>3" vs the V==3 branch above — confirm.
leg=c("Domesticated-V=1","Domesticated-V=2","Domesticated-V>3","Wild","J7532 & J872 ")
cleg=c("cyan4","slateblue","darkblue","black","slateblue")
pleg=c(3,4,5,1,2)
######### Figure S1: eigenvector 1 vs eigenvector 3.
postscript("FigS1.eps",paper="special",width = 8.0, height = 8.0,)
par(mar=c(5.1, 5.1, 4.1, 2.1))
plot(E$vectors[,1],E$vectors[,3], col=colo,xlab="Eigenvector 1", ylab="Eigenvector 3",pch=ch,cex=1.5,cex.axis=1.5,cex.lab=1.5)
legend("topleft",leg,cex=1.5,col=cleg,pch=pleg, bty="n")
dev.off()
############ Figure S3: eigenvector 2 vs eigenvector 3.
postscript("FigS3.eps",paper="special",width = 8.0, height = 8.0,)
par(mar=c(5.1, 5.1, 4.1, 2.1))
plot(E$vectors[,2:3], col=colo,xlab="Eigenvector 2", ylab="Eigenvector 3",pch=ch,cex=1.5,cex.axis=1.5,cex.lab=1.5)
legend("topright",leg,cex=1.5,col=cleg,pch=pleg, bty="n")
dev.off()
# Final colour/symbol assignment combining both special-case sets:
# J7532 & J872 (i = 1, 3; symbol 2) and B.m. Ziyang & Pengshan
# (i = 30, 34; symbol 6).
colo=c()
ch=c()
for(i in 1:40)
{
if(P$V[i]==1)
{colo=c(colo,"cyan4")
ch=c(ch,3) }
else if(P$V[i]==2)
{colo=c(colo,"slateblue")
if(i==1 |i==3)
{ch=c(ch,2)}
else{ch=c(ch,4)}
}
else if(P$V[i]==3)
{colo=c(colo,"darkblue")
ch=c(ch,5)}
else if(i==30 |i==34)
{colo=c(colo,"black")
ch=c(ch,6)}
else
{colo=c(colo,"black")
ch=c(ch,1)}
}#japan
leg=c("Domesticated-V=1","Domesticated-V=2","Domesticated-V>3","Wild","J7532 & J872 ","B.m. Ziyang & Pengshan ")
cleg=c("cyan4","slateblue","darkblue","black","slateblue","black")
pleg=c(3,4,5,1,2,6)
###### Figure 2: eigenvector 3 vs eigenvector 4.
postscript("Fig2.eps",paper="special",width = 8.0, height = 8.0,)
par(mar=c(5.1, 5.1, 4.1, 2.1))
plot(E$vectors[,3:4], col=colo,xlab="Eigenvector 3", ylab="Eigenvector 4",pch=ch,cex=1.5,cex.axis=1.5,cex.lab=1.5)
legend("bottomright",leg,col=cleg,cex=1.5,pch=pleg,bty="n",box.lty=2)
# Bug fix: the original called dev.off() twice here; with only one open
# device the second call errors ("cannot shut down device 1").
dev.off()
|
c3d7bdb2d925a84381280ecdc59f0fb8728e353f | c22f7c83f03fe3f386460294de7a9bd407cb1c5a | /Causes-of-death.R | 0d4fbc5afea218ba624822b244a7e4ee700bce7c | [] | no_license | meltrn/3361-Coding | d8c821ef53a9c56d7dc5ba32486cad2efc32a391 | 2a803f31947ce88bc3406f2cc8c79d69869f0e1e | refs/heads/master | 2021-05-21T14:51:43.707535 | 2020-04-03T11:22:10 | 2020-04-03T11:22:10 | 252,686,955 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,020 | r | Causes-of-death.R | #causes of death in France 2001-2008
# Melissa Tran
# created: 1/04/2020
# -----------------------------------------------------------------------------
# (fix: the divider above was a bare run of '-' characters, which is not a
# comment in R and prevented the script from parsing/running)
#load packages
library(tidyverse)
# working with data ------------------------------------------------------------
# Read the raw export, keep only the columns of interest, rename them, drop
# causes with very low counts, and rank every row by its share of all deaths
# (coverage = deaths per 10,000-style percentage of the grand total).
data <- read_csv("CausesOfDeath_France_2001-2008_.csv") %>%
  select(-GEO, -UNIT, -AGE, -Value)%>% # keep variables: "cause of death", "sex", "year"
  rename(YEAR = TIME, # rename variables: "CAUSE", "SEX", "YEAR"
         CAUSE = ICD10) %>%
  filter(DEATHS > 1)%>% # filter out causes of death below threshold count
  mutate(total = sum(DEATHS),
         coverage = DEATHS*100/total) %>%
  arrange(desc(coverage))
# Analysing data for females -------------- ------------------------------------
female <- data%>% # data for females only
  filter(SEX == "Females") %>%
  select(YEAR, SEX, CAUSE, DEATHS)
#summary table for females_ grouped cause of death across years
f_summary <- female %>%
  group_by(CAUSE)%>%
  summarise(t_f = sum(DEATHS), #total no. of female deaths across 2001-2008
            m_f = mean(DEATHS), #mean no. deaths per year
            s_f = sd(DEATHS),
            n_f = n()) %>%
  arrange(desc(m_f)) %>% #arrange from most to least deaths
  mutate(total_f = sum(t_f), #add col. for total number of deaths
         coverage_f = t_f*100/total_f, #add col. for percentage of deaths covered by cause
         rank = rank(-coverage_f), #add col. for ranking from most to least deaths
         SEX = "Female") %>%
  ungroup() # fix: ungroup() was a bare zero-argument call outside the pipe
            # (missing %>% after mutate), which errors; the male section
            # below shows the intended form.
### final table: Leading cause of death for females ###
top10_f <- f_summary %>%
  filter(rank < 11) %>%
  select(CAUSE, rank, coverage_f, SEX)%>%
  rename(percentage = coverage_f)%>%
  arrange(rank)
view(top10_f)
# creating the plot--------------------------------------------------
#basic plot
# Horizontal bar chart of the top-10 female causes, bars ordered by share.
fp <- ggplot(top10_f, aes(x = percentage)) +
  geom_col(aes(y = reorder(CAUSE, percentage)), fill = "pink") +
  geom_rug()
#changing axis tick mark labels
fp1 <- fp +
  theme(axis.text.x = element_text(size = 12), #edit aesthetics, label orientation, colour
        axis.text.y = element_text(size = 10),
        axis.line = element_line(color = "grey", size = 0.5))+
  scale_x_continuous(name = "Percentage of total female deaths (%)")+
  scale_y_discrete(name = "Cause of death")+
  ggtitle("Leading causes of death in Females")
print(fp1)
#### obtain top 10 leading causes of death for males ------------------------------------
male <- data%>% # data for males only
  filter(SEX == "Males") %>%
  select(YEAR, SEX, CAUSE, DEATHS)
#summary table for males_ grouped cause of death across years
# Same pipeline as the female section: totals, means, share of all male
# deaths (coverage_m) and a rank from most to least deaths.
m_summary <- male %>%
  group_by(CAUSE)%>%
  summarise(t_m = sum(DEATHS),
            m_m = mean(DEATHS),
            s_m = sd(DEATHS),
            n_m = n()) %>%
  arrange(desc(m_m)) %>%
  mutate(total_m = sum(t_m),
         coverage_m = t_m*100/total_m,
         rank = rank(-coverage_m),
         SEX = "Male")%>%
  ungroup()
### final: Leading cause of death for males ###
# NOTE(review): unlike top10_f, coverage_m is not renamed to 'percentage';
# the male plot below deliberately maps coverage_m instead.
top10_m <- m_summary %>%
  filter(rank < 11) %>%
  select(CAUSE, rank, coverage_m, SEX)%>%
  arrange(rank)
view(top10_m)
# creating the plot--------------------------------------------------
#basic plot
mp <- ggplot(top10_m, aes(x = coverage_m)) +
  geom_col(aes(y = reorder(CAUSE, coverage_m)), fill = "sky blue") +
  geom_rug()
#changing axis tick mark labels
mp1 <- mp +
  theme(axis.text.x = element_text(size = 12), #edit aesthetics, label orientation, colour
        axis.text.y = element_text(size = 10),
        axis.line = element_line(color = "grey", size = 0.5))+
  scale_x_continuous(name = "Percentage of total male deaths (%)")+
  scale_y_discrete(name = "Cause of death")+
  ggtitle("Leading causes of death in Males")
print(mp1)
d6c929fba2ca5eaf47f5613ce7a22ef53eeb7d99 | 3a649ffa4f93b5ff1d9250baa0d59105d0d252ee | /MakeMetPlots.R | ba68c761ce8973cb8093bd8cb4ed1527db85a49e | [] | no_license | dgianotti/NearestMetStation | cacb2d8eeecb0b0ce1d7bc004c0996b41fc9cded | 29a90bc7c560e61d3f6788354fb9a36c25570903 | refs/heads/master | 2016-09-05T11:20:50.126368 | 2014-02-28T07:02:24 | 2014-02-28T07:02:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,796 | r | MakeMetPlots.R | # Make some quick figures from Master+Met data and do some simple analysis.
# Load some required packages:
# Attach a package, installing it first if it is not available.
# Factored out of three copy-pasted blocks (maps, mapdata, leaps); the
# printed / stop() messages are reproduced exactly as in the original.
#
# Args:
#   pkg: package name as a string.
load_or_install <- function(pkg) {
  loaded <- require(pkg, character.only = TRUE)
  if (!loaded) {
    print(paste("trying to install package", pkg))
    install.packages(pkg)
    loaded <- require(pkg, character.only = TRUE)
    if (loaded) {
      print(paste(pkg, "installed and loaded"))
      library(pkg, character.only = TRUE)
    }
    else {
      stop(paste("Could not install", pkg,
                 "You need to figure out how to install that manually before this function will work!"))
    }
  }
  invisible(loaded)
}
load_or_install("maps")
load_or_install("mapdata")
load_or_install("leaps")
# Open a pdf socket for plot output:
#pdf("MetPlots.pdf", paper="USr")
# Load the data:
master_plus_met <- read.csv("Master_Plus_Met.csv")
# Make a histogram of distances from met station to pheno site:
# (deduplicate by site coordinates so each site counts once)
lon_lat <- data.frame(master_plus_met$Longitude,master_plus_met$Latitude)
unique_lon_lat <- unique(lon_lat)
unique_indices <- as.integer(row.names(unique_lon_lat))
unique_distances <- master_plus_met$Met_Station_Distance_km[unique_indices]
hist(unique_distances)
# Make a map of distances to pheno-site
# Sites are binned by station distance: <10 km red through 100-150 km blue;
# sites beyond 150 km are not drawn.
map("worldHires")
points(unique_lon_lat[unique_distances<10,],pch='.',col="red")
points(unique_lon_lat[unique_distances<25 & unique_distances >= 10,],pch='.',col="orange")
points(unique_lon_lat[unique_distances<50 & unique_distances >= 25,],pch='.',col="yellow")
points(unique_lon_lat[unique_distances<100 & unique_distances >= 50,],pch='.',col="green")
points(unique_lon_lat[unique_distances<150 & unique_distances >= 100,],pch='.',col="blue")
# Palette reused below: one colour per lag window / phenology metric.
color_list <- c("lightslateblue","green4","maroon3","goldenrod4")
# Plot pheno-day versus a bunch of met variables:
# NOTE(review): columns selected by position — fragile if the CSV changes.
relevant_data <- master_plus_met[c(6,7,8,26,29:63)] # BUT WE ALSO NEED YEAR AND SITE ID!!
# Pheno day vs. Latitude
par(mar=c(5.1,8.1,4.1,2.1))
plot(relevant_data$Latitude,relevant_data$pheno_day,ylab="Pheno Day",xlab="Latitude")
# Pheno day vs. Latitude (Lat > 25)
par(mar=c(5.1,8.1,4.1,2.1))
plot(relevant_data$Latitude[relevant_data$Latitude > 25],
     relevant_data$pheno_day[relevant_data$Latitude > 25],ylab="Pheno Day",xlab="Latitude")
#boxplot(relevant_data$Biome,relevant_data$pheno_day,xlab="Pheno Day",ylab="Latitude")
# Pheno day vs. precip (1-30,31-60, 61-90)
# One colour per lag window; points() overlays on the axes set by plot().
par(mar=c(5.1,8.1,4.1,2.1))
plot(relevant_data$Precip_total_1to30days,relevant_data$pheno_day,
     ylab="Pheno Day",xlab="Precip [mm]",col=color_list[1],pch=20)
points(relevant_data$Precip_total_31to60days,relevant_data$pheno_day,col=color_list[2],pch=20)
points(relevant_data$Precip_total_61to90days,relevant_data$pheno_day,col=color_list[3],pch=20)
legend(x="bottomright",c("1-30 days","31-60 days","61-90 days"),
       col=color_list,pch=20)
# Pheno day vs. Tmin (1-30,31-60,61-90)
par(mar=c(5.1,8.1,4.1,2.1))
plot(relevant_data$Tmin_mean_1to30days,relevant_data$pheno_day,
     ylab="Pheno Day",xlab="mean T_min [C]",col=color_list[1],pch=20)
points(relevant_data$Tmin_mean_31to60days,relevant_data$pheno_day,col=color_list[2],pch=20)
points(relevant_data$Tmin_mean_61to90days,relevant_data$pheno_day,col=color_list[3],pch=20)
legend(x="bottomright",c("1-30 days","31-60 days","61-90 days"),
       col=color_list,pch=20)
# Pheno day vs. Tmax (1-30,31-60,61-90)
par(mar=c(5.1,8.1,4.1,2.1))
plot(relevant_data$Tmax_mean_1to30days,relevant_data$pheno_day,
     ylab="Pheno Day",xlab="mean T_max [C]",col=color_list[1],pch=20)
points(relevant_data$Tmax_mean_31to60days,relevant_data$pheno_day,col=color_list[2],pch=20)
points(relevant_data$Tmax_mean_61to90days,relevant_data$pheno_day,col=color_list[3],pch=20)
legend(x="bottomright",c("1-30 days","31-60 days","61-90 days"),
       col=color_list,pch=20)
# Pheno day vs. Tmean (1-30,31-60,61-90)
par(mar=c(5.1,8.1,4.1,2.1))
plot(relevant_data$Tmean_1to30days,relevant_data$pheno_day,
     ylab="Pheno Day",xlab="mean T_mean [C]",col=color_list[1],pch=20)
points(relevant_data$Tmean_31to60days,relevant_data$pheno_day,col=color_list[2],pch=20)
points(relevant_data$Tmean_61to90days,relevant_data$pheno_day,col=color_list[3],pch=20)
legend(x="bottomright",c("1-30 days","31-60 days","61-90 days"),
       col=color_list,pch=20)
#### Run a few regressions:
# Get the ending year of studies..
# NOTE(review): revalue() comes from plyr (not loaded in this file) and the
# result is discarded, so this line has no effect as written — presumably it
# was meant to be assigned back to master_plus_met$Biome. TODO confirm.
revalue(master_plus_met$Biome, c("tropical deciduous"="tropical_deciduous"))
# Studies with no explicit end year are treated as single-year studies.
master_plus_met$Year_Sampled_End[is.na(master_plus_met$Year_Sampled_End)] <-
  master_plus_met$Year_Sampled_Start[is.na(master_plus_met$Year_Sampled_End)]
master_plus_met$Mean_Sample_Year <- 0.5*(master_plus_met$Year_Sampled_Start +
                                           master_plus_met$Year_Sampled_End)
# First, get rid of some variables we won't regress against:
excluded_cols <- names(master_plus_met) %in%
  c("X","Student","Paper","Country","Site","Longitude","Study_Type",
    "Experimental_Unit","Exp_Unit_Method","Database","Response_Type","Genus",
    "Species","Reference_NA","Magnitude","Year_Sampled_Start","Year_Sampled_End",
    "Total_Years","Data_Thief","Reported_slope_yr","Reported_slope_C","Site_ID",
    "Met_Station_ID","Met_Station_Distance_km",
    "Tmin_median_1to30days","Tmin_median_31to60days","Tmin_median_61to90days",
    "Tmax_median_1to30days","Tmax_median_31to60days","Tmax_median_61to90days",
    "Range_Years")
regression_data <- master_plus_met[!excluded_cols]
# One data set per phenology metric; drop 'pheno_method' once it has been
# used to split the rows.
LF50_data <- subset(regression_data, regression_data$pheno_method == "LeafFall50")
LF50_data <- LF50_data[!(names(LF50_data) %in% c("pheno_method"))]
LF80_data <- subset(regression_data, regression_data$pheno_method == "LeafFall80")
LF80_data <- LF80_data[!(names(LF80_data) %in% c("pheno_method"))]
LF100_data <- subset(regression_data, regression_data$pheno_method == "LeafFall100")
LF100_data <- LF100_data[!(names(LF100_data) %in% c("pheno_method"))]
LAI0_data <- subset(regression_data, regression_data$pheno_method == "LAI_zero")
LAI0_data <- LAI0_data[!(names(LAI0_data) %in% c("pheno_method"))]
# Simplest (and dumbest) regression -- use everything:
LF50_model <- lm(pheno_day ~ ., data=LF50_data)
LF80_model <- lm(pheno_day ~ ., data=LF80_data)
LF100_model <- lm(pheno_day ~ ., data=LF100_data)
LAI0_model <- lm(pheno_day ~ ., data=LAI0_data)
## Better fitting: best-subset selection by BIC, one model per response type.
# Fits all-subsets regressions (leaps::regsubsets, up to 15 predictors),
# picks the subset with the lowest BIC, and refits a plain lm() on just the
# data columns whose names match the selected terms. Factored out of four
# copy-pasted pipelines (LF50 / LF80 / LF100 / LAI0); the fitted models are
# identical to the originals.
#
# Args:
#   dat: data frame containing a 'pheno_day' column; column 2 is excluded
#        from the subset search, as in the original per-dataset code.
# Returns:
#   The refit lm object.
fit_best_bic <- function(dat) {
  subsets <- regsubsets(pheno_day ~ ., data = dat[, -2], nbest = 1, nvmax = 15)
  subsets_summary <- summary(subsets)
  best_mod_number <- which.min(subsets_summary$bic)
  # Logical row of the 'which' matrix marking the terms in the best subset.
  a <- as.data.frame(subsets_summary$which[best_mod_number, ])
  col_names <- row.names(a)
  # Keep the response plus any data column whose name matches a selected
  # term. (Factor-expanded term names that match no raw column are silently
  # dropped — same behaviour as the original code.)
  columns_to_use <- names(dat) %in% c("pheno_day", col_names[a[, 1]])
  lm(pheno_day ~ ., dat[columns_to_use])
}
LF50_model_BIC <- fit_best_bic(LF50_data)
LF80_model_BIC <- fit_best_bic(LF80_data)
LF100_model_BIC <- fit_best_bic(LF100_data)
LAI0_model_BIC <- fit_best_bic(LAI0_data)
# Plot pheno-day vs. precip AND pheno-day versus expected pheno-day from precip-only linear effect
# (4 plots: unregressed, 30, 60, 90)
# First panel: raw pheno_day vs 1-30 day precipitation, one colour per
# phenology metric, with a simple per-metric regression line.
par(mar=c(5.1,8.1,4.1,2.1))
plot(LF50_data$Precip_total_1to30days,LF50_data$pheno_day,
     ylab="Pheno Day",xlab="Precipitation 1-30 days prior [mm]",
     col=color_list[1],pch=20)
tmp_mod <- lm(LF50_data$pheno_day ~ LF50_data$Precip_total_1to30days)
abline(tmp_mod,col=color_list[1])
points(LF80_data$Precip_total_1to30days,LF80_data$pheno_day,
       col=color_list[2],pch=20)
tmp_mod <- lm(LF80_data$pheno_day ~ LF80_data$Precip_total_1to30days)
abline(tmp_mod,col=color_list[2])
points(LF100_data$Precip_total_1to30days,LF100_data$pheno_day,
       col=color_list[3],pch=20)
tmp_mod <- lm(LF100_data$pheno_day ~ LF100_data$Precip_total_1to30days)
abline(tmp_mod,col=color_list[3])
points(LAI0_data$Precip_total_1to30days,LAI0_data$pheno_day,
       col=color_list[4],pch=20)
tmp_mod <- lm(LAI0_data$pheno_day ~ LAI0_data$Precip_total_1to30days)
abline(tmp_mod,col=color_list[4])
legend(x="bottomright",c("LF50","LF80","LF100","LAI0"),
       col=color_list,pch=20)
# Second panel: same x axis but the y values are the BIC models' fitted
# values (observed minus residuals), i.e. the predicted pheno day.
par(mar=c(5.1,8.1,4.1,2.1))
plot(LF50_data$Precip_total_1to30days,LF50_data$pheno_day - LF50_model_BIC$residuals,
     ylab="Predicted Pheno Day",xlab="Precipitation 1-30 days prior [mm]",
     col=color_list[1],pch=20)
tmp_mod <- lm(LF50_data$pheno_day - LF50_model_BIC$residuals~ LF50_data$Precip_total_1to30days)
abline(tmp_mod,col=color_list[1])
points(LF80_data$Precip_total_1to30days,LF80_data$pheno_day - LF80_model_BIC$residuals,
       col=color_list[2],pch=20)
tmp_mod <- lm(LF80_data$pheno_day - LF80_model_BIC$residuals~ LF80_data$Precip_total_1to30days)
abline(tmp_mod,col=color_list[2])
points(LF100_data$Precip_total_1to30days,LF100_data$pheno_day - LF100_model_BIC$residuals,
       col=color_list[3],pch=20)
tmp_mod <- lm(LF100_data$pheno_day - LF100_model_BIC$residuals ~ LF100_data$Precip_total_1to30days)
abline(tmp_mod,col=color_list[3])
points(LAI0_data$Precip_total_1to30days,LAI0_data$pheno_day - LAI0_model_BIC$residuals,
       col=color_list[4],pch=20)
tmp_mod <- lm(LAI0_data$pheno_day-LAI0_model_BIC$residuals ~ LAI0_data$Precip_total_1to30days)
abline(tmp_mod,col=color_list[4])
legend(x="bottomright",c("LF50","LF80","LF100","LAI0"),
       col=color_list,pch=20)
# The remaining panels below are planned but not yet implemented:
# Plot pheno-day vs. Tmin AND pheno-day versus expected pheno-day from Tmin-only linear effect
# (4 plots: unregressed, 30, 60, 90)
# Plot pheno-day vs. Tmax AND pheno-day versus expected pheno-day from Tmax-only linear effect
# (4 plots: unregressed, 30, 60, 90)
# Plot pheno-day vs. Tmean AND pheno-day versus expected pheno-day from Tmean-only linear effect
# (4 plots: unregressed, 30, 60, 90)
# Plot pheno-day vs. GDD_0 AND pheno-day versus expected pheno-day from GDD_0-only linear effect
# (4 plots: unregressed, 30, 60, 90)
# Plot pheno-day vs. GDD_10 AND pheno-day versus expected pheno-day from GDD_10-only linear effect
# (4 plots: unregressed, 30, 60, 90)
# Plot pheno-day vs. CDD_0 AND pheno-day versus expected pheno-day from CDD_0-only linear effect
# (4 plots: unregressed, 30, 60, 90)
# Plot pheno-day vs. CDD_10 AND pheno-day versus expected pheno-day from CDD_10-only linear effect
# (4 plots: unregressed, 30, 60, 90)
# Plot pheno-day vs. photoperiod AND pheno-day versus expected pheno-day from photoperiod-only linear effect
# (4 plots: unregressed, 30, 60, 90)
# Plot pheno-day versus modeled/expected pheno-day from all met variables.
# (And determine portion of variance explained by meteorology)
# Plot pheno-day versus modeled/expected pheno-day from latitude variables.
# (And determine portion of variance explained by latitude)
## Let's make some partial regression plots (aka 'added variable plots') and some partial residual plots
# Same install-or-load boilerplate as at the top of the file, here for 'car'.
loaded <- require("car")
if(!loaded){
  print("trying to install package car")
  install.packages("car")
  loaded <- require("car")
  if(loaded){
    print("car installed and loaded")
    library(car)
  }
  else {
    stop("Could not install car You need to figure out how to install that manually before this function will work!")
  }
}
# Added-variable and component+residual plots for each BIC-selected model.
avPlots(LF50_model_BIC)
avPlots(LF80_model_BIC)
avPlots(LF100_model_BIC)
avPlots(LAI0_model_BIC)
crPlots(LF50_model_BIC)
crPlots(LF80_model_BIC)
crPlots(LF100_model_BIC)
crPlots(LAI0_model_BIC)
# NOTE(review): the pdf() call near the top of the file is commented out, so
# this dev.off() has no matching device when the script is run as-is.
dev.off()
|
fdaf4f9ba3fc723b575c1aa8ea5e5b837b607158 | 09805a36f805fdb1458cabe92b7683432f19549b | /plot4.R | 34750a543b85594a93dd107843f5595c7efbb100 | [] | no_license | joaomaiaduarte/ExData_Plotting1 | b6aebe5c98018913eeee619059188194d4088ee5 | 21b15b7c6dfcf604d86b37357b6db3f9be5d69ce | refs/heads/master | 2021-01-16T19:15:27.193299 | 2015-03-08T21:16:34 | 2015-03-08T21:16:34 | 31,862,088 | 0 | 0 | null | 2015-03-08T19:22:05 | 2015-03-08T19:22:05 | null | UTF-8 | R | false | false | 1,564 | r | plot4.R | #download and unzip file %may be commented if data
# Download and extract the data only if the text file is not already present.
if(!file.exists("household_power_consumption.txt")){
  download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile = "household.zip", method = "curl")
  unzip("household.zip", overwrite = T)
}
#read data
# fread() with na.strings="?" turns the file's '?' markers into NA.
library(data.table)
data<-fread("household_power_consumption.txt", na.strings="?")
#convert character to Date
data$Date<-as.Date(data$Date,format = "%d/%m/%Y")
# Keep only the two days of interest: 2007-02-01 and 2007-02-02.
subsetData<-subset(data, Date %in% c(as.Date("2007/02/01",format = "%Y/%m/%d"), as.Date("2007/02/02",format = "%Y/%m/%d")))
#convert character to numeric
subsetData$Global_active_power<-as.numeric(subsetData$Global_active_power)
#convert character to time
# Combine Date and Time into POSIXlt timestamps for the x axis.
time<-strptime(paste(subsetData$Date, subsetData$Time), format="%Y-%m-%d %H:%M:%S")
#plot data to png device
# 2x2 grid, filled column-first (mfcol).
png(filename = "plot4.png", width = 480, height = 480)
par(mfcol = c(2,2))
#plot 1
plot(time,subsetData$Global_active_power,type = "l", ylab = "Global Active Power (kilowatts)", xlab = "")
#plot 2
plot(time,subsetData$Sub_metering_1,type = "l", ylab = "Energy sub metering",xlab = "")
lines(time,subsetData$Sub_metering_2, col="red")
lines(time,subsetData$Sub_metering_3, col="blue")
legend("topright", legend = c("Sub_metering_1","Sub_metering_2","Sub_metering_3"), col = c("black","red","blue"), lty="solid")
#plot 3
plot(time,subsetData$Voltage, type = "l", ylab = "Voltage", xlab = "datetime")
#plot 4
plot(time,subsetData$Global_reactive_power, type = "l", ylab = "Global_reactive_power", xlab = "datetime")
dev.off()
|
172f894458b07b5ec23bd664dc0400a27206a0bf | c639cbad1939137ae9845d7a3d4d33d60bcaa038 | /man/callExecutablePy-function.Rd | 673d4b75bcddd7c11ad918a32fea3ee303155db9 | [
"Artistic-2.0"
] | permissive | lamdv/rRice | 0fbff968d5798802726078264fb78e44655b2353 | d0261f358825fb8c5fe2399d64c2ee5de3c740fe | refs/heads/master | 2021-09-14T18:40:17.762372 | 2018-05-03T16:57:47 | 2018-05-03T16:57:47 | 107,843,725 | 0 | 3 | null | 2017-12-31T17:17:31 | 2017-10-22T06:48:22 | R | UTF-8 | R | false | true | 322 | rd | callExecutablePy-function.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/linkPython-functions.R
\name{callExecutablePy}
\alias{callExecutablePy}
\title{callExecutablePy}
\usage{
callExecutablePy()
}
\value{
nothing
}
\description{
This function calls test.py which will be allow to all python file to be
executable
}
|
776b7757d81040cf0a9da7c9688008b89bb649d1 | 77bf16c5cd8be1edfb5c62a433a2bc440b806915 | /TCGA/CombineScript.R | c02eb6dcfccf101123a98ec00b5751f6000e6c30 | [] | no_license | luederm/DKFZ | 5c558eda005fe792d2e0152bf152723b9274eb26 | bd5d71a8592da5c126048fd1faf40ba4e1065e5a | refs/heads/master | 2020-12-24T19:04:31.098790 | 2016-06-02T08:24:41 | 2016-06-02T08:24:41 | 59,029,063 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,503 | r | CombineScript.R | # @author Matthew Lueder
# @def Combines clinical and gene expression data from TCGA
# Make sure expression data was collected using HG-U133 Plus2 platform. This script uses justRMA to normalize
# Set this to base directory created when installing + extracting TCGA data
setwd("C:\\Users\\Matthew\\Desktop\\DKFZ\\TCGA\\DL3")
source("http://bioconductor.org/biocLite.R")
#biocLite("affy")
library(affy)
# Download/Install annotation packages
#biocLite("hgu133plus2cdf", suppressUpdates=T)
library(hgu133plus2cdf)
#biocLite("hgu133plus2.db", suppressUpdates=T)
library(hgu133plus2.db)
#biocLite("hgu133plus2probe", suppressUpdates=T)
library(hgu133plus2probe)
# Get list of CEL files
CEL_paths <- dir('./Expression-Genes/WUSM__HG-U133_Plus_2/Level_1', full.names = T, pattern = "CEL$")
# Read CEL files and Normalize/background correct (RMA via affy::justRMA)
eset <- justRMA(filenames = CEL_paths)
# Get expression data in form of a data frame
exprs <- exprs(eset)
# Assign gene names to probe ids (probes with no symbol map to NA)
sid <- rownames(exprs)
sym <- unlist(mget(sid, hgu133plus2SYMBOL, ifnotfound = NA))
rownames(exprs) <- sym
rm(sid, sym)
# Create patient barcode field from CEL file name
# Convert CEL file names to TCGA patient barcodes by keeping everything
# before the third hyphen (e.g. "TCGA-AB-2803-03A-....CEL" -> "TCGA-AB-2803").
#
# Args:
#   CEL.names: character vector of CEL file names.
# Returns:
#   Unnamed character vector of barcodes, same length as the input.
#   Names with fewer than three hyphens yield NA, as before.
# Fixes: partial argument match ('patter =' -> 'pattern =') and the
# grow-by-c() loop, replaced with a type-stable vapply().
toPatientBarcodes <- function(CEL.names) {
  vapply(CEL.names, function(name) {
    # Position of the third '-' determines the end of the barcode.
    index <- gregexpr(pattern = "-", name)[[1]][3] - 1
    substr(name, 0, index)
  }, character(1), USE.NAMES = FALSE)
}
# Changes precision: How to prevent?
# NOTE(review): rbind()ing a character row onto the numeric expression matrix
# coerces every value to character — that is the precision change noted above.
combined <- rbind( exprs, "bcr_patient_barcode" = toPatientBarcodes(colnames(exprs)) )
# Read in clinical data
clinical <- read.table("./Clinical/Biotab/nationwidechildrens.org_clinical_patient_laml.txt",
                       sep = "\t", header = T, stringsAsFactors = F)
# Reduce to patient information TODO: Keep CDE identifiers in separate object first?
clinical <- clinical[3:nrow(clinical),]
# Recode values: yes/no answers become 1/0, missing-value markers become NA.
clinical[clinical == "NO"] <- 0
clinical[clinical == "No"] <- 0
clinical[clinical == "YES"] <- 1
clinical[clinical == "Yes"] <- 1
clinical[clinical == "[Not Available]"] <- NA
clinical[clinical == "null"] <- NA
# Set up status and time fields for survival analysis.
# status: 1 = dead (event observed), 0 = censored.
# time:   days to death for events, days to last contact otherwise.
# Vectorized replacement of the original row-wise loop, which grew
# 'status'/'time' with c() and raised an error on NA vital_status;
# ifelse() propagates NA instead.
dead <- clinical$vital_status == "Dead"
clinical$status <- ifelse(dead, 1, 0)
clinical$time <- ifelse(dead, clinical$death_days_to, clinical$last_contact_days_to)
# Join expression (samples as rows after t()) with the clinical table;
# merge() keys on all shared column names — presumably the
# bcr_patient_barcode column on both sides. TODO confirm.
combined <- merge( t(combined), clinical )
colnames(clinical)
|
ff4ee4ebae81e3782b22088a23690e07b84316aa | 29585dff702209dd446c0ab52ceea046c58e384e | /dlmap/R/nmrk.dlcross.R | 5d3af847866f376395c3d676ec40aef43a0b4aaf | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 65 | r | nmrk.dlcross.R | nmrk.dlcross <- function(object, ...)
{
  # Delegates to nmar() (defined elsewhere, not in this file) applied to
  # the object's stored map — presumably the number of markers per
  # chromosome of the genetic map. TODO confirm against the map package.
  nmar(object$map)
}
|
cb30425d16f2ba19eb67d36da6ddd419b84e240d | c4522a72b9543374d9f6b74bd387a071490348d8 | /man/siddat.Rd | 438b3044af1f2f2b728fd48b24b74be04e6e3402 | [] | no_license | cran/SCCS | 8aab25b4cf8b2e547369a71d3b3508e25147667c | aa0e7c0a549f67ba7017712f13cdbe5e529c852b | refs/heads/master | 2022-07-07T05:02:30.814331 | 2022-07-05T13:20:09 | 2022-07-05T13:20:09 | 133,012,639 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 915 | rd | siddat.Rd | \name{siddat}
\docType{data}
\alias{siddat}
\title{Data on hexavalent vaccine and sudden infant death syndrome (SIDS)}
\description{
These simulated data comprise ages in days at hexavalent vaccination and SIDS in 300 cases.
}
\usage{siddat}
\format{A data frame containing 300 rows and 7 columns. The column names are 'case' (individual identifier), 'sta' (age on first day of the observation period), 'end' (age on last day of the nominal observation period), 'sids' (age at SIDS), 'hex' (age at first hexavalent vaccination), 'hexd2' (age at second dose), 'hexd3' (age at third dose).}
%\source{}
\references{Kuhnert R., Hecker, H., Poethko-Muller, C., Schlaud, M., Vennemann, M., Whitaker, H.J., and Farrington C. P. (2011). A modified self-controlled case series method to examine association between multidose vaccinations and death. Statistics in Medicine 30, 666-677.
}
\keyword{datasets}
|
ef267c57e42182236722e925b655b2ce0220e32a | c93106b682e33c6a46b058f6c0ae8ab54d6e7827 | /single_subject/scripts/make_conditions.R | a510f08eb93483ec4839db5459809b8cf2d0adf5 | [] | no_license | jdgryse/fROI_consistency | f3786ea677391e48ca01be6a5434d88b3f4c23ec | 003262ac37ebbc52312faff591c539a9b053d5e2 | refs/heads/master | 2020-06-01T11:24:51.906287 | 2019-06-07T15:00:20 | 2019-06-07T15:00:20 | 182,058,760 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 884 | r | make_conditions.R | ##########################################
## CONDITIONS GONZALEZ-CASTILLO
##########################################
# Builds the condition grids (activation percentile crossed with method
# parameters) and writes them to CSV for the downstream simulation scripts.
###MAXIMIZED LR
percval <- c(60,75,90,95) # fix: closing parenthesis was missing (syntax error)
kval <- c(0.88,1.5,3.68,8)
# Full crossing: each percentile repeated once per k value.
perc <- rep(percval, each=length(kval))
k <- rep(kval, times=length(percval))
conditions <- as.data.frame(cbind(perc,k))
colnames(conditions) <- c("perc","k")
write.csv(conditions,file="PATH/TO/SCRIPTS/conditions_mLR.txt",row.names=FALSE)
###ABT
percval <- c(60,75,90,95)
alphaval <- c(0.05,0.001)
betaval <- c(0.1,0.2,0.3)
# Each percentile is repeated once per (alpha, beta) pair; alpha and beta
# cycle with coprime periods (2 and 3), so all 6 pairs occur within each
# block of 6 rows belonging to one percentile.
perc <- rep(percval, each=length(alphaval)*length(betaval))
alpha <- rep(alphaval, times=length(betaval)*length(percval))
beta <- rep(betaval,times=length(alphaval)*length(percval))
conditions <- as.data.frame(cbind(perc,alpha,beta))
colnames(conditions) <- c("perc","alpha","beta")
write.csv(conditions,file="PATH/TO/SCRIPTS/conditions_abt.txt",row.names=FALSE)
|
79e2d6fb4b89d416fe141a7999a05ffa69478f29 | 025083fb3f57193e94ea887ad8ee16dc41ac275e | /man/missing_dates.Rd | 4d85fd6f88c5e47c675de334f1100b2e0d113eb5 | [] | no_license | bridachristian/DataQualityCheckEuracAlpEnv | 891671f84e6036c60d7733bfecce3fc9dd50ddc8 | 8941a20caf657cbc66e7f38ef5d553665457feb8 | refs/heads/master | 2020-08-26T20:17:15.864913 | 2019-10-23T15:08:38 | 2019-10-23T15:08:38 | 112,318,390 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,045 | rd | missing_dates.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/missing_dates.R
\name{missing_dates}
\alias{missing_dates}
\title{Detect missing dates in a data.frame time series and fill them with rows containing NAs}
\usage{
missing_dates(DATA, DATETIME_HEADER = DATETIME_HEADER,
RECORD_HEADER = RECORD_HEADER, DATETIME_SAMPLING = DATETIME_SAMPLING)
}
\description{
@param DATA data.frame having a column defined as POSIXct (datetime)
@param DATETIME_HEADER header corresponding to datetime
@param RECORD_HEADER header corresponding to record
@param DATETIME_SAMPLING time sampling (e.g. "15 min", "hour". See seq.POSIXt {base} on website)
}
\details{
@return a data.frame which contains a continuos timeseries
@export
@examples
missing_dates(DATA = mydata ,DATETIME_HEADER = "TIMESTAMP", RECORD_HEADER = "Record", DATETIME_SAMPLING = "15 min")
missing_dates(DATA = your data.frame ,DATETIME_HEADER = "your datetime header", RECORD_HEADER = "your datetime record", DATETIME_SAMPLING = "your datetime sampling")
}
|
68c9cc93af83b098d11b18acb1e5df16ab4391e6 | bc0e9a47ba0846c2ac0c04d8809883033a9dbb2a | /serie_temporelle.R | e66c307740c8d7f0668a4b67651993b7efcf03f5 | [] | no_license | Zaydy/stage_M1 | 608acdccc72b445fb100c6b0951e84c0481e08bf | 7a729c1c3f4c9eed8f8be921f63b32edbe66c566 | refs/heads/master | 2020-04-11T11:40:21.468145 | 2018-12-14T08:41:26 | 2018-12-14T08:41:26 | 161,755,885 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,870 | r | serie_temporelle.R | #### Création séries temporelles ####
# Install and load the required packages
if (!require("pacman")) install.packages("pacman")
pacman::p_load(pacman, data.table, dplyr, surveillance, RMySQL, stringr, ggplot2, dygraphs, xts)
## Data: SAU, DEP: Paris (75), C_AGE: 6 (all ages), RS: 40 (myocardial ischemia)
# Weekly data
# Data retrieval
# # If the data live in the database:
# sau_sem_france_dep <- dbReadTable(con, "sau_sem_france_dep")
# If provided as an RDS file (here the RDS file is the table extracted from the database)
sau_sem_france_dep <- readRDS(file = "./data/sau_sem_france_dep_bdd.rds")
# Subset to the stratum of interest
sau_sem_france_dep <- setDT(sau_sem_france_dep)[age_class == 6 & dep == 75 & synd_group == 40]
# Compute proportions (visits per 10,000 coded visits)
sau_sem_france_dep[, proportion_sau := round(10000*nb_visits/total_actes_codes, digits = 2)]
# Build an sts (surveillance time series) object
sau_sem_france_dep_sts <- sts(observed = sau_sem_france_dep$proportion_sau, # the proportions
                              start = c(2010, 01), # first week of 2010
                              frequency = 52, # 52 because we work with weekly data
                              epochAsDate = TRUE, # numeric values returned by epoch are converted to dates
                              epoch = as.numeric(as.Date(sau_sem_france_dep$d, origin = "1970-01-01"))) # convert the dates to numeric
# First plot
dygraph(data = sau_sem_france_dep_sts,
        main = "Nombre de passages pour une ischémie myocardique (SAU) pour 10 000 passages codés entre janvier 2010 et décembre 2017 sur Paris (75) pour tous âges",
        xlab = "Date", ylab = "Nombre de passages pour 10 000 passages codés") %>% dyRangeSelector()
# Build a Disprog object (needed by the RKI, Bayes and Farrington algorithms)
sau_sem_france_dep_disprog <- sts2disProg(sau_sem_france_dep_sts)
# Number of weeks in the series
nbr_sem <- length(sau_sem_france_dep_disprog$observed)
# Choose over how many years the alarms are computed:
# nbr_sem - (52 * number_of_years)
# Here we compute over the last 3 years
min_sem <- nbr_sem - (52*3)
##### RKI 3
# Apply the RKI3 outbreak-detection algorithm
sau_sem_france_dep_rki3 <- algo.rki3(disProgObj = sau_sem_france_dep_disprog,
                                     control = list(range = c(min_sem: nbr_sem))) # period under study
# Fill the @alarm slot with the alarms computed by the algorithm (0 before the monitored range)
sau_sem_france_dep_sts@alarm <- as.matrix(c(rep(0, times = min_sem-1), sau_sem_france_dep_rki3$alarm))
# Fill the @upperbound slot with the upper bounds computed by the algorithm
sau_sem_france_dep_sts@upperbound <- as.matrix(c(rep(0, times = min_sem - 1), sau_sem_france_dep_rki3$upperbound))
# Build the xts series of observed cases
serie1 <- data.frame(date = as.Date(sau_sem_france_dep_sts@epoch), obs = sau_sem_france_dep_sts@observed)
serie1 <- xts(serie1$observed1, order.by = serie1$date) # NOTE(review): relies on the column being named "observed1" by data.frame() -- confirm
# Build the xts series of upper detection bounds
serie2 <- data.frame(date = as.Date(sau_sem_france_dep_sts@epoch), upp = sau_sem_france_dep_sts@upperbound)
# Drop upper bounds equal to 0 (weeks outside the monitored range)
serie2 <- serie2[which(serie2$upp != 0),]
serie2 <- xts(serie2$upp, order.by = serie2$date)
# Merge the two series
serie <- cbind(serie1, serie2)
names(serie) <- c("cas_obs", "borne_sup")
# Build the dygraph plot with the upper prediction bound
dygraph_sau <- dygraph(serie,
                       main = "Nombre de passages pour une ischémie myocardique (SAU) pour 10 000 passages codés entre janvier 2010 et décembre 2017 sur Paris pour tous âges et seuil (méthode RKI3)",
                       xlab = "Date",
                       ylab = "Nombre de passages pour 10 000") %>%
  dySeries("cas_obs", label = "Proportion observée") %>%
  dySeries("borne_sup", label = "Borne supérieure de détection", strokePattern = "dashed") %>%
  dyOptions(drawGrid = FALSE) %>%
  dyRangeSelector()
dygraph_sau
# Add the alarms to the dygraph as annotations
for(i in seq_along(sau_sem_france_dep_sts@alarm)){
  if(sau_sem_france_dep_sts@alarm[i] == 1){
    dygraph_sau <- dygraph_sau %>% dyAnnotation(x = as.Date(sau_sem_france_dep_sts@epoch[i], origin = "1970-01-01"), text = "A",
                                                tooltip = as.character(sau_sem_france_dep_sts@observed[i]))
  }
}
dygraph_sau
###### Bayes 2
# Apply the Bayes2 outbreak-detection algorithm
sau_sem_france_dep_bayes2 <- algo.bayes2(disProgObj = sau_sem_france_dep_disprog,
                                         control = list(range = c(min_sem: nbr_sem))) # period under study
# Fill the @alarm slot with the alarms computed by the algorithm (0 before the monitored range)
sau_sem_france_dep_sts@alarm <- as.matrix(c(rep(0, times = min_sem-1), sau_sem_france_dep_bayes2$alarm))
# Fill the @upperbound slot with the upper bounds computed by the algorithm
sau_sem_france_dep_sts@upperbound <- as.matrix(c(rep(0, times = min_sem - 1), sau_sem_france_dep_bayes2$upperbound))
# Build the xts series of observed cases
serie1 <- data.frame(date = as.Date(sau_sem_france_dep_sts@epoch), obs = sau_sem_france_dep_sts@observed)
serie1 <- xts(serie1$observed1, order.by = serie1$date) # NOTE(review): relies on the column being named "observed1" by data.frame() -- confirm
# Build the xts series of upper detection bounds
serie2 <- data.frame(date = as.Date(sau_sem_france_dep_sts@epoch), upp = sau_sem_france_dep_sts@upperbound)
# Drop upper bounds equal to 0 (weeks outside the monitored range)
serie2 <- serie2[which(serie2$upp != 0),]
serie2 <- xts(serie2$upp, order.by = serie2$date)
# Merge the two series
serie <- cbind(serie1, serie2)
names(serie) <- c("cas_obs", "borne_sup")
# Build the dygraph plot with the upper prediction bound
dygraph_sau <- dygraph(serie,
                       main = "Nombre de passages pour une ischémie myocardique (SAU) pour 10 000 passages codés entre janvier 2010 et décembre 2017 sur Paris pour tous âges et seuil (méthode Bayes 2)",
                       xlab = "Date",
                       ylab = "Nombre de passages pour 10 000") %>%
  dySeries("cas_obs", label = "Proportion observée") %>%
  dySeries("borne_sup", label = "Borne supérieure de détection", strokePattern = "dashed") %>%
  dyOptions(drawGrid = FALSE) %>%
  dyRangeSelector()
dygraph_sau
# Add the alarms to the dygraph as annotations
for(i in seq_along(sau_sem_france_dep_sts@alarm)){
  if(sau_sem_france_dep_sts@alarm[i] == 1){
    dygraph_sau <- dygraph_sau %>% dyAnnotation(x = as.Date(sau_sem_france_dep_sts@epoch[i], origin = "1970-01-01"), text = "A",
                                                tooltip = as.character(sau_sem_france_dep_sts@observed[i]))
  }
}
dygraph_sau
###### Farrington
# Apply the Farrington outbreak-detection algorithm
sau_sem_france_dep_farr <- algo.farrington(disProgObj = sau_sem_france_dep_disprog,
                                           control = list(range = c(min_sem: nbr_sem))) # period under study
# Fill the @alarm slot with the alarms computed by the algorithm (0 before the monitored range)
sau_sem_france_dep_sts@alarm <- as.matrix(c(rep(0, times = min_sem-1), sau_sem_france_dep_farr$alarm))
# Fill the @upperbound slot with the upper bounds computed by the algorithm
sau_sem_france_dep_sts@upperbound <- as.matrix(c(rep(0, times = min_sem - 1), sau_sem_france_dep_farr$upperbound))
# Build the xts series of observed cases
serie1 <- data.frame(date = as.Date(sau_sem_france_dep_sts@epoch), obs = sau_sem_france_dep_sts@observed)
serie1 <- xts(serie1$observed1, order.by = serie1$date) # NOTE(review): relies on the column being named "observed1" by data.frame() -- confirm
# Build the xts series of upper detection bounds
serie2 <- data.frame(date = as.Date(sau_sem_france_dep_sts@epoch), upp = sau_sem_france_dep_sts@upperbound)
# Drop upper bounds equal to 0 (weeks outside the monitored range)
serie2 <- serie2[which(serie2$upp != 0),]
serie2 <- xts(serie2$upp, order.by = serie2$date)
# Merge the two series
serie <- cbind(serie1, serie2)
names(serie) <- c("cas_obs", "borne_sup")
# Build the dygraph plot with the upper prediction bound
dygraph_sau <- dygraph(serie,
                       main = "Nombre de passages pour une ischémie myocardique (SAU) pour 10 000 passages codés entre janvier 2010 et décembre 2017 sur Paris pour tous âges et seuil (méthode Farrington)",
                       xlab = "Date",
                       ylab = "Nombre de passages pour 10 000") %>%
  dySeries("cas_obs", label = "Proportion observée") %>%
  dySeries("borne_sup", label = "Borne supérieure de détection", strokePattern = "dashed") %>%
  dyOptions(drawGrid = FALSE) %>%
  dyRangeSelector()
dygraph_sau
# Add the alarms to the dygraph as annotations
for(i in seq_along(sau_sem_france_dep_sts@alarm)){
  if(sau_sem_france_dep_sts@alarm[i] == 1){
    dygraph_sau <- dygraph_sau %>% dyAnnotation(x = as.Date(sau_sem_france_dep_sts@epoch[i], origin = "1970-01-01"), text = "A",
                                                tooltip = as.character(sau_sem_france_dep_sts@observed[i]))
  }
}
dygraph_sau
##### EARS C2
# NOTE: this algorithm takes an sts object, not a Disprog object
# Slots of the result are accessed with "@", not "$"
# Apply the EARS C2 outbreak-detection algorithm
sau_sem_france_dep_earsc2 <- earsC(sts = sau_sem_france_dep_sts,
                                   control = list(method = "C2", range = c(min_sem: nbr_sem))) # period under study
# Fill the @alarm slot with the alarms computed by the algorithm (0 before the monitored range)
# Mind the "@" inside the call: earsC returns an sts object, not a list
sau_sem_france_dep_sts@alarm <- as.matrix(c(rep(0, times = min_sem-1), sau_sem_france_dep_earsc2@alarm))
# Fill the @upperbound slot with the upper bounds computed by the algorithm
# Mind the "@" inside the call: earsC returns an sts object, not a list
sau_sem_france_dep_sts@upperbound <- as.matrix(c(rep(0, times = min_sem - 1), sau_sem_france_dep_earsc2@upperbound))
# Build the xts series of observed cases
serie1 <- data.frame(date = as.Date(sau_sem_france_dep_sts@epoch), obs = sau_sem_france_dep_sts@observed)
serie1 <- xts(serie1$observed1, order.by = serie1$date) # NOTE(review): relies on the column being named "observed1" by data.frame() -- confirm
# Build the xts series of upper detection bounds
serie2 <- data.frame(date = as.Date(sau_sem_france_dep_sts@epoch), upp = sau_sem_france_dep_sts@upperbound)
# Drop upper bounds equal to 0 (weeks outside the monitored range)
serie2 <- serie2[which(serie2$upp != 0),]
serie2 <- xts(serie2$upp, order.by = serie2$date)
# Merge the two series
serie <- cbind(serie1, serie2)
names(serie) <- c("cas_obs", "borne_sup")
# Build the dygraph plot with the upper prediction bound
dygraph_sau <- dygraph(serie,
                       main = "Nombre de passages pour une ischémie myocardique (SAU) pour 10 000 passages codés entre janvier 2010 et décembre 2017 sur Paris pour tous âges et seuil (méthode EARS C2)",
                       xlab = "Date",
                       ylab = "Nombre de passages pour 10 000") %>%
  dySeries("cas_obs", label = "Proportion observée") %>%
  dySeries("borne_sup", label = "Borne supérieure de détection", strokePattern = "dashed") %>%
  dyOptions(drawGrid = FALSE) %>%
  dyRangeSelector()
dygraph_sau
# Add the alarms to the dygraph as annotations
for(i in seq_along(sau_sem_france_dep_sts@alarm)){
  if(sau_sem_france_dep_sts@alarm[i] == 1){
    dygraph_sau <- dygraph_sau %>% dyAnnotation(x = as.Date(sau_sem_france_dep_sts@epoch[i], origin = "1970-01-01"), text = "A",
                                                tooltip = as.character(sau_sem_france_dep_sts@observed[i]))
  }
}
dygraph_sau
|
44ed9d1d09b17428174bdfc447edcf58881c67cc | 4af0f7862da50b8a20b8554260285ad32e3840cf | /scripts/ch02/is-element.r | bd149419b5a9121b9f2d0e6d05da0d7c16830ba8 | [] | no_license | StefanoCiotti/MyProgectsFirst | aefd345971c5578dfbec7662d11c3f368d6c17b7 | 04794b634b9384da62ae6ba926fd59ca5a7d3d13 | refs/heads/master | 2020-04-03T16:35:38.941185 | 2018-10-30T14:05:55 | 2018-10-30T16:57:26 | 155,099,726 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,235 | r | is-element.r | op <- options(width = 60, stringsAsFactors = FALSE)
# Demo of is.element()-style matching: build three patient cohorts
# (begin / middle / end of an experiment) and join their weights.
# Build the "begin" cohort: six patients and their baseline weights.
(begin.patients <- paste(LETTERS[1 : 6], '.', ' Smith',
   sep = ''))
set.seed(21)  # reproducible random weights
(begin.weight <- round(rnorm(6, 250, 25)))
begin.experiment <- data.frame(name = begin.patients,
   weight = begin.weight)
begin.experiment
# Patients who join mid-experiment
(middle.patients <- paste(LETTERS[7 : 9], '.', ' Smith',
   sep = ''))
(middle.weight <- round(rnorm(3, 200, 20)))
middle.experiment <- data.frame(name = middle.patients,
   weight = middle.weight)
middle.experiment
# Patients still present at the end: 3 sampled from "begin", 2 from "middle"
(end.patients <- c(sample(begin.patients, 3),
   sample(middle.patients, 2)))
(end.weight <- round(rnorm(5, 100, 5)))
end.experiment <- data.frame(name = end.patients,
   weight = end.weight)
end.experiment
# Which begin-patients are also end-patients?
(m <- is.element(begin.experiment$name, end.experiment$name))
(begin.end <- begin.experiment[m, ])
(p.names <- begin.experiment[m, 1])
# Side-by-side begin/end records for the shared patients
(patients <- cbind(begin.experiment[m, ],
   end.experiment[is.element(end.experiment$name, p.names), ]))
# Stack the two name columns (1, 3) and the two weight columns (2, 4) into long form
(p.names <- stack(patients[, c(1, 3)]))
(weights <- stack(patients[, c(2, 4)])[, 1])
(experiment <- data.frame(p.names, weights))
levels(experiment$ind) <- c('begin', 'end')
names(experiment)[1 : 2] <- c('name', 'time')
experiment
# Mean weight at begin vs end
tapply(experiment$weights, experiment$time, mean)
|
a26a3523a7d16a35f6e08fa91db686ee0a1bb957 | fbd8b4abf8e3abefa88544b2d45decb53b061c70 | /R/lc_IP_bfs.R | 8b90134d4bdc5587bd5482aa86d12c4acfd9dadb | [
"MIT"
] | permissive | bbcon/Get-Monthly-Data-Industrial-Production-from-BFS | 9262273f215d2aa569165904ba7ba6cad2be7437 | b8d7eb2cfc34098df4091cc7d55c41bee64dc4cd | refs/heads/main | 2023-03-01T00:10:01.842028 | 2021-02-06T14:38:46 | 2021-02-06T14:38:46 | 336,550,292 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 722 | r | lc_IP_bfs.R | rm(list=ls())
# Download, rename and tidy the monthly Industrial Production (IP) data from BFS.
library(tidyverse)
# Source functions
source("./R/utils/f.getIP.R")
# Download IP (monthly) data
f.getIP()
# Rename the downloaded file: keep only the most recently modified .xlsx.
# Fixed: anchor the pattern so ".xlsx" must be a file extension (the old
# pattern ".xlsx" matched any character before "xlsx"), and use TRUE instead
# of the reassignable shorthand T.
details <- file.info(list.files(recursive = TRUE, pattern = "\\.xlsx$"))
details <- details[with(details, order(as.POSIXct(mtime), decreasing = TRUE))[1], ] # only keep most recent
file.rename(rownames(details), "data/IP_monthly.xlsx")
# Make it tidy
IP_data <- readxl::read_excel("data/IP_monthly.xlsx", skip = 2)
colnames(IP_data)[1:8] <- c("adjustments", "adjustments_lab", "indORchange", "indORchange_lab", "nomORreal", "nomORreal_lab", "var", "var_lab")
# Fill merged-cell gaps downwards in every column; everything() is the
# tidyselect idiom (passing a character vector of names is deprecated).
IP_data <- IP_data %>%
  tidyr::fill(dplyr::everything(), .direction = "down")
save(IP_data, file = "R/lc_IP_bfs.RData")
|
c10b49e42c408932bf2e1e2effd311902f395766 | e7e0ccce84c80113d7aba41458007dd42127a94c | /src/class_prep_pmsignature.R | 5c0535ea447b46b626c294e23726d2bb434518b1 | [] | no_license | halasadi/ancient-damage | ea10ea94325b66b129c1d4e9e5bf4827e5377ad2 | 51387d59d3436d796a2621d3dd72afbec48f981a | refs/heads/master | 2020-04-12T06:42:24.364980 | 2018-07-28T22:45:30 | 2018-07-28T22:45:30 | 62,754,123 | 2 | 0 | null | 2016-09-22T01:13:50 | 2016-07-06T21:20:43 | HTML | UTF-8 | R | false | false | 8,088 | r | class_prep_pmsignature.R |
####### Class preparation for pm signature ####################
#### This script is aimed at creating the MutationFeatureData class
#### from the Lindo2016 data that we shall use for pmsignature model.
# Load the (samples x signatures) count matrix for Lindo2016
signature_counts <- get(load("../summary_data/signature-counts-clubbed-Lindo2016.rda"))
signature_set <- colnames(signature_counts)
# Split each 8-character signature name (2 left-flank bases, 4-character
# substitution like "C->T", 2 right-flank bases) into single characters
sig_split <- do.call(rbind, lapply(colnames(signature_counts), function(x) strsplit(as.character(x), split="")[[1]]))
sig_split[,1]  # quick interactive look at the first flanking base
# Multi-pattern gsub: apply gsub() once per (pattern, replacement) pair,
# sequentially, so later patterns also see text produced by earlier
# replacements. Extra arguments are forwarded to gsub().
#
# @param pattern character vector of patterns
# @param replacement character vector of replacements (same length as pattern)
# @param x character vector to modify
# @param ... passed on to gsub() (e.g. fixed = TRUE)
# @return x with all substitutions applied in order
gsub2 <- function(pattern, replacement, x, ...) {
  # seq_along() is safe when pattern is empty, unlike 1:length(pattern)
  for (i in seq_along(pattern))
    x <- gsub(pattern[i], replacement[i], x, ...)
  x
}
# Recode the four flanking bases (A,C,G,T -> 1,2,3,4); columns 1-2 are the
# 5' flank, columns 7-8 the 3' flank of the 8-character signature string.
site_left_2 <- gsub2(c("A","C","G","T"), c(1,2,3,4), x = sig_split[, 1])
site_left_1 <- gsub2(c("A","C","G","T"), c(1,2,3,4), x = sig_split[, 2])
site_right_1 <- gsub2(c("A","C","G","T"), c(1,2,3,4), x = sig_split[, 7])
site_right_2 <- gsub2(c("A","C","G","T"), c(1,2,3,4), x = sig_split[, 8])
# Columns 3-6 hold the substitution (e.g. "C->T"); recode it to 1..6
sub_pattern <- sapply(seq_len(nrow(sig_split)), function(x) paste(sig_split[x, 3:6], collapse = ""))
from <- c("C->A", "C->G", "C->T", "T->A", "T->C", "T->G")  # use <- (not =) for top-level assignment
to <- c(1, 2, 3, 4, 5, 6)
substitute <- gsub2(from, to, sub_pattern)
# One comma-separated feature vector per signature:
# substitution,left2,left1,right1,right2
mutation_features <- sapply(seq_len(nrow(sig_split)),
                            function(m) paste0(substitute[m], ",", site_left_2[m], ",", site_left_1[m], ",", site_right_1[m], ",", site_right_2[m]))
temp <- get(load("../summary_data/signature-counts-Lindo2016.rda"))
sample_names <- rownames(temp)
# Lookup tables mapping names -> indices
lookupSampleInd <- seq_along(sample_names)
names(lookupSampleInd) <- sample_names
lookupFeatInd <- seq_along(mutation_features)
names(lookupFeatInd) <- mutation_features
# Fixed: removed a leftover `w <- which(tableCount > 0, arr.ind=TRUE)` line
# that referenced the undefined object `tableCount`, and a stray `out$`
# fragment that made the following `library(slam)` line unparseable.
row_col_indices <- which(signature_counts > 0, arr.ind = TRUE)
# Fixed: simple_triplet_matrix() expects (i, j, v); coercing a dense matrix
# needs as.simple_triplet_matrix() (the form CheckCounts() below also uses).
out <- slam::as.simple_triplet_matrix(signature_counts)
library(slam)
# Coerce a document-term count matrix to a slam simple_triplet_matrix,
# dropping all-zero (blank) documents. A TermDocumentMatrix is transposed
# first so that rows are documents.
#
# @param counts matrix-like counts (matrix, DocumentTermMatrix or TermDocumentMatrix)
# @return a slam::simple_triplet_matrix with named rows/columns and no empty rows
CheckCounts <- function(counts){
  # inherits() is the robust class test; class(x)[1] == "..." misses subclasses
  if(inherits(counts, "TermDocumentMatrix")){ counts <- t(counts) }
  # Make sure both dimensions are named (downstream code indexes by name)
  if(is.null(dimnames(counts)[[1]])){ dimnames(counts)[[1]] <- paste("doc", seq_len(nrow(counts))) }
  if(is.null(dimnames(counts)[[2]])){ dimnames(counts)[[2]] <- paste("wrd", seq_len(ncol(counts))) }
  empty <- row_sums(counts) == 0
  if(sum(empty) != 0){
    counts <- counts[!empty,]
    cat(paste("Removed", sum(empty), "blank documents.\n")) }
  return(as.simple_triplet_matrix(counts))
}
# Assemble the pmsignature MutationFeatureData object and fit K = 4 signatures.
stm <- CheckCounts(signature_counts)
# (feature, sample, count) triplets for every non-zero cell
proc_count <- cbind(stm$j, stm$i, stm$v)
rownames(proc_count) <- NULL
mut_features_mat <- data.frame(cbind(substitute, site_left_2, site_left_1, site_right_1, site_right_2))
rownames(mut_features_mat) <- mutation_features
colnames(mut_features_mat) <- NULL  # use <- (not =) for assignment
mut_features_mat <- as.matrix(apply(mut_features_mat, c(1, 2), function(x) as.numeric(x)))
type <- "independent"
# numBases = substitution position + 4 flanking bases. It was never defined in
# the original script, which made every line using it error at run time.
numBases <- 5
flankingBasesNum <- as.integer(numBases)
trDir <- FALSE
# Feature dimensions: 6 substitution types, 4 bases per flanking position,
# optionally 2 transcription directions (none here since trDir is FALSE)
fdim <- c(6, rep(4, numBases - 1), rep(2, as.integer(trDir)))
library(pmsignature)
# Fixed: removed an identical, unassigned new("MutationFeatureData", ...) call
# whose result was silently discarded.
G <- new(Class = "MutationFeatureData",
         type = type,
         flankingBasesNum = as.integer(numBases),
         transcriptionDirection = trDir,
         possibleFeatures = as.integer(fdim),
         featureVectorList = t(mut_features_mat),
         sampleList = sample_names,
         countData = t(proc_count),
         mutationPosition = data.frame()
)
out <- slot(G, "countData")
Param <- getPMSignature(G, K = 4)
save(Param, file = "../rda/pmsignature_fit_K_4.rda")
visPMSignature(Param, 1)
visPMSignature(Param, 2)
visPMSignature(Param, 3)
visPMSignature(Param, 4)
# Per-sample signature loadings from the fitted pmsignature model
omega <- slot(Param, "sampleSignatureDistribution")
library(CountClust)
# Sample annotation: the first 25 samples are ancient, the last 25 modern
annotation <- data.frame(
  sample_id = paste0("X", c(1:NROW(omega))),
  tissue_label = factor(c(rep("Ancient",25), rep("Modern",25)))
)
rownames(omega) <- annotation$sample_id;
# Structure plot of the signature memberships (C->T/G->A features included)
StructureGGplot(omega = omega,
                annotation = annotation,
                palette = RColorBrewer::brewer.pal(8, "Accent"),
                yaxis_label = "Development Phase",
                order_sample = FALSE,
                figure_title = paste0("StructurePlot: K=", dim(omega)[2],": pmsignature: with C->T/G->A"),
                axis_tick = list(axis_ticks_length = .1,
                                 axis_ticks_lwd_y = .1,
                                 axis_ticks_lwd_x = .1,
                                 axis_label_size = 7,
                                 axis_label_face = "bold"))
######### Reperform topic model removing C -> T ###########
# Drop all features whose substitution code is 3 (C->T / G->A, the
# characteristic ancient-DNA deamination pattern) and refit pmsignature.
indices_noCtoT <- which(mut_features_mat[,1]!=3);
dim(signature_counts)
signature_counts_noCtoT <- signature_counts[,indices_noCtoT];
mut_features_mat_noCtoT <- mut_features_mat[indices_noCtoT,];
stm <- CheckCounts(signature_counts_noCtoT)
# (feature, sample, count) triplets for every non-zero cell
proc_count_noCtoT <- cbind(stm$j, stm$i, stm$v)
rownames(proc_count_noCtoT) <- NULL
# Note: type is "custom" here (feature subset), unlike the full-feature fit
G <- new(Class = "MutationFeatureData",
         type = "custom",
         flankingBasesNum = as.integer(numBases),
         transcriptionDirection = trDir,
         possibleFeatures = as.integer(fdim),
         featureVectorList = t(mut_features_mat_noCtoT),
         sampleList = sample_names,
         countData = t(proc_count_noCtoT),
         mutationPosition = data.frame()
)
Param <- getPMSignature(G, K = 4, numInit = 5)
visPMSignature(Param, 1)
visPMSignature(Param, 2)
visPMSignature(Param, 3)
visPMSignature(Param, 4)
# Per-sample signature loadings for the no-C->T fit
omega <- slot(Param, "sampleSignatureDistribution")
library(CountClust)
# Sample annotation: the first 25 samples are ancient, the last 25 modern
annotation <- data.frame(
  sample_id = paste0("X", c(1:NROW(omega))),
  tissue_label = factor(c(rep("Ancient",25), rep("Modern",25)))
)
rownames(omega) <- annotation$sample_id;
StructureGGplot(omega = omega,
                annotation = annotation,
                palette = RColorBrewer::brewer.pal(8, "Accent"),
                yaxis_label = "Development Phase",
                order_sample = FALSE,
                figure_title = paste0("StructurePlot: K=", dim(omega)[2],": pmsignature: with C->T/G->A"),
                axis_tick = list(axis_ticks_length = .1,
                                 axis_ticks_lwd_y = .1,
                                 axis_ticks_lwd_x = .1,
                                 axis_label_size = 7,
                                 axis_label_face = "bold"))
###########################################################
# PCA and CountClust topic model on the counts without C->T / G->A features.
signature_counts_noCtoT
# PCA on voom-transformed (log-cpm) counts; samples are rows after t()
pr <- prcomp(t(limma::voom(t(signature_counts_noCtoT))$E))
pc_data_frame <- data.frame("PC"=pr$x,
                            "labels"=c(rep("Ancient",25),
                                       rep("Modern",25)))
qplot(PC.PC1, PC.PC2,
      data=pc_data_frame,
      colour=labels)
library(CountClust)
# Fit grade-of-membership models for K = 2..4
topics_clus <- FitGoM(signature_counts_noCtoT,
                      tol=0.1,
                      K=2:4)
save(topics_clus, file="../rda/CountClust_output_Lindo2016_without_C_to_T.rda")
topics_clus <- get(load("../rda/CountClust_output_Lindo2016_without_C_to_T.rda"));
# Membership proportions for the K = 4 fit
omega <- topics_clus$clust_4$omega
annotation <- data.frame(
  sample_id = paste0("X", c(1:NROW(omega))),
  tissue_label = c(rep("Ancient",25),rep("Modern",25))
)
rownames(omega) <- annotation$sample_id;
StructureGGplot(omega = omega,
                annotation = annotation,
                palette = rev(RColorBrewer::brewer.pal(8, "Accent")),
                yaxis_label = "Development Phase",
                order_sample = FALSE,
                figure_title = paste0("StructurePlot: K=", dim(omega)[2],", no C -> T / G -> A"),
                axis_tick = list(axis_ticks_length = .1,
                                 axis_ticks_lwd_y = .1,
                                 axis_ticks_lwd_x = .1,
                                 axis_label_size = 7,
                                 axis_label_face = "bold"))
# Topic-word (signature) distributions for K = 4; inspect the smallest loadings
theta <- topics_clus$clust_4$theta;
sort(theta[,1])[1:10]
sort(theta[,2])[1:10]
# Map the top distinguishing features back to signature names
signature_set_noCtoT <- signature_set[indices_noCtoT]
apply(ExtractTopFeatures(theta, top_features = 10, method="poisson", options="min"), c(1,2), function(x) signature_set_noCtoT [x])
|
02584f36129a4549933bd288cf3d1ae6bbafcc58 | d675b056cc4697b8b84d082dfa771c5af80260f8 | /man/jaccardOnly.Rd | 84aa6f3f134b2fa519164232e85803aba06a7c70 | [] | no_license | ADotDong/RankBindingSimilarity | a6d2ad0be61c0ccccf2713ac294b2ae865bdda1c | 43ed622f9f1bd3d8c129e4824c42f959dd29b64f | refs/heads/master | 2023-01-10T04:44:48.734634 | 2020-11-05T04:46:45 | 2020-11-05T04:46:45 | 295,546,988 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,043 | rd | jaccardOnly.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jaccardOnly.R
\name{jaccardOnly}
\alias{jaccardOnly}
\title{Ranks bed file similarities without significance}
\usage{
jaccardOnly(bed1, folder_dir)
}
\arguments{
\item{bed1}{The file path string of a query bed file to be compared to the database files.}
\item{folder_dir}{The file path string of the folder containing the database bed files to compare against.}
}
\value{
A dataframe with four columns, ranked by jaccard index. Contains the name of the database file, the respective jaccard index, the proportion of intersection similarity with bed1, and proportion of intersection similarity with the database file.
}
\description{
The function compares a given query bed file with multiple bed files, and ranks the relative similarity between each file pairing, computed using jaccard indexes, where 0 has no similarities and 1 has an identical file. Will not provide significance values, but will run faster.
}
\examples{
jaccardOnly("/dir/bed1.txt","/dir/folder_dir")
}
|
0c386b4cd7fcf6a5adec676dd98f427c3b2f4a12 | 5fcf46719eaf96868b9517c0c6bfb3dd717b62e3 | /uploadHelper.R | 84e262834865b50f2d140ae5fc71ec69024f9f96 | [] | no_license | mi-erasmusmc/PredictionLibrary | d08059dcc97ddf4b8766b4961b037e7ea72bbf75 | 5ca4f08cf881a23e9d6a1b4fe950e327e8d0fc91 | refs/heads/main | 2023-04-17T21:58:25.056445 | 2021-05-03T14:59:31 | 2021-05-03T14:59:31 | 331,942,090 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,466 | r | uploadHelper.R | library(DatabaseConnector)
library(DBI)
library(odbc)
# dataLocation <- "~/git/PredictionLibrary/dataHoldingFolder/Ross D. Williamstestmodel4/"
# Upload a PatientLevelPrediction *development* result to the results database.
#
# Reads inputdata.csv, target.json, outcome.json and plpResult.rds from
# `dataLocation`, inserts any missing reference rows (database, target/outcome
# cohorts, researcher, model), creates a new row in `results`, and appends the
# performance tables (prediction distribution, covariate summary, threshold
# summary, calibration summary, evaluation statistics, demographic summary)
# plus the population/model/covariate settings.
#
# @param dataLocation folder containing inputdata.csv, target.json,
#   outcome.json and plpResult.rds
# @param con an open DBI connection to the results database
#
# NOTE(review): the lookup queries are built by string concatenation, so names
# containing quotes will break them (SQL-injection-prone) -- consider
# parameterized queries. Relies on insertSqlData() defined elsewhere.
addUserDevToDatabase <- function(dataLocation, con){
  #todo: do a bunch of checks lol
  inputdata <- readr::read_csv(file.path(dataLocation, 'inputdata.csv'))
  target_json <- readr::read_file(file.path(dataLocation, 'target.json'))
  outcome_json <- readr::read_file(file.path(dataLocation, 'outcome.json'))
  colnames(inputdata) <- SqlRender::camelCaseToSnakeCase(colnames(inputdata))

  # look up database_id; if absent, insert the database and fetch the new id
  database_id <- DBI::dbGetQuery(con, paste0("SELECT database_id FROM databases WHERE database_name = '", inputdata$database_name,"';"))[,1]
  if (identical(database_id, integer(0))){
    database_id <- insertSqlData(table = "databases",conn = con, primaryKey = "database_id", inputdata$database_name,
                                 inputdata$database_acronym, inputdata$database_desc, inputdata$database_type)[,1]
    database_id <- DBI::dbGetQuery(con, paste0("SELECT database_id FROM databases WHERE database_name = '", inputdata$database_name,"';"))[,1]
  }

  # look up target cohort id; if absent, insert the cohort and use the new id
  target_id <- DBI::dbGetQuery(con, paste0("SELECT cohort_id FROM cohorts WHERE cohort_name = '", inputdata$target_name,"';"))[,1]
  if (identical(target_id, integer(0))){
    target_id <- insertSqlData(table = "cohorts", conn = con, primaryKey = "cohort_id", inputdata$target_name, target_json)[,1]
    # target_id <- DBI::dbGetQuery(con, paste0("SELECT cohort_id FROM cohorts WHERE cohort_name = '", inputdata$target_name,"';"))[,1]
  }

  # look up outcome cohort id; if absent, insert the cohort and use the new id
  outcome_id <- DBI::dbGetQuery(con, paste0("SELECT cohort_id FROM cohorts WHERE cohort_name = '", inputdata$outcome_name,"';"))[,1]
  if (identical(outcome_id, integer(0))){
    outcome_id <- insertSqlData(table = "cohorts", conn = con, primaryKey = "cohort_id", inputdata$outcome_name, outcome_json)[,1]
    # outcome_id <- DBI::dbGetQuery(con, paste0("SELECT cohort_id FROM cohorts WHERE cohort_name = '", inputdata$outcome_name,"';"))[,1]
  }

  # look up researcher_id; if absent, insert the researcher and use the new id
  researcher_id <- DBI::dbGetQuery(con, paste0("SELECT researcher_id FROM researchers WHERE researcher_name = '", inputdata$researcher_name,"';"))[,1]
  if (identical(researcher_id, integer(0))){
    researcher_id <- addPlpResearcher <- insertSqlData(table = "researchers", conn = con, primaryKey = "researcher_id",
                                                       inputdata$researcher_name, inputdata$researcher_email, inputdata$researcher_affiliation)[,1]
    # researcher_id <- DBI::dbGetQuery(con, paste0("SELECT researcher_id FROM researchers WHERE researcher_name = '", inputdata$researcher_name,"';"))[,1]
  }

  # load the PLP result; tar = time-at-risk window length (end - start)
  plpResult <- readRDS(file.path(dataLocation, 'plpResult.rds'))
  tar <- plpResult$inputSetting$populationSettings$riskWindowEnd - plpResult$inputSetting$populationSettings$riskWindowStart
  # insert model info
  # TODO: fix the file insertion
  model_id <- DBI::dbGetQuery(con, paste0("SELECT model_id FROM models WHERE model_name = '", inputdata$model_name,"';"))[,1]
  if (identical(model_id, integer(0))){
    model_id <- insertSqlData(table ="models", conn = con, primaryKey = "model_id",
                              model_name = "test", target_id, outcome_id, tar,
                              researcher_id, database_id, plp_model_file = "temp")[,1]
    # model_id <- DBI::dbGetQuery(con, paste0("SELECT model_id FROM models WHERE model_name = '", inputdata$model_name,"';"))[,1]
  }
  # insert the new result row
  # todo: fix original_model_id
  result_id <- insertSqlData(table = "results", conn = con, primaryKey = "result_id",
                             model_id, researcher_id, database_id,
                             target_id, outcome_id, tar, inputdata$analysis_type,
                             as.character(plpResult$executionSummary$ExecutionDateTime),
                             as.character(plpResult$executionSummary$PackageVersion$packageVersion),
                             original_model_id = model_id)[,1]
  # append the prediction distribution, tagged with the new result_id
  predDist <- plpResult$performanceEvaluation$predictionDistribution %>%
    mutate(result_id = result_id)
  DBI::dbAppendTable(con = con, name = "prediction_distribution", predDist)

  # append the covariate summary (non-zero covariates only, to limit size)
  # covSummary <- plpResult$covariateSummary
  covSummary <- plpResult$covariateSummary %>%
    filter(covariateValue !=0) %>%
    mutate(result_id = result_id)
  DBI::dbAppendTable(conn = con, name = "covariate_summary", covSummary)

  # threshold summary: NA -> -1, +Inf -> 999999, -Inf -> -999999 sentinels
  threshSum <- plpResult$performanceEvaluation$thresholdSummary %>%
    # mutate(result_id = result_id) %>%
    # # maybe want to make this something other than negative numbers, 999999 and -999999
    tidyr::replace_na(replace = list(predictionThreshold = -1, preferenceThreshold = -1, f1Score = -1, accuracy = -1,
                                     sensitivity = -1, falseNegativeRate = -1, falsePositiveRate = -1, specificity = -1,
                                     positivePredictiveValue = -1, falseDiscoveryRate = -1, negativePredictiveValue = -1, falseOmissionRate = -1,
                                     positiveLikelihoodRatio = -1, negativeLikelihoodRatio = -1, diagnosticOddsRatio = -1)) %>%
    mutate_if(is.numeric, ~ if_else(is.infinite(.x) & .x > 0,999999,.x)) %>%
    mutate_if(is.numeric, ~ if_else(is.infinite(.x) & .x < 0 ,-999999,.x)) %>%
    mutate(result_id = result_id)
  DBI::dbAppendTable(conn = con, name = "threshold_summary", threshSum)

  # calibration summary
  calSum <- plpResult$performanceEvaluation$calibrationSummary %>%
    mutate(result_id = result_id)
  DBI::dbAppendTable(con = con, name = "calibration_summary", calSum)

  # evaluation statistics: keep the "test" split, pivot metrics to one wide row
  evalStat <- as.data.frame(plpResult$performanceEvaluation$evaluationStatistics) %>%
    filter(Eval == "test") %>%
    select(Metric, Value) %>%
    tidyr::pivot_wider(names_from = Metric, values_from = Value) %>%
    mutate(result_id = result_id)
  # rename to the database column names (order must match the pivoted metrics)
  colnames(evalStat) <- c("population_size", "outcome_count",
                          "AUC_auc", "AUC_auc_lb95ci", "AUC_auc_ub95ci", "AUPRC", "brier_score", "brier_scaled",
                          "calibration_intercept", "calibration_slope", "result_id")
  nas <- which(is.na(evalStat)==TRUE) # get index of NA values
  evalStat[nas] <- -1  # -1 sentinel for missing statistics
  DBI::dbAppendTable(con = con, name = "evaluation_statistics", evalStat)

  # demographic summary
  demSum <- plpResult$performanceEvaluation$demographicSummary %>%
    mutate(result_id = result_id)
  DBI::dbAppendTable(con = con, name = "demographic_summary", demSum)

  # store the population / model / covariate settings as JSON blobs
  popSettings <- insertSqlData(table = "population_settings", conn = con, primaryKey = "population_setting_id", model_id, jsonify::to_json(plpResult$model$populationSettings))
  modelSettings <- insertSqlData(table = "model_settings", conn = con, primaryKey = "model_setting_id", model_id, plpResult$model$modelSettings$model, jsonify::to_json(plpResult$model$modelSettings))
  covariateSettings <- insertSqlData(table = "covariate_settings", conn = con, primaryKey = "covariate_setting_id", model_id, jsonify::to_json(plpResult$model$metaData$call$covariateSettings))
}
addUserValToDatabase <- function(dataLocation, con){
#todo: do a bunch of checks lol
inputdata <- readr::read_csv(file.path(dataLocation, 'inputdata.csv'))
colnames(inputdata) <- SqlRender::camelCaseToSnakeCase(colnames(inputdata))
# get database_id and ifnot insert and get new id
database_id <- DBI::dbGetQuery(con, paste0("SELECT database_id FROM databases WHERE database_name = '", inputdata$database_name,"';"))[,1]
if (identical(database_id, integer(0))){
database_id <- insertSqlData(table = "databases",conn = con, primaryKey = "database_id", inputdata$database_name,
inputdata$database_acronym, inputdata$database_desc, inputdata$database_type)[,1]
database_id <- DBI::dbGetQuery(con, paste0("SELECT database_id FROM databases WHERE database_name = '", inputdata$database_name,"';"))[,1]
}
# # get target_id and ifnot insert and get new id
# target_id <- DBI::dbGetQuery(con, paste0("SELECT cohort_id FROM cohorts WHERE cohort_name = '", inputdata$target_name,"';"))[,1]
# if (identical(target_id, integer(0))){
# target_id <- insertSqlData(table = "cohorts", conn = con, primaryKey = "cohort_id", inputdata$target_name, target_json)[,1]
# # target_id <- DBI::dbGetQuery(con, paste0("SELECT cohort_id FROM cohorts WHERE cohort_name = '", inputdata$target_name,"';"))[,1]
# }
#
# # get outcome_id and ifnot insert and get new id
# outcome_id <- DBI::dbGetQuery(con, paste0("SELECT cohort_id FROM cohorts WHERE cohort_name = '", inputdata$outcome_name,"';"))[,1]
# if (identical(outcome_id, integer(0))){
# outcome_id <- insertSqlData(table = "cohorts", conn = con, primaryKey = "cohort_id", inputdata$outcome_name, outcome_json)[,1]
# # outcome_id <- DBI::dbGetQuery(con, paste0("SELECT cohort_id FROM cohorts WHERE cohort_name = '", inputdata$outcome_name,"';"))[,1]
# }
#
# get researcher_id and ifnot insert and get new id
researcher_id <- DBI::dbGetQuery(con, paste0("SELECT researcher_id FROM researchers WHERE researcher_name = '", inputdata$researcher_name,"';"))[,1]
if (identical(researcher_id, integer(0))){
researcher_id <- addPlpResearcher <- insertSqlData(table = "researchers", conn = con, primaryKey = "researcher_id",
inputdata$researcher_name, inputdata$researcher_email, inputdata$researcher_affiliation)[,1]
# researcher_id <- DBI::dbGetQuery(con, paste0("SELECT researcher_id FROM researchers WHERE researcher_name = '", inputdata$researcher_name,"';"))[,1]
}
# get model
valResult <- readRDS(file.path(dataLocation, 'validationResult.rds'))
tar <- valResult$inputSetting$populationSettings$riskWindowEnd - valResult$inputSetting$populationSettings$riskWindowStart
# insert model info
# TODO: fix the file insertion
model_id <- inputdata$model_id
model_info <- DBI::dbGetQuery(conn = con, paste0("SELECT * FROM models WHERE model_id = ", model_id))
model_info
#insert new result
# todo: fix original_model_id
result_id <- insertSqlData(table = "results", conn = con, primaryKey = "result_id",
model_id, researcher_id, database_id,
model_info$target_id, model_info$outcome_id, model_info$tar, inputdata$analysis_type,
as.character(valResult$executionSummary$ExecutionDateTime),
as.character(valResult$executionSummary$PackageVersion$packageVersion),
original_model_id = model_id)[,1]
# add prediction distribution
predDist <- valResult$performanceEvaluation$predictionDistribution %>%
mutate(result_id = result_id)
DBI::dbAppendTable(con = con, name = "prediction_distribution", predDist)
# add covariateSummary
# covSummary <- valResult$covariateSummary
covSummary <- valResult$covariateSummary %>%
filter(covariateValue !=0) %>%
mutate(result_id = result_id)
DBI::dbAppendTable(conn = con, name = "covariate_summary", covSummary)
#thresholdSummary
threshSum <- valResult$performanceEvaluation$thresholdSummary %>%
# mutate(result_id = result_id) %>%
# # maybe want to make this something other than negative numbers, 999999 and -999999
tidyr::replace_na(replace = list(predictionThreshold = -1, preferenceThreshold = -1, f1Score = -1, accuracy = -1,
sensitivity = -1, falseNegativeRate = -1, falsePositiveRate = -1, specificity = -1,
positivePredictiveValue = -1, falseDiscoveryRate = -1, negativePredictiveValue = -1, falseOmissionRate = -1,
positiveLikelihoodRatio = -1, negativeLikelihoodRatio = -1, diagnosticOddsRatio = -1)) %>%
mutate_if(is.numeric, ~ if_else(is.infinite(.x) & .x > 0,999999,.x)) %>%
mutate_if(is.numeric, ~ if_else(is.infinite(.x) & .x < 0 ,-999999,.x)) %>%
mutate(result_id = result_id)
DBI::dbAppendTable(conn = con, name = "threshold_summary", threshSum)
#calibration summary
calSum <- valResult$performanceEvaluation$calibrationSummary %>%
mutate(result_id = result_id)
DBI::dbAppendTable(con = con, name = "calibration_summary", calSum)
# add evalutaion statistics
evalStat <- as.data.frame(valResult$performanceEvaluation$evaluationStatistics) %>%
filter(Eval == "validation") %>%
select(Metric, Value) %>%
tidyr::pivot_wider(names_from = Metric, values_from = Value) %>%
mutate(result_id = result_id)
colnames(evalStat) <- c("population_size", "outcome_count",
"AUC_auc", "AUC_auc_lb95ci", "AUC_auc_ub95ci", "AUPRC", "brier_score", "brier_scaled",
"calibration_intercept", "calibration_slope", "result_id")
nas <- which(is.na(evalStat)==TRUE) # get index of NA values
evalStat[nas] <- -1
DBI::dbAppendTable(con = con, name = "evaluation_statistics", evalStat)
# demographic summary
demSum <- valResult$performanceEvaluation$demographicSummary %>%
mutate(result_id = result_id)
DBI::dbAppendTable(con = con, name = "demographic_summary", demSum)
# #add populationSettings
# popSettings <- insertSqlData(table = "population_settings", conn = con, primaryKey = "population_setting_id", model_id, jsonify::to_json(valResult$model$populationSettings))
#
# #add modelSettings
# modelSettings <- insertSqlData(table = "model_settings", conn = con, primaryKey = "model_setting_id", model_id, valResult$model$modelSettings$model, jsonify::to_json(valResult$model$modelSettings))
#
# covariateSettings <- insertSqlData(table = "covariate_settings", conn = con, primaryKey = "covariate_setting_id", model_id, jsonify::to_json(valResult$model$metaData$call$covariateSettings))
}
|
9896ebe9c0bb9ab0b229a58ad9cfc7d0155967d7 | 5e5e66108dda230d3ae4593ed36f68cf00a8b9e5 | /man/print.elephant.Rd | 0ebd2db8a44e91c3fed9ffb4f044b191bb708def | [] | no_license | moodymudskipper/elephant | a209ed32752c21c01f1b80b782d6a2293ad1b7e9 | 2ae07a4c7b3cf7dd9115a78fe434b76535e16903 | refs/heads/master | 2022-09-22T18:51:22.771303 | 2020-06-02T13:08:17 | 2020-06-02T13:16:51 | 268,387,152 | 15 | 0 | null | null | null | null | UTF-8 | R | false | true | 465 | rd | print.elephant.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/elephant.R
\name{print.elephant}
\alias{print.elephant}
\title{Print elephant object}
\usage{
\method{print}{elephant}(x, ..., print.value = TRUE)
}
\arguments{
\item{x}{an object used to select a method.}
\item{...}{further arguments passed to or from other methods.}
\item{print.value}{if \code{FALSE} only the elephant's memory is printed.}
}
\description{
Prints an elephant object, showing its current value and the elephant's memory of past values.
}
|
2bf998a5f43599d1ba00d554f37982f873ebea58 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/bingat/examples/getNumEdges.Rd.R | 76b48c7874230fc2d731f8db8767e9b8224a4117 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 271 | r | getNumEdges.Rd.R | library(bingat)
### Name: getNumEdges
### Title: Get the Number of Edges in a Graph
### Aliases: getNumEdges
### ** Examples
# Auto-extracted example code for bingat::getNumEdges.
data(braingraphs)
# Number of nodes per graph, read from adjacency-matrix-encoded data.
brainnodes <- getNumNodes(braingraphs, "adjMatrix")
# NOTE(review): the node count (not the graph data) is passed here --
# confirm against ?getNumEdges whether it expects `brainnodes` or the
# graphs themselves as its first argument.
brainedges <- getNumEdges(brainnodes, "adjMatrix")
# Echo the result.
brainedges
|
6898647884e24c0f00f3a938a29b73d591c40962 | f997169854672f36810e793a2932313f11b52139 | /R/superbarplot.R | 6bd44bee8111c95d755c477ae9a025233e2c5a27 | [] | no_license | jverzani/UsingR | 7e3fcbddae97a0ecd0268a9068af7a70ecc82907 | d1cd49622b6e85cf26710c5747423b4ba0721ef6 | refs/heads/master | 2021-01-09T20:53:56.202763 | 2020-07-29T16:53:55 | 2020-07-29T16:53:55 | 57,312,995 | 1 | 3 | null | null | null | null | UTF-8 | R | false | false | 1,273 | r | superbarplot.R | ##' extended bar plot
##'
##' Draws grouped range bars: the input matrix is read as alternating rows
##' of (high, low) values, so rows 1,2 give the first range series, rows 3,4
##' the second, and so on. Each column is one category on the x axis; inner
##' series are drawn progressively narrower so all ranges stay visible.
##'
##' @param x numeric matrix with an even number of rows (alternating
##'   high/low values per series); columns are the categories.
##' @param names labels drawn under each category (defaults to the column
##'   index); use \code{NULL} to suppress them.
##' @param names_height y position for the labels; computed from the y-axis
##'   parameters (\code{par("yaxp")}) when \code{NULL}.
##' @param col fill colors, one per range series.
##' @param ... further arguments passed on to \code{title}.
##' @return \code{NULL}, invisibly; called for its plotting side effect.
##' @export
superbarplot <- function(x,
                         names = 1:dim(x)[2],
                         names_height = NULL,
                         col = gray(seq(0.8, 0.5, length.out = dim(x)[1] / 2)),
                         ...) {
  ## Draw one vertical bar spanning [min, max], centered in cell [x, x+1].
  plot.bar <- function(x, min, max, width = 1, ...) {
    alpha <- (1 - width) / 2
    polygon(x + c(alpha, alpha, 1 - alpha, 1 - alpha, alpha),
            c(min, max, max, min, min),
            ...)                        # pass in col
  }

  ## x is a matrix with rows alternating High, Min: one series per row pair.
  n <- dim(x)[2]
  no.bars <- dim(x)[1] / 2
  y.range <- c(min(x), max(x))
  x.range <- c(1, n + 1)

  ## Set up an empty plotting region spanning all categories.
  plot.new()
  plot.window(xlim = x.range, ylim = y.range,
              xaxt = "n",
              bty = "n", ann = FALSE)
  title(...)

  ## Later (inner) series get a smaller width so they remain visible.
  for (i in seq_len(no.bars)) {
    for (j in seq_len(n)) {
      plot.bar(j, x[2 * i - 1, j], x[2 * i, j],
               width = 1 - i / (3 * no.bars), col = col[i])
    }
  }

  ## Category labels.
  if (!is.null(names)) {
    ## Default height: just below the top of the y axis.
    if (is.null(names_height)) {
      f <- par("yaxp")
      names_height <- f[1] + (f[2] - f[1]) * (f[3] - 1) / f[3]
    }
    text(0.5 + 1:n, rep(names_height, n), format(names))
  }
  invisible(NULL)
}
|
95d0b0d32b69cb4e20ea848f57c4d9e6ac09d60c | 04aa2074e718d6d41225f07be8bbeb38bbd9e478 | /man/quick_C.Rd | c5a89669f6fcdb467f1b3436e79b38824ed5a627 | [] | no_license | cran/activegp | 510a29a02da349977980c85f77ad414f43f026ef | f553dde66e7cded8a5f5bf5c8854dff023c80b14 | refs/heads/master | 2022-07-06T13:22:57.223613 | 2022-06-27T20:00:02 | 2022-06-27T20:00:02 | 253,630,433 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,104 | rd | quick_C.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/RcppExports.R
\name{quick_C}
\alias{quick_C}
\title{Covariance of kernel computations}
\usage{
quick_C(measure, design, Ki, Kir, theta, xm, xv, ct, verbose)
}
\arguments{
\item{measure}{An integer giving the measure of integration. 0 for Lebesgue/Uniform on [0,1]^m, 1 for (truncated) Gaussian on [0,1]^m, 2 for Gaussian on R^m.}
\item{design}{matrix of design points}
\item{Ki}{The inverse covariance matrix}
\item{Kir}{The inverse covariance matrix times the response.}
\item{theta}{lengthscales}
\item{xm}{The mean vector associated with the Gaussian measure. Ignored if uniform.}
\item{xv}{The variance vector associated with the Gaussian measure (diagonal of covariance matrix, vars assumed independent). Ignored if uniform.}
\item{ct}{Covariance type, 1 means Gaussian, 2 means Matern 3/2, 3 means Matern 5/2}
}
\value{
The matrix representing the result of the integration.
}
\description{
Computes Int(kappa_i(X, design) . kappa_j(design, X)). This function is preferred for initialization
}
\keyword{internal}
|
f59bcf2b96ebea35c7a5fafae8e927395318b26e | 6a28ba69be875841ddc9e71ca6af5956110efcb2 | /Probability_Random_Variables_And_Stochastic_Processes_by_Athanasios_Papoulis_And_S_Unnikrishna_Pillai/CH8/EX8.7/Ex8_7.R | 2c9ffcabf5901dd89362c3c9de577d8670533a76 | [] | permissive | FOSSEE/R_TBC_Uploads | 1ea929010b46babb1842b3efe0ed34be0deea3c0 | 8ab94daf80307aee399c246682cb79ccf6e9c282 | refs/heads/master | 2023-04-15T04:36:13.331525 | 2023-03-15T18:39:42 | 2023-03-15T18:39:42 | 212,745,783 | 0 | 3 | MIT | 2019-10-04T06:57:33 | 2019-10-04T05:57:19 | null | UTF-8 | R | false | false | 997 | r | Ex8_7.R | #page no. 314-315
#example 8-7
#part (a)
# Two-sided 95% confidence interval for sigma^2 from the 0.025 and 0.975
# chi-square quantiles with 6 degrees of freedom (book equation 8-23).
n=6
v_cap=0.25
x1=qchisq(0.975,6) #qchisq() is the function used to calculate Chi-square percentile value in R
x2=qchisq(0.025,6) #qchisq() is the function used to calculate Chi-square percentile value in R
# Interval bounds: dividing by the larger quantile gives the lower bound.
c1=n*v_cap/x1
c2=n*v_cap/x2
cat("(8-23) yields ",c1,"< sigma^2 <",c2,". The corresponding interval for sigma is ",sqrt(c1),"< sigma <",sqrt(c2),"V")
#there is slight difference in the values in the book and that is due to approximation
#part (b)
# Same construction, now built from the sample standard deviation s
# (book equation 8-24).
# NOTE(review): n = 5 here while qchisq() is called with 5 degrees of
# freedom against a (n-1)*s^2 numerator; the usual sample-variance interval
# pairs (n-1)*s^2 with n-1 df -- confirm against the textbook's (8-24).
n=5
s=0.6
x1=qchisq(0.975,5) #qchisq() is the function used to calculate Chi-square percentile value in R
x2=qchisq(0.025,5) #qchisq() is the function used to calculate Chi-square percentile value in R
c1=(n-1)*s^2/x1
c2=(n-1)*s^2/x2
cat("(8-24) yields ",c1,"< sigma^2 <",c2,". The corresponding interval for sigma is ",sqrt(c1),"< sigma <",sqrt(c2),"V")
#there is slight difference in the values in the book and that is due to approximation |
9ccb061a87ece26ef4c8428db639c8b9c34f90a7 | b26db26e9a6b8a612dd716757a577ac05249860e | /man/formatTime.Rd | d4330e28e6bc700c9d6c047819d65abaa117910f | [] | no_license | cran/transmission | 354fbe8fda9ed30b45f17e3370fb63999801ec77 | 8834ea811501d937e7fa2047caff5c17f193b090 | refs/heads/master | 2016-09-06T06:02:10.520540 | 2014-05-15T00:00:00 | 2014-05-15T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 313 | rd | formatTime.Rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{formatTime}
\alias{formatTime}
\title{Formats a proc_time value as an "hh:mm:ss" time string}
\usage{
formatTime(x)
}
\arguments{
\item{x}{number of seconds.}
}
\description{
Formats a proc_time value (a number of seconds) as an "hh:mm:ss" time string.
}
\seealso{
print.proc_time, proc.time
}
|
0caad84ae555f20fc378a8cb10e764b02a321f00 | 9eb2995d4bd17766f89fdbb56b25d7c9dc7221bc | /Project/App1.R | e1bde292251683eeac547100dffac95891a271de | [] | no_license | Kuleng/CMSC-150 | 6b15e7e52863fd4614f4789de03eed4ed1962a17 | fbecea9cb51dbd4bde0699f22a11511b14402152 | refs/heads/master | 2021-01-05T01:10:51.366379 | 2020-02-16T03:31:09 | 2020-02-16T03:31:09 | 240,825,129 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,027 | r | App1.R | library(shiny)
library(shinyMatrix)
# UI: a method selector in the sidebar with per-method input panels shown
# conditionally, and matching output panels in the main area.
# NOTE(review): the input id "realX" is declared twice (once per method
# panel); Shiny input ids must be unique, so one of them should be renamed
# together with the matching server references.
ui <- fluidPage(
  pageWithSidebar(
    titlePanel("CMSC 150 - Project"),
    sidebarPanel(
      selectInput(inputId = "Method", "Please select your choice.",
                  choices = c("Polynomial Regression",
                              "Spline Interpolation",
                              "Simplex Method")
      ),
      # Inputs for polynomial regression: CSV of (x, y) points, the order
      # of the fitted polynomial, and the x value to estimate at.
      conditionalPanel(condition = "input.Method == 'Polynomial Regression'",
                       fileInput(inputId="csvfile", label = "Attach CSV file",
                                 multiple = FALSE,
                                 accept = c("text/csv",
                                            "text/comma-separated-values,text/plain",
                                            ".csv")),
                       numericInput(inputId="order", label = "Input polynomial order", value = 3),
                       numericInput(inputId="realX", label = "Input a real number", value =1)),
      # Inputs for spline interpolation (output panel not implemented yet).
      conditionalPanel(condition = "input.Method == 'Spline Interpolation'",
                       fileInput(inputId="csvfile2", label = "Attach CSV file",
                                 multiple = FALSE, accept = c("text/csv", "text/comma-separated-values, text/plain",
                                                              ".csv")),
                       numericInput(inputId="realX", label = "Input a real number", value=1)),
      # Placeholder panel for the simplex method. Trailing empty arguments
      # (`f(x, )`) were removed here and below: they leave a missing
      # argument in `...`, which tag functions may reject at evaluation.
      conditionalPanel(condition = "input.Method == 'Simplex Method'"
                       #INPUT CODE HERE
      )
    ),
    mainPanel(conditionalPanel(condition = "input.Method == 'Polynomial Regression'",
                               h2("Polynomial Function: "),
                               textOutput("polynomial_function"),
                               h2("Estimate of f(x): "),
                               textOutput("estimate"),plotOutput("trend")),
              conditionalPanel(condition = "input.Method == 'Spline Interpolation'"
                               #SPLINE INTERPOLATION
              ),
              conditionalPanel(condition = "input.Method == 'Simplex Method'"
                               #SIMPLEX METHOD
              )
    )
  )
)
# Server: renders the polynomial-regression outputs (function string,
# point estimate, trend plot) from the uploaded CSV and the sidebar inputs.
server <- function(input, output){
  # NOTE(review): machine-specific working directory, kept from the
  # original (it only supported the commented-out source() call below);
  # consider removing it.
  setwd('~/Desktop/CMSC150/Project')
  # source('Gauss.R')
  # Fitted model rendered as a "function(x) ..." string (mode 1).
  output$polynomial_function = renderText({
    req(input$csvfile)  # wait until a file has been uploaded
    mainFile <- input$csvfile
    PolynomialRegression(mainFile$datapath, input$order, input$realX, 1) })
  # Fitted polynomial evaluated at the requested x value (mode 2).
  output$estimate = renderText({
    req(input$csvfile)
    mainFile <- input$csvfile
    PolynomialRegression(mainFile$datapath, input$order, input$realX, 2) })
  # eval(parse(text= output$polynomial_function )) })
  # Scatter plot of the data with the fitted curve (mode 3).
  output$trend = renderPlot({
    req(input$csvfile)
    mainFile <- input$csvfile
    PolynomialRegression(mainFile$datapath, input$order, input$realX, 3) } )
}
#AugCoeffMatrix
# Builds an augmented coefficient matrix from `mainList`, a list of
# single-line equation functions whose bodies look like
#   a * x0 + b * x1 + ... + k
# (the deparsed body is split on "+" and "*"; a term without a variable is
# treated as the constant and moved, negated, into the RHS column).
# Returns a list with `variables` (names "x0".."xn") and `augcoeffmatrix`
# (numOfEq rows by unknowns+1 columns, last column = RHS).
# NOTE(review): variable indices are recovered with substring(var, 2, 2),
# so only single-digit variables (x0..x9) map to the right column; also the
# two passes deparse with different width.cutoff values (400L vs 200L) --
# confirm that asymmetry is intentional.
AugCoeffMatrix <-function(mainList){
  #checking if unknowns are equal to the number of
  numOfEq = length(mainList) #number of equations
  unknowns = 0
  checked = 0 #variable for equality check
  loopCount = 0 #variable for equality check (loop)
  varCount = 0
  outerLoopCount = 0
  #Checker for number of unknowns
  # First pass: scan every equation for the highest variable index used.
  for (a in mainList){
    loopCount = 0
    tempDeparse = deparse(a, width.cutoff = 400L); #deparsed function
    tempCoeffs = strsplit(tempDeparse[2],split = "+", fixed = T) #splitted the string that was deparsed to elements
    splitCoeffs = gsub(" ", "", tempCoeffs[[1]], fixed = T)
    splittedCoeffs = strsplit(splitCoeffs, split = "*", fixed = T) #Splitted Coefficient and Variables
    while ( loopCount < length(splittedCoeffs) - 1){
      loopCount = loopCount + 1
      if (as.numeric(substring(splittedCoeffs[[loopCount]][2],2)) > unknowns){
        unknowns = as.numeric(substring(splittedCoeffs[[loopCount]][2],2))
      }
    }
  }
  unknowns = unknowns + 1 #increment for x0 count
  if (unknowns <= numOfEq){
    # print("Number of equations and unknowns are equal.")
  } else{
    # print("Number of equations and unknowns are not equal.")
  }
  #create a list of all the unknown variables
  mainVariables = (c())
  a = 0
  while(a<unknowns){
    a = a + 1
    mainVariables[a] = paste("x", a-1, sep="")
  }
  #initialize mainData
  # Zero-filled matrix; columns are the unknowns plus one RHS column.
  mainData = list(variables = mainVariables, augcoeffmatrix = matrix(0L, byrow = TRUE, nrow = numOfEq, ncol = unknowns+1,dimnames = list(c(1:numOfEq),c(mainVariables,"RHS"))))
  # Second pass: place each term's coefficient into its variable's column.
  for (a in mainList){
    outerLoopCount = outerLoopCount + 1
    loopCount = 0
    tempDeparse = deparse(a, width.cutoff = 200L); #deparsed function
    tempCoeffs = strsplit(tempDeparse[2],split = "+", fixed = T) #splitted the string that was deparsed to elements
    splitCoeffs = gsub(" ", "", tempCoeffs[[1]], fixed = T)
    splittedCoeffs = strsplit(splitCoeffs, split = "*", fixed = T) #Splitted Coefficient and Variables
    #split all the elements and get their respective values via indexing, negate all the right hand side
    while (loopCount < length(splittedCoeffs)){
      loopCount = loopCount + 1
      for (var in mainVariables){ #loop the coefficients variable to all unknowns that was initialized.
        if(is.na(splittedCoeffs[[loopCount]][2])){ #if there is no variable, it will automatically be set to the right hand side
          mainData$augcoeffmatrix[outerLoopCount,unknowns+1] = (-1)*as.numeric(splittedCoeffs[[loopCount]][1])
        } else if(splittedCoeffs[[loopCount]][2] == var){ #if the variable is equal to the looped variable in the vector of unknowns, it will replace the 0s in the array
          mainData$augcoeffmatrix[outerLoopCount,as.numeric(substring(var,2,2))+1] = as.numeric(splittedCoeffs[[loopCount]][1])
        }
      }
    }
  }
  return (mainData)
}
#GaussJordan Function
# Solves the linear system described by `newList` (a list of equation
# functions in the format understood by AugCoeffMatrix) via Gauss-Jordan
# elimination with partial pivoting. Returns the numeric vector of solution
# values; if a zero pivot is encountered the system is treated as having no
# unique solution and the current (all-zero) vector is returned instead.
GaussJordan<-function(newList){
  numOfEq = length(newList); #number of equations
  newMatrix = AugCoeffMatrix(newList) #Augmented Coefficient Matrix
  unknowns = length(newMatrix$variables) #number of unknown variables
  variables = c(newMatrix$variables) #creates a vector of the variables
  variableValues = rep(0, (unknowns)) #creates a vector of zeroes corresponding to variables
  #print(newMatrix$augcoeffmatrix)
  for(pivotIteration in 1:(unknowns)){ #loops through the rows of the system based on number of unknowns
    # Partial pivoting: find the row at or below the current one whose
    # entry in the pivot column has the largest absolute value.
    max = 0
    pivotRow = 0
    PivotElement = 0;
    for (i in pivotIteration:(numOfEq)){#loop for the number of equations
      #get the maximum value on the ith column
      # (`<=` means ties resolve to the LAST row holding the maximum)
      if (max <= abs(newMatrix$augcoeffmatrix[i,pivotIteration])){
        max = abs(newMatrix$augcoeffmatrix[i,pivotIteration])
        pivotRow = i
      }
    }
    #swap first row and the new pivot row with max
    if (pivotRow != pivotIteration){
      temp = newMatrix$augcoeffmatrix[pivotIteration,];
      newMatrix$augcoeffmatrix[pivotIteration,] = newMatrix$augcoeffmatrix[pivotRow,]
      newMatrix$augcoeffmatrix[pivotRow,] = temp
    }
    # cat("\n\nResult of pivot: \n")
    # print(newMatrix$augcoeffmatrix)
    #Pivot element before normalization
    PivotElement = newMatrix$augcoeffmatrix[pivotIteration,pivotIteration] #pivot element in main diagonal
    # Zero pivot: no unique solution, bail out with the zero vector.
    if(PivotElement == 0){
      # print("Division by zero error. No solution.")
      return(variableValues)
    }
    # Normalize the pivot row so the pivot element becomes 1.
    newMatrix$augcoeffmatrix[pivotIteration,] = newMatrix$augcoeffmatrix[pivotIteration,]/PivotElement
    #pivot element after normalization
    PivotElement = newMatrix$augcoeffmatrix[pivotIteration,pivotIteration]
    # cat("\n\nResult of Normalization: \n")
    # print(newMatrix$augcoeffmatrix)
    # Eliminate the pivot column from every OTHER row (above and below),
    # driving the coefficient block toward the identity matrix.
    for (upperTriangle in 1:(numOfEq)){ #looping all values to be eliminated to create an identity matrix
      if(upperTriangle != pivotIteration){ #ignore the values in the main diagonal and RHS
        VTBE = newMatrix$augcoeffmatrix[upperTriangle,pivotIteration]
        multiplier = VTBE/PivotElement
        #temporary storage of pivot row
        tempVector = c(newMatrix$augcoeffmatrix[pivotIteration,])
        tempVector = tempVector*multiplier #M x PivotRow
        #row - vector(MxPivotRow) loop
        for(element in 1:(unknowns+1)){
          newMatrix$augcoeffmatrix[upperTriangle, element] = newMatrix$augcoeffmatrix[upperTriangle, element] - (tempVector[element])
        }
        # cat("\n\nCurrent pivot row: ")
        # cat(newMatrix$augcoeffmatrix[pivotIteration,])
        # cat("\nPivot element: ", PivotElement,"\n")
        # cat("Value to be eliminated: ", VTBE, "\n")
        # cat("Vector: \n ")
        # print(tempVector)
        # cat("\n")
        # cat("\nResulting matrix: \n")
        # print(newMatrix$augcoeffmatrix)
        # cat("\n")
      }
    }
  }
  # Read the solution off the RHS column of the reduced matrix.
  for(i in 1:(unknowns)) #input results to a vector
    variableValues[i] = newMatrix$augcoeffmatrix[i,(unknowns+1)]
  #create new list for results
  results = list(variables = newMatrix$variables, augcoeffmatrix = newMatrix$augcoeffmatrix, values = variableValues)
  #print(variableValues)
  return(results$values)
}
# Fits a polynomial of order `degree` to the first two columns (x, y) of
# `csvfile` and, depending on `int`, returns:
#   1 -> the fitted polynomial as a "function(x) ..." string
#   2 -> the fitted polynomial evaluated at `realX`
#   otherwise -> the scatter plot (drawn as a side effect)
# NOTE(review): GaussJordan() expects a list of equation functions (it
# feeds its argument straight into AugCoeffMatrix), but a numeric matrix is
# passed below, so `mainData$unknowns` does not hold a meaningful solution.
# Every value actually returned comes from the lm() fit -- consider
# removing the GaussJordan call or solving the normal equations directly.
PolynomialRegression<-function(csvfile, degree, realX, int){
  mydata = read.csv(file = csvfile)
  # First column is x, second column is y.
  x = mydata[,1]
  y = mydata[,2]
  mainList = list(x,y)
  #create the main labeled list
  mainData = list(augcoeffmatrix = matrix(0L, byrow = TRUE, nrow = degree+1, ncol = degree+2), unknowns = c(), polynomialString = "function(x) ", polynomialFunction = "function(x) ")
  #Set up aug coeff matrix
  # Least-squares normal equations: entry (row, col) is sum(x^(row+col-2));
  # the last column holds sum(y * x^(row-1)).
  for(row in 1:(degree+1)){
    for(col in 1:(degree + 2)){
      if (col != (degree + 2))
        mainData$augcoeffmatrix[row,col] = sum(mainList[[1]]^((row-1) + (col-1))) #if the coeff is not in the right hand side
      else
        mainData$augcoeffmatrix[row,col] = sum(mainList[[2]] * mainList[[1]]^(row-1)) #if the coeff is in the RHS
    }
  }
  #get the unknowns
  mainData$unknowns = GaussJordan(mainData$augcoeffmatrix)
  #getting the qModel (polynomial Model)
  if(degree == 1)
    qModel = lm(y~x)
  else
    qModel = lm(y~poly(x,degree,raw = T))
  #plot the function
  # plot() draws as a side effect and returns NULL, so plot_f is NULL.
  plot_f <- plot(mainList[[1]] , mainList[[2]], pch = 20, col = "red", main = "Function plot", xlab = "X", ylab = "Y")
  #set lines
  coordinates = lines(x,predict(qModel), col="blue")
  #manipulate polynomial String from right to left, if already in the left most, do not add any string manipulated variables and operations
  # Builds "c_n * x ^ n + ... + c_0" from the highest-order term downward.
  for(degCount in (degree+1):1){
    if(degCount == 1)
      mainData$polynomialString = paste(mainData$polynomialString, qModel$coefficients[[degCount]])
    else{
      mainData$polynomialString = paste(mainData$polynomialString, qModel$coefficients[[degCount]], " * x ^ ", (degCount-1) ," + ", sep = "", collapse = NULL)
    }
  }
  #create the function through evaluation and parsing functions
  # NOTE(review): eval(parse(...)) builds the function from a string; it is
  # acceptable here only because the string is assembled locally from
  # numeric coefficients, never from user-supplied text.
  #print(mainData$polynomialString)
  mainData$polynomialFunction = eval(parse(text = mainData$polynomialString))
  #print(mainData$polynomialFunction)
  if(int == 1)
    return(mainData$polynomialString)
  else if(int == 2)
    return(mainData$polynomialFunction(realX))
  else
    return(plot_f)
}
shinyApp(ui= ui, server= server) |
d841be51d8ad9178c7e1a3fa3b8aeb14d91a28c3 | ed437dfb67dda8ad95028298d6f9f922f4c0c5f9 | /man/rvCoeff.Rd | 83f07cecece4e6a9957b7937a64d860b6497a7eb | [] | no_license | cran/ExPosition | bda478275dce86b90750cac4bf6eda1f81c5c091 | 78d16a34a44e4a99231a4e11b132500e952b3123 | refs/heads/master | 2021-05-16T03:10:05.982127 | 2019-01-07T16:00:41 | 2019-01-07T16:00:41 | 17,679,061 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 722 | rd | rvCoeff.Rd | \name{rvCoeff}
\alias{rvCoeff}
\encoding{UTF-8}
\title{
Perform Rv coefficient computation.
}
\description{
Perform Rv coefficient computation.
}
\usage{
rvCoeff(Smat, Tmat, type)
}
\arguments{
\item{Smat}{A square covariance matrix}
\item{Tmat}{A square covariance matrix}
\item{type}{DEPRECATED. Any value here will be ignored}
}
\value{
A single value that is the Rv coefficient.
}
\references{
Robert, P., & Escoufier, Y. (1976). A Unifying Tool for Linear Multivariate Statistical Methods: The RV-Coefficient. \emph{Journal of the Royal Statistical Society. Series C (Applied Statistics)}, \emph{25}(3), 257--265.}
\author{
Derek Beaton
}
\keyword{ misc }
\keyword{ multivariate }
|
cba8a744f2f2cd83bed4289d5337b1c689a0a3b2 | d74208b48e1595366435acfe90584036e89dd88e | /man/getNlTileTifLclNamePathVIIRS.Rd | e88f9fa9b340c2735c6cd65094e321437e5c4351 | [] | no_license | mjdhasan/Rnightlights | 85feaac20d8ed20429d95a41b59fef59e23a4cfa | f34fd986a405b1ca51d9a807849d2274f8e22d22 | refs/heads/master | 2022-11-06T18:50:41.533156 | 2020-06-26T12:11:28 | 2020-06-26T12:11:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,027 | rd | getNlTileTifLclNamePathVIIRS.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getnlfilename.R
\name{getNlTileTifLclNamePathVIIRS}
\alias{getNlTileTifLclNamePathVIIRS}
\title{Constructs the full path used to save/access the decompressed VIIRS .tif file}
\usage{
getNlTileTifLclNamePathVIIRS(nlType = "VIIRS.M",
configName = pkgOptions(paste0("configName_", nlType)), nlPeriod,
tileNum)
}
\arguments{
\item{nlType}{The particular VIIRS type e.g. VIIRS.D for daily VIIRS}
\item{configName}{character the type of raster being processed}
\item{nlPeriod}{the yearMonth in which the tile was created}
\item{tileNum}{the index of the tile as given in nlTileIndex}
}
\value{
a character vector filename of the .tif VIIRS tile
}
\description{
Constructs the full path used to save/access the decompressed VIIRS .tif file
}
\examples{
#using default dirNlTiles
\dontrun{
Rnightlights:::getNlTileTifLclNamePathVIIRS(nlType = "VIIRS.M", nlPeriod = "201401", tileNum = "1")
#returns "/dataPath/tiles/VIIRS_2014_01_75N180W.tif"
}
}
|
2a9e202baeb3adf46e958997b8bdbcbfb28894dc | a178023912345712bc876aedf70fa38df7f3202f | /OPIIF_Proc/OPIIF_Proc.R | 469da9e05ed99b6080b145df522892102b6338ca | [
"MIT"
] | permissive | IFFranciscoME/OPIIF-P16 | 256971f3fd137a233579a450d03fd61e3c4ea9b9 | aa0313a80241a7e7f87a3582343410db0fd8323d | refs/heads/master | 2021-01-20T21:46:30.700910 | 2016-02-14T20:58:52 | 2016-02-14T20:58:52 | 52,220,326 | 1 | 0 | null | 2016-02-21T18:14:00 | 2016-02-21T18:14:00 | null | UTF-8 | R | false | false | 2,247 | r | OPIIF_Proc.R |
# ------------------------------------------------------------------------------------ #
# -- Initial Developer: FranciscoME ----------------------------------------------- -- #
# -- Code: MachineTradeR Main Control --------------------------------------------- -- #
# -- License: MIT ----------------------------------------------------------------- -- #
# ------------------------------------------------------------------------------------ #
# -------------------------------------------------- Matriz Mensual de Estadisticas -- #
# Builds a 5 x (assets + 1) table of monthly return statistics -- mean,
# variance, standard deviation, skewness and kurtosis -- for one month of
# the input data, selected by position.
#
# DataEnt:     data.frame with an `Index` date column plus one return
#              column per asset.
# YearNumber:  position of the year within unique(year(DataEnt$Index)).
# MonthNumber: position of the month within unique(month(DataEnt$Index)).
# Returns a data.frame whose first column (`Fecha`) holds the last date of
# the selected month and whose remaining columns hold the statistics,
# rounded to 4 decimals, one asset per column.
EstadMens <- function(DataEnt, YearNumber, MonthNumber) {
  returns_df <- DataEnt
  n_assets <- ncol(returns_df) - 1
  available_years <- unique(year(returns_df$Index))
  available_months <- unique(month(returns_df$Index))
  # Restrict the data to the requested year, then to the requested month.
  month_data <- returns_df[which(year(returns_df$Index) == available_years[YearNumber]), ]
  month_data <- month_data[which(month(month_data$Index) == available_months[MonthNumber]), ]
  # One row per statistic, one column per asset plus the date column.
  stats_table <- data.frame(matrix(ncol = n_assets + 1, nrow = 5))
  row.names(stats_table) <- c("Media", "Varianza", "DesvEst", "Sesgo", "Kurtosis")
  colnames(stats_table)[1] <- "Fecha"
  # Stamp every row with the last observation date of the month.
  stats_table$Fecha <- month_data$Index[length(month_data$Index)]
  # Apply each statistic column-wise over the asset return columns.
  stat_funs <- list(mean, var, sd, skewness, kurtosis)
  for (k in seq_along(stat_funs)) {
    stats_table[k, 2:ncol(stats_table)] <- round(apply(month_data[, 2:ncol(month_data)],
                                                       MARGIN = 2, FUN = stat_funs[[k]]), 4)
  }
  colnames(stats_table)[2:ncol(stats_table)] <- colnames(returns_df[2:(n_assets + 1)])
  return(stats_table)
}
# Example invocation: second year / second month present in the returns
# data frame `DfRendimientos` (defined elsewhere in the project).
Resultado <- EstadMens(DataEnt=DfRendimientos, YearNumber=2, MonthNumber=2)
# ---------------------------------------------------------- N Aleatorios Markowitz -- #
|
993e148b79f3e8efcac560a53520e7e35809c286 | 4b4d35d45f12ee0d0605db27a6c9328d93e06c52 | /20171010/regrmod_1_st.R | 2dd7c7c1167cf95174e02cd7ab741a77cc0efad9 | [] | no_license | bebosudo/ML-DA | e1332bcf31f27e4f7726c1912396ebbe27efb4a3 | 867adfd39eb3f1c6019247ddca4c9744764cd2f7 | refs/heads/master | 2022-04-18T06:13:33.496412 | 2020-03-30T21:09:11 | 2020-03-30T21:09:11 | 106,531,327 | 1 | 1 | null | 2020-03-30T21:09:13 | 2017-10-11T09:08:05 | HTML | UTF-8 | R | false | false | 1,050 | r | regrmod_1_st.R | # "regression toward the mean"
# From one generation to the next the heights moved closer to the average or regressed toward the mean.
# From the Galton study on hereditary height
# Sir Francis Galton 's parent/child height data
# from http://wienformulaer.math.csi.cuny.edu/UsingR/
# Load the library (UsingR) and data (galton) needed
# Explore graphically the relationship between parent/child heights
# Note that there are more observations for each point.
# Make the graph more informative on this respect (hint: jitter data; point size dependent on numerosity)
# Fit a LS line with "lm"
?formula
# NOTE: this is a fill-in-the-blank student template ("_st"); the formula
# below is intentionally left incomplete and will not parse until the
# response/predictor are supplied (e.g. child ~ parent).
regrline <- lm( ~ , galton)
# add the regression line to the plot with "abline".
# Accuracy of estimation is gauged by theoretical techniques and expressed in terms of "standard error."
# Look at R summary on estimation.
# From the output of "summary" what is the slope of the regression line?
# What is the standard error of the slope?
# Plot the line that would result if there weren't the "regression to the mean" phenomenon
|
9a0f9fa8f922cdf1c87eaef1b13ec5ff6c44cbfb | f5bd80b992420f43bbd62e9b4db3cb9d08d28c1a | /code/explore/S.coelicolor Superhost Data Exploration.R | b189b03bc6b42bb736e4fb5dc1723e7e07b4729c | [] | no_license | franciscozorrilla/S.coelicolor | 3309f6cd236527c85b206f9df8358f3a35950515 | 7a5d7ac832009f5ed626ed426cd80409b205f5d4 | refs/heads/master | 2021-05-03T05:10:22.626597 | 2018-03-05T14:06:05 | 2018-03-05T14:06:05 | 118,149,679 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,125 | r | S.coelicolor Superhost Data Exploration.R |
## S.coelicolor Superhost Data Exploration
# Author: Francisco Zorrilla
# PACKAGES
source("https://bioconductor.org/biocLite.R")
biocLite(c("limma", "edgeR", "piano", "biomaRt", "Glimma"))
install.packages(c('tidyverse', 'RColorBrewer','snowfall','tibble'))
library(limma)
library(edgeR)
library(tidyverse) # Collection of useful R-packages
library(RColorBrewer)
library(biomaRt)
library(Glimma)
library(piano)
library(snowfall)
library(readr) #Need this library to load/read .csv files
library(tibble)
#CLEAR ENVIRONMENT
# NOTE(review): rm(list = ls()) wipes the user's entire workspace as a side
# effect; acceptable for interactive exploration, but avoid it in scripts
# that may be source()d by others.
rm(list = ls())
#IMPORT DATA
# NOTE(review): absolute, machine-specific paths; consider relative paths
# (or the here package) so the script runs on other machines.
setwd("C:/Users/zorrilla/Documents/GitHub/S.coelicolor/data/raw_internal/RNAseq") #Go to data folder
rawCountData_M1152 <- read_csv("rawCountData_M1152.csv") #These two lines give an error/warning message about a missing
rawCountData_M145 <- read_csv("rawCountData_M145.csv") #column name, but they seem to work fine.
setwd("C:/Users/zorrilla/Documents/GitHub/S.coelicolor/code/explore") #Go back to code explore folder
#CHECK DATA
head(rawCountData_M1152) #Just to make sure data was properly imported/nothing weird happened
head(rawCountData_M145)
dim(rawCountData_M1152) #Check dimensions of count matrices, should be equal
dim(rawCountData_M145)
#MAKE SOME COSMETIC MODIFICATIONS
# NOTE(review): read_csv() returns tibbles, and setting row names on a
# tibble triggers the deprecation warning mentioned below -- confirm the
# row names actually survive, or convert with as.data.frame() first.
row.names(rawCountData_M1152)=rawCountData_M1152$X1 #Set row names to gene names. These two lines give a warning
row.names(rawCountData_M145)=rawCountData_M145$X1 #message about tibble depreciation?? They work fine.
rawCountData_M1152$X1 <- NULL # delete columns with gene names
rawCountData_M145$X1 <- NULL
# Wild-type (M145) columns first, mutant (M1152) columns second.
rawCounts = cbind(rawCountData_M145,rawCountData_M1152) #Create single dataframe combining raw counts
#of the two strains
remove(rawCountData_M1152,rawCountData_M145)
View(rawCounts) #Check that new data frame looks good
#Columns 1-9 contain 9 time points P(6,14,18,22,26,30,34,38,42) of F516 sample (WT)
#Columns 10-18 contain 9 time points P(6,14,18,22,26,30,34,38,42) of F517 sample (WT)
#Columns 19-27 contain 9 time points P(6,14,18,22,26,30,34,38,42) of F518 sample (WT)
#Columns 28-36 contain 9 time points P(18,26,30,34,38,42,46,50,51) of F519 sample (MUT)
#Columns 37-45 contain 9 time points P(18,26,30,34,38,42,46,50,51) of F521 sample (MUT)
#Columns 46-54 contain 9 time points P(18,26,30,34,38,42,46,50,51) of F522 sample (MUT)
# Move data into DGEList, which can store additional metadata
x <- DGEList(counts = rawCounts, genes = rownames(rawCounts))
# Add the grouping information (and enforce the order of the samples).
# `group` encodes the time point (1-9) within each of the 6 samples.
group <- factor(rep(1:9,6),levels = c("1","2","3","4","5","6","7","8","9"))
x$samples$group <- group
# `strain`: the first 27 columns are wild type, the last 27 the mutant.
strain <- factor(c(rep("WT",27),rep("MUT",27)),levels = c("WT","MUT"))
x$samples$strain <- strain
#NORMALIZATION USING TMM
x <- calcNormFactors(x, method = "TMM") #do before filtering, violates assumption that downreg genes=upreg genes
#lcpmTMM = cpm(x,normalized.lib.sizes= TRUE, log=TRUE) #not necessary, non standard, need to justify reason for donig this
#FILTERING STEP
# NOTE(review): this assignment shadows edgeR's cpm() with a matrix named
# `cpm`; calls like cpm(x) still resolve to the function (R looks for a
# function object in call position), but renaming the matrix would be
# clearer.
cpm <- cpm(x,normalized.lib.sizes= TRUE) #i think cpm() uses norm.lib.sizes by default
#cpm2 <- cpm(x) #should there be a diffrence between cpm and cpm2?
#lcpm <- cpm(x,normalized.lib.sizes= TRUE, log=TRUE) #use logcpm for filtering, not normalization. Should i use this?
# Keep genes whose CPM exceeds 1 in at least 3 samples.
keep.exprs <- rowSums(cpm > 1) >= 3 #Here we apply a filter that states that for each
# gene at least 3 of the samples should have a CPM value higher than 1.
#is it better to filter using lcpm or cpm?
x <- x[keep.exprs,, keep.lib.sizes = FALSE] #Consider changing this filtering step to account for fact that
dim(x) #we know that gene expression in the MUT will be low for the
#deleted genes. ie: filter based on WT and non-deleted MUT genes
# Visualize effect of TMM, so do I need to use CPM to incorporate TMM?? I thought it was an alternative
# Side-by-side boxplots: log-CPM without vs. with TMM-normalized libraries.
lcpmTMM = cpm(x,normalized.lib.sizes= TRUE, log=TRUE)
lcpm2 = cpm(x,normalized.lib.sizes= FALSE, log=TRUE)
boxplot(lcpm2)
title(main = "logCPM")
boxplot(lcpmTMM)
title(main = "logCPM w/TMM")
remove(lcpm2)
#note, if using CPM or logCPM make sure it is recalculated after filtering
# PCA -ED
# Good to see if there are any outliers in the data: cant really tell from plot A tho
# Two MDS plots side by side: colored by time group (A) and by strain (B).
par(mfrow = c(1, 2))
col.group <- group
levels(col.group) <- brewer.pal(nlevels(col.group), "Set1")
col.group <- as.character(col.group)
x11();plotMDS(lcpmTMM, labels = group, col = col.group) #use TMM not lcmp, use spearman
title(main = "A. Color based on timegroup")
# Red = wild type (first 27 samples), blue = mutant (last 27).
col.group = c(rep('#E41A1C',27),rep('#377EB8',27))
x11();plotMDS(lcpmTMM, labels = group, col = col.group, dim = c(1, 2))
title(main = "B. Color based on strain")
#one of the time point 9 samples from the mutant strain looks like it may be an outlier
#especially considering the general "walk" pattern we see where the smaller numbers tend to be
#lower than the bigger numbers
# Look interactively with Glimma
glMDSPlot(lcpmTMM, labels = strain, groups = x$samples$strain,
          launch = T) #plot is good but table is kind of wonky/doesnt display information besides strain
# PCA - Benj
# NOTE(review): require() mid-script; prefer library() with the other
# package loads at the top of the file.
require(ade4)
#Transpose the data
# (samples become rows, genes columns -- dudi.pca expects observations in rows)
countsForPCA = t(lcpmTMM)
#Perform PCA on the counts
pcar <- dudi.pca(countsForPCA, center = TRUE, scale = FALSE, scannf = FALSE, nf=10)
#Check how much of the total variance each principal component accounts for:
# NOTE(review): `var` shadows stats::var(); harmless here but worth renaming.
var <- pcar$eig/sum(pcar$eig)*100
plot(var, type = 'b')
#Plot factorial map with representation of observations in the 1st 2 components:
x11();s.class(pcar$li[,1:2], strain, cpoint = 1, col = c('blue','red'))
s.class(pcar$li[,2:3], strain, cpoint = 1, col = c('blue','red'))
# LINEAR MODELS
# strainTest <- c(rep(1,3),rep(0,3))
# t1 <- rep(c(1,0,0,0,0,0,0,0,0),6)
# t2 <- rep(c(0,1,0,0,0,0,0,0,0),6)
# t3 <- rep(c(0,0,1,0,0,0,0,0,0),6)
# t4 <- rep(c(0,0,0,1,0,0,0,0,0),6)
# t5 <- rep(c(0,0,0,0,1,0,0,0,0),6)
# t6 <- rep(c(0,0,0,0,0,1,0,0,0),6)
# t7 <- rep(c(0,0,0,0,0,0,1,0,0),6)
# t8 <- rep(c(0,0,0,0,0,0,0,1,0),6)
# t9 <- rep(c(0,0,0,0,0,0,0,0,1),6)
# design <- model.matrix(~ strain ) #only strain, no time component
# design <- model.matrix(~ strain + t2 + t3 + t4 + t5 + t6 + t7 + t8 + t9) #no interaction
# design <- model.matrix(~ strain * t1 * t2 * t3 * t4 * t5 * t6 * t7 * t8 * t9) #dont use this, it creates
#weird interaction terms
t1 <- rep(c(1,0,0,0,0,0,0,0,0),6)
t2 <- rep(c(0,1,0,0,0,0,0,0,0),6)
t3 <- rep(c(0,0,1,0,0,0,0,0,0),6)
t4 <- rep(c(0,0,0,1,0,0,0,0,0),6)
t5 <- rep(c(0,0,0,0,1,0,0,0,0),6)
t6 <- rep(c(0,0,0,0,0,1,0,0,0),6)
t7 <- rep(c(0,0,0,0,0,0,1,0,0),6)
t8 <- rep(c(0,0,0,0,0,0,0,1,0),6)
t9 <- rep(c(0,0,0,0,0,0,0,0,1),6)
design <- model.matrix(~ strain * ( t2 + t3 + t4 + t5 + t6 + t7 + t8 + t9))
design
#daniel: try making design matrix with one term for time, with differnt levels
#if we do this then we cant get interaction terms right?
x <- estimateDisp(x, design) # Calculate dispersion using x and design model
mfit <- glmQLFit(x, design) # Fit data to linear model
head(mfit$coefficients) # Inspect the coefficients that are found
# Perform DE analysis per factor
de <- glmLRT(mfit,coef = 2) #is this legal to put groups of coefficients at the same time? or do i have to do 1 at a time
#how does choice of coeffcients influence pvalues?
topTags(de) #is the fold change relative to MUT or WT?
tt <- topTags(de, n = Inf) #store top DE genes in list tt
genes <- tt$table$genes[tt$table$FDR < 0.001]
#plotMDS(lcpmTMM[genes, ], labels = group, col = col.group) #not needed
#look at interaction terms
################################do tSNE
library(Rtsne)
##### GSA
## Download GO terms from Uniprot, S. coelicolor 'proteome': http://www.uniprot.org/uniprot/?query=proteome:UP000001973
## Use columns 'Gene names (ordered locus)' and 'Gene ontology (GO)'
## Save as tab-delimited
GO<-read.delim('uniprot-proteome%3AUP000001973.tab')
GO<-GO[,-1] # Remove 'Entry' column
GO<-GO[!GO$Gene.names...ordered.locus..=='',] # Remove rows without gene name
colnames(GO)<-c('gene','GO') # Rename columns
# install.packages('splitstackshape') # Run this installation command if the package is not installed yet.
library(splitstackshape)
GO<-cSplit(GO,"GO",sep=';',direction='long') # Split the GO column, separated by ';', and make a long list
head(GO)
library(piano)
GO<-loadGSC(GO) #ignore warning?
fc <- subset(tt$table,select = "logFC")
p <- subset(tt$table,select = "FDR")
##############BEN
resGSA <- runGSA(geneLevelStats=p, #We supply our p-values
directions=fc, #We supply the fold-changes
geneSetStat="reporter", #We decided the method
signifMethod="geneSampling", #We choose the statistical distribution against which we measure our confidence
adjMethod="fdr", #We adjust the gene-set p-values for multiple testing
gsc=GO, #We supply which gene-sets to test
ncpus=4) #We decide the number of processor cores to run the calculations
windows()
GSAheatmap(resGSA,cutoff =10,adjusted = T,ncharLabel = Inf, cellnote='pvalue') #cutoff of 9 - 10 is readable
#dev.off()
#################ED
gsaRes <- runGSA(p, fc, gsc = GO, gsSizeLim = c(10, 400))
x11();GSAheatmap(gsaRes, cutoff = 5, cellnote = "nGenes",ncharLabel = Inf)
#BEN/ED GSAs give me different heatmaps: numbers are different(FCs?) and the GO terms
#are also somewhat differrent. which one should i use and what parameters are causing
#these differences? Why did Ed narrow down the gsSizeLim to c(10,400)?
#How should I determine the parameters to set for geneSetStat and signifMethod?
#how should i run GSA in order to determine DE expression between time points?
#start from line de <- glmLRT(mfit,coef = 2) but change coef to an interaction term coef |
1d988f3bb991957ca00cc687efeae4afe03877a1 | 259fe6446e0f059be228f95745db1aa54ad5ce31 | /R/2-layers-DeepTRIAGE.R | ff937318597d931e94c86cd89981457ddbf6baa4 | [] | no_license | tpq/caress | 9fd1c306e8f6bb23f88203f6e6329a72d4689aaa | 04386b3ab61ef9036e91ab1bbd6e42a1265b5ea9 | refs/heads/master | 2021-06-24T08:16:31.155396 | 2021-03-03T03:34:27 | 2021-03-03T03:34:27 | 202,971,472 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,826 | r | 2-layers-DeepTRIAGE.R | #' Apply a DeepTRIAGE Layer
#'
#' This function applies a variant of the DeepTRIAGE attention
#' mechanism to the incoming layer (see DOI:10.1101/533406).
#' This implementation differs slightly from the publication
#' in that all layers have the same activation function and
#' the random embedding weights are optionally learnable.
#'
#' @inheritParams layer_pseudo_embed
#' @param result_dim An integer. The size of the final layer.
#' @param hidden_dim An integer. The size of the hidden layers.
#' @param hidden_activation A string. The activation for the hidden layers.
#' @param hidden_dropout A numeric. The dropout for the hidden layers.
#' @export
layer_to_dense_DeepTRIAGE <- function(object, result_dim,
embed_dim = result_dim*4, random_embedding = FALSE,
hidden_dim = 32, hidden_activation = "tanh", hidden_dropout = .5,
name = NULL){
# Name layer based on incoming data
if(is.null(name)){
name <- get_incoming_layer_name(object)
}
# Embed the data (N x D x M) [M << D]
embedded_data <- object %>%
layer_pseudo_embed(embed_dim, random_embedding = random_embedding, name = name)
# Compress the embedded data (N x D x P) [P << M]
compressed_data <- embedded_data %>%
layer_dense(units = result_dim, activation = hidden_activation, name = paste0(name, "_3D_compressor")) %>%
layer_dropout(rate = hidden_dropout, name = paste0(name, "_dropout_c"))
# Calculate importance (N x D x 1)
input_dim <- unlist(dim(object)[-1])
importance_scores <- embedded_data %>%
layer_dense(units = hidden_dim, activation = hidden_activation, name = paste0(name, "_hidden_i")) %>%
layer_dropout(rate = hidden_dropout, name = paste0(name, "_dropout_i")) %>%
layer_dense(units = 1, name = paste0(name, "_3D_importance_raw")) %>%
# totally softmax the entire (N x D x 1) layer -> then, reshape it back to (N x D x 1)
layer_flatten(name = paste0(name, "_importance_raw")) %>%
layer_activation_softmax(name = paste0(name, "_importance_scores")) %>%
layer_reshape(c(input_dim, 1), name = paste0(name, "_3D_importance_scores"))
# Dot product: (N x D x P) %*% (N x D x 1) = (N x P x 1)
dot_axis <- length(dim(importance_scores))-2
outgoing <- layer_dot(list(compressed_data, importance_scores), axes = dot_axis,
name = paste0(name, "_3D_latent"))
# Drop to (N x P)
out_dim <- dim(outgoing)[c(-1, -length(dim(outgoing)))]
outgoing %>%
layer_reshape(out_dim, name = paste0(name, "_latent")) %>%
# run through one last hidden layer
layer_dense(units = hidden_dim, activation = hidden_activation, name = paste0(name, "_hidden_l")) %>%
layer_dropout(rate = hidden_dropout, name = paste0(name, "_dropout_l"))
}
|
c2dbca1b92929a4cf649d70fafe88e4432eb3a38 | 882a43935f3353a94c5d67e13f8308c04f9152f9 | /0450_data_frame.R | 79c503639fa910295becd0ab68428119d0bca480 | [] | no_license | akicho8/learn_R | a1711c6cd5f07b004b9dbccae6d681b8148c19ec | 8f0977dfe8da05d179d265a5805304b4ecbebf08 | refs/heads/master | 2021-01-10T20:39:29.459947 | 2013-06-03T15:38:05 | 2013-06-03T15:39:28 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,515 | r | 0450_data_frame.R | # 同じ長さを持つ名前付けされた複数のベクトルからなるリスト???
# DBの扱いと逆なのが気持ちわるすぎる。けどこっちの方が便利だからそうなっているんだろう。でもまだ利点を感じてはない。
team <- c("烏山", "太子堂", "豪徳寺")
win <- c(40, 70, 90)
lose <- c(60, 30, 10)
table <- data.frame(team, win, lose) # ここで変数名がキー名になっている(気持ちわるー)
table
table[c("team", "win")] # チーム名と勝数の列だけ取得(このアクセスのしやすさが利点???)
table[c("team")] # チーム名だけ(こっちは表になってる状態)
table$team # チーム名だけ(こっちはベクトル?)
table$team[table$win >= 50] # 勝数が50以上
table$team[table$win > table$lose] # 勝ち越し(冗長)
with(table, team[win > lose]) # 勝ち越し(簡潔にかける)
pts <- c(400, 700, 900) # ポイントベクトルが別にある
point <- data.frame(team, pts) # キーと値のペアにする
table <- merge(table, point, by="team") # point 列ができた
with(table, table[order(-pts),]) # ポイント順でソート(どゆこと??)
# レコードの追加
team <- c("下馬", "駒沢")
win <- c(60, 65)
lose <- c(40, 35)
pts <- c(10, 20)
table2 <- data.frame(team, win, lose, pts)
table3 <- rbind(table, table2) # 合体
table3
|
d4a7569a561d319c6711b338cbdc4e049b312769 | 16e0c39a5355479827f56268b2f26fe4654bdd59 | /TF-IDF_viz.R | c26ce50c75784122eb4ee5c463bd1055e69ebbc9 | [] | no_license | vijaysakhamuri/TextMining-Topic-Modelling | c76c2724b9f8baa34b5db404ccdc343a32564c93 | ed31851818c4e0e532aea527f7820b2670e67b31 | refs/heads/master | 2020-04-06T13:21:56.698496 | 2018-11-14T06:03:23 | 2018-11-14T06:03:23 | 157,496,471 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,580 | r | TF-IDF_viz.R | #############################################################
########### Visualize the tf-idf of multiple texts
library(gutenbergr)
library(dplyr)
library(tidytext)
library(tidyverse)
physics = gutenberg_download(c(37729, 14725, 13476, 5001),
meta_fields = "author")
physics_words = physics %>%
unnest_tokens(word, text) %>%
count(author, word, sort = TRUE) %>%
ungroup()
physics_words
### compute the Tf-IDF of multiple physicts
physics_words = physics_words %>%
bind_tf_idf(word, author, n)
plot_physics <- physics_words %>%
arrange(desc(tf_idf)) %>%
mutate(word = factor(word, levels = rev(unique(word)))) %>%
mutate(author = factor(author, levels = c("Galilei, Galileo",
"Huygens, Christiaan",
"Tesla, Nikola",
"Einstein, Albert")))
physics_words
library(ggplot2)
library(ggstance)
library(ggthemes)
library(viridis)
ggplot(plot_physics[1:20,], aes(tf_idf, word, fill = author, alpha = tf_idf)) +
geom_barh(stat = "identity") +
labs(title = "Highest tf-idf words in Classic Physics Texts",
y = NULL, x = "tf-idf") +
theme_tufte(base_family = "Arial", base_size = 13, ticks = FALSE) +
scale_alpha_continuous(range = c(0.6, 1), guide = FALSE) +
scale_x_continuous(expand=c(0,0)) +
scale_fill_viridis(end = 0.6, discrete=TRUE) +
theme(legend.title=element_blank()) +
theme(legend.justification=c(1,0), legend.position=c(1,0)) |
8610dd60828510b40b8ab2d339276da3f73ded6b | 1641f4f3816152e0610a0faf8091b03a43c00a38 | /plot1.R | 2c90c4189344683cd8288cde23191f24fc631404 | [] | no_license | KayleeWissink/ExData_Plotting1 | 03634a54731f2ae2c3227b37693b6ccdf29d0f84 | 55d3fb94e98cb0df210c07f26e8b8af92cdb184f | refs/heads/master | 2021-01-21T06:10:20.255245 | 2016-01-07T20:31:05 | 2016-01-07T20:31:05 | 49,224,874 | 0 | 0 | null | 2016-01-07T19:13:29 | 2016-01-07T19:13:29 | null | UTF-8 | R | false | false | 708 | r | plot1.R | ## Load Data
data<- read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", nrows=2075259, check.names=F, stringsAsFactors=F, comment.char="", quote='\"')
data$Date <- as.Date(data$Date, format="%d/%m/%Y")
## Subsetting the data
data_subset <- subset(data, subset=(Date >= "2007-02-01" & Date <= "2007-02-02"))
## Converting dates
date_time <- paste(as.Date(data_subset$Date), data_subset$Time)
data_subset$Datetime <- as.POSIXct(date_time)
## Create Plot
with(data_subset, hist(Global_active_power, xlab = "Global Active Power (kilowatts)", col = "red", main = "Global Active Power"))
## Saving to file
dev.copy(png, file="plot1.png", height=480, width=480)
dev.off()
|
36367ac3c317ae6e028576fed33e57128bddb4e1 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.media.services/man/medialive_create_multiplex.Rd | 775ce49998e31d7a59b590c5f0b3a17660300d52 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 1,876 | rd | medialive_create_multiplex.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/medialive_operations.R
\name{medialive_create_multiplex}
\alias{medialive_create_multiplex}
\title{Create a new multiplex}
\usage{
medialive_create_multiplex(AvailabilityZones, MultiplexSettings, Name,
RequestId, Tags)
}
\arguments{
\item{AvailabilityZones}{[required] A list of availability zones for the multiplex. You must specify exactly
two.}
\item{MultiplexSettings}{[required] Configuration for a multiplex event.}
\item{Name}{[required] Name of multiplex.}
\item{RequestId}{[required] Unique request ID. This prevents retries from creating multiple
resources.}
\item{Tags}{A collection of key-value pairs.}
}
\value{
A list with the following syntax:\preformatted{list(
Multiplex = list(
Arn = "string",
AvailabilityZones = list(
"string"
),
Destinations = list(
list(
MediaConnectSettings = list(
EntitlementArn = "string"
)
)
),
Id = "string",
MultiplexSettings = list(
MaximumVideoBufferDelayMilliseconds = 123,
TransportStreamBitrate = 123,
TransportStreamId = 123,
TransportStreamReservedBitrate = 123
),
Name = "string",
PipelinesRunningCount = 123,
ProgramCount = 123,
State = "CREATING"|"CREATE_FAILED"|"IDLE"|"STARTING"|"RUNNING"|"RECOVERING"|"STOPPING"|"DELETING"|"DELETED",
Tags = list(
"string"
)
)
)
}
}
\description{
Create a new multiplex.
}
\section{Request syntax}{
\preformatted{svc$create_multiplex(
AvailabilityZones = list(
"string"
),
MultiplexSettings = list(
MaximumVideoBufferDelayMilliseconds = 123,
TransportStreamBitrate = 123,
TransportStreamId = 123,
TransportStreamReservedBitrate = 123
),
Name = "string",
RequestId = "string",
Tags = list(
"string"
)
)
}
}
\keyword{internal}
|
d50c1dd5d28672f1ec34a8f7a49e01161cf501c4 | 9d3b2f6f3c4ec2a34365f3c9040d11e764bfe006 | /add_dseq_annotation.R | f5fa0b4dcfb635a2778449aa8dc4688cb02fee6b | [
"MIT"
] | permissive | jfnavarro/AD_POLB_ST | 481b0ab7aab2be24eb61c653f40698154c79b3eb | 5e072094a9f53947ecbbc576ac904e5aca00d0a5 | refs/heads/master | 2022-12-16T10:10:27.828093 | 2020-09-21T12:42:43 | 2020-09-21T12:42:43 | 291,464,183 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 730 | r | add_dseq_annotation.R | #!/usr/bin/env Rscript
# give paths to DESeq output tables
library(org.Mm.eg.db)
addExtraAnnotation <- function(table) {
table$ensembl = mapIds(org.Mm.eg.db, keys=rownames(table), column="ENSEMBL", keytype="SYMBOL", multiVals="first")
table$name = mapIds(org.Mm.eg.db, keys=rownames(table), column="GENENAME", keytype="SYMBOL", multiVals="first")
table$entrez = mapIds(org.Mm.eg.db, keys=rownames(table), column="ENTREZID", keytype="SYMBOL", multiVals="first")
return(table)
}
args = commandArgs(trailingOnly=T)
for (path in args) {
x = read.table(path, sep="\t", header=T, row.names=1)
x = addExtraAnnotation(x)
write.table(x, file=paste("annotated_", path, sep=""), sep="\t", row.names=T, col.names=T)
}
|
6909d078f9fd2d5d30ae14892245f8859884477c | cea51b60f43efac0959bb6e52749f608e1eddd13 | /archived/runner-now-NLScompare/test_files/Isom_1.R | 6f70f79d0e797b63d6ff8b37a10b9f2ac2b81a6e | [] | no_license | ArkaB-DS/GSOC21-improveNLS | 6bd711bca3ad1deac5aff3128cfc02e0237915a9 | f33839f8ceef78591ee296c6e515cd52339bb2b0 | refs/heads/master | 2023-07-16T21:07:33.900548 | 2021-08-22T05:41:26 | 2021-08-22T05:41:26 | 398,542,725 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,583 | r | Isom_1.R | # NLSProbName: Isom_1.R
# NLSProbDescription: { The Isom data frame has 24 rows and 4 columns from an isomerization experiment.
# The four columns are:This data frame contains the following columns:
# `hyd`: partial pressure of hydrogen (psia).
# `n.pent`: partial pressure of n-pentane (psia).
# `iso.pen`: partial pressure of isopentane (psia).
# `rate`: reaction rate for isomerization of n-pentane to isopentane (1/hr).
# }
# Use the Isom data from NRAIA package
## DATA
rate=c(3.541, 2.397, 6.694, 4.722, 0.593, 0.268, 2.797,
2.451, 3.196, 2.021, 0.896, 5.084, 5.686, 1.193,
2.648, 3.303, 3.054, 3.302, 1.271, 11.648, 2.002,
9.604, 7.754, 11.590)
hyd = c(205.8, 404.8, 209.7, 401.6, 224.9, 402.6, 212.7, 406.2, 133.3, 470.9, 300.0,
301.6, 297.3, 314.0, 305.7, 300.1, 305.4, 305.2, 300.1, 106.6, 417.2, 251.0,
250.3, 145.1)
iso.pen = c(37.1, 36.3, 49.4, 44.9, 116.3, 128.9, 134.4, 134.9, 87.6, 86.9, 81.7,
101.7, 10.5, 157.1, 86.0, 90.2, 87.4, 87.0, 66.4, 33.0, 32.9, 41.5,
14.7, 50.2)
n.pent = c( 90.9, 92.9, 174.9, 187.2, 92.7, 102.2, 186.9, 192.6, 140.8, 144.2, 68.3,
214.6, 142.2, 146.7, 142.0, 143.7, 141.1, 141.5, 83.0, 209.6, 83.9, 294.4,
148.0, 291.0)
NLSdata <- data.frame(rate,hyd,iso.pen,n.pent)
## STARTING VALUE
b2 = 0.1
b3 = 0.1
b4 = 0.1
NLSstart <-c(b2 = b2, b3 = b3, b4 = b4) # a starting vector (named!)
## MODEL
NLSformula <- rate ~ b3*(n.pent - iso.pen/1.632)/(1+b2*hyd+b3*n.pent+b4*iso.pen)
NLSlower<- c(-Inf,-Inf,-Inf)
NLSupper<- c(Inf,Inf,Inf)
NLSweights <- rep(1,length(n.pent))
NLSsubset <- 1:length(n.pent)
NLSstart1 <-c(b1=1, b2 = b2, b3 = b3, b4 = b4) # a starting vector (named!)
rm(b2,b3,b4,rate,hyd,iso.pen,n.pent)
## tx <- nlxb(NLSformula, start=NLSstart, data=NLSdata)
# residual sumsquares = 414.6, at b2=-0.045945, b3= 0.105282, b4 =0.010473
## Nonlinear regression model
## model: rate ~ b3 * (n.pent - iso.pen/1.632)/(1 + b2 * hyd + b3 * n.pent + b4 * iso.pen)
## data: NLSdata
## b2 b3 b4 .lin
## 0.0708 0.0377 0.1671 35.9201
## residual sum-of-squares: 3.23
## Number of iterations to convergence: 7
## Achieved convergence tolerance: 6.24e-06
## NLSformula1 <- rate ~ b1*b3*(n.pent - iso.pen/1.632)/(1+b2*hyd+b3*n.pent+b4*iso.pen)
## txn0 <- nls(NLSformula, start=NLSstart, data=NLSdata) # singular gradient
## txn <- nls(NLSformula, start=NLSstart, data=NLSdata, algorith="plinear")
## summary(txn) # shows that tx1 below gets SEs right
## print(txn)
## tx1 <- nlxb(NLSformula1, start=NLSstart1, data=NLSdata)
## print(tx1)
## coef(tx1)
|
79305c86a7f99462f0191bd22a5f769b42c27f5f | 483397ab64360fd9df71336118baa478d5f07c0d | /R/data.R | bc85877f7f41eb4ebfa2d87a0d6980c59962646f | [] | no_license | cagarciae/GSA.UN | 34de48c4fad0318ea8b94af5fe8c8a81d653a85d | b09778c8cf40ba93fdd552ebfbeea2062d0bd1d1 | refs/heads/master | 2023-04-09T12:18:51.156839 | 2022-08-25T19:40:15 | 2022-08-25T19:40:15 | 255,471,233 | 0 | 1 | null | 2020-06-16T16:45:10 | 2020-04-14T00:24:59 | R | UTF-8 | R | false | false | 2,102 | r | data.R | #' @title Example - parameters names
#'
#' @description 10 parameters names.
#'
#'
#'@format A \code{value}
#' \describe{
#' \item{pp_names}{a vector of characters}
#' }
#'
#' @references Arenas-Bautista, M. C. (2020). Integration of Hydrological and Economical
#' Aspects for Water Management in Tropical Regions. Case Study: Middle Magdalena Valley, Colombia.
#' National University of Colombia.
#'
#' @author CGE
#'
"pp_names"
#' @title Results of a sample model
#'
#' @description Output generated with an example mathematical model.
#'
#'
#'@format A \code{matrix}
#' \describe{
#' \item{out_set}{a matrix of dimensions 500 x 365 (pp x t), runs of the model x
#' temporary steps (365 days)}
#' }
#'
#' @references Arenas-Bautista, M. C. (2020). Integration of Hydrological and Economical
#' Aspects for Water Management in Tropical Regions. Case Study: Middle Magdalena Valley, Colombia.
#' National University of Colombia.
#'
"out_set"
#' @title Set of parameters randomly generated
#'
#' @description It contains 10 parameters
#'
#'@format A \code{matrix}
#' \describe{
#' \item{parameters_set}{a matrix of dimensions 500 x 10 (n x pp),runs of the model x
#' number of parameters}
#' }
#'
#' @references Arenas-Bautista, M. C. (2020). Integration of Hydrological and Economical
#' Aspects for Water Management in Tropical Regions. Case Study: Middle Magdalena Valley, Colombia.
#' National University of Colombia.
#'
"parameters_set"
#' @title First four conditional moments of example data
#'
#' @description Data generated by Cond_Moments example
#'
#'
#'@format A \code{list}
#' \describe{
#' \item{CM}{A list of arrays, each array has dimensions of steps, t, pp}
#' }
#'
#'
#' @author Camila Garcia-Echeverri
#'
"CM"
#' @title First four conditional moments of example data
#'
#' @description Data generated with the example of the function Cond_Moments
#'
#'
#'@format A \code{data.frame}
#' \describe{
#' \item{data_Bstat}{a data frame of dimensions t x 6}
#' }
#'
#' @source Function Bstat
#'
#' @author Camila Garcia-Echeverri
#'
"data_Bstat"
|
68162da6ef6d133c2c572db1e7d4b688c60f50c6 | 4be9593ef80a8e4eb6d802d9975d3b57b1ee83fb | /EMR_long.R | b5b70d82a123bfefeac1efcd5f3dfd0fe467cc85 | [] | no_license | silviapineda/PTB | 96f795efcc1a07dba72e5ec3fdd1ad0aa1271252 | a907c8c1c5f2a0434b6c11c8b862d453a598875f | refs/heads/master | 2020-03-11T21:55:40.625723 | 2018-10-03T15:44:00 | 2018-10-03T15:44:00 | 130,278,055 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 15,671 | r | EMR_long.R | rm(list = ls(all = TRUE))
x<-date()
print(x)
###########################################################################################
### PROJECT: EMR PTB
###
### CITATION:
###
### PROCESS:
###
### DESCRIP: Analysis of EMR data for PTB
###
###
### Author: Silvia Pineda
### Date: April, 2018
############################################################################################
library(lattice)
library(lme4)
library("RColorBrewer")
library(ggplot2)
working_directory<-"/Users/Pinedasans/PTB/"
setwd(working_directory)
EMR_long_labs<-read.csv("Data/EMR_LABS_Term_PTB_longitudinal_36_ordered.csv")
EMR_long_diags<-read.csv("Data/EMR_Diagnoses_Term_PTB_longitudinal_36.csv")
EMR_long_meds<-read.csv("Data/EMR_Meds_Term_PTB_longitudinal_36.csv")
#########################
#### Diags data ########
########################
##Extract Individual ID for the Patient_index
splitpop <- strsplit(as.character(EMR_long_diags$Patient_index),"_")
EMR_long_diags$Individual_id <- unlist(lapply(splitpop, "[", 1))
EMR_long_diags$Birth_id<- unlist(lapply(splitpop, "[", 2))
##Select all the variables
EMR_long_diags_data<-EMR_long_diags[,6:(ncol(EMR_long_diags)-2)] ##2321 diags
rownames(EMR_long_diags_data)<-EMR_long_diags$Sample_ID
table(EMR_long_diags$Term)
matrix<-data.matrix(table(EMR_long_diags$Patient_index,EMR_long_diags$Term))
matrix[which(matrix[,1]!=0 & matrix[,2]!=0),] ##Individuals classify as PTB and Term
dim(matrix[which(matrix[,1]!=0),]) #155 births with PTB
dim(matrix[which(matrix[,2]!=0),]) #3364 births with Term
###Select the lab test that are complete
num_null<-NULL
for (i in 1:ncol(EMR_long_diags_data)){
num_null[i]<-dim(table(EMR_long_diags_data[,i]))
}
##None null
EMR_long_diags_merge<-cbind(EMR_long_diags$Term,EMR_long_diags$WeekOfPregnancy,EMR_long_diags$Patient_index,EMR_long_diags$Individual_id,EMR_long_diags_data)
colnames(EMR_long_diags_merge)[1:4]<-c("Term","WeekOfPregnancy","Pat_birth_id","Patient_id")
EMR_long_diags_merge$Pat_birth_id<-factor(EMR_long_diags_merge$Pat_birth_id)
EMR_long_diags_merge$Term<-factor(EMR_long_diags$Term,levels = c("Term","PTB"))
EMR_long_diags_merge$Patient_id<-factor(EMR_long_diags_merge$Patient_id)
results_2categ<-matrix(NA,ncol(EMR_long_diags_merge),4)
for (i in 5:ncol(EMR_long_diags_merge)){
print(i)
fm_full <- try(glmer(Term ~ relevel(factor(EMR_long_diags_merge[,i]),ref="0") + WeekOfPregnancy + (1|Pat_birth_id) + (1|Patient_id),
data=EMR_long_diags_merge, family=binomial))
if(class(fm_full)!="try-error"){
results_2categ[i,1]<-coefficients(summary(fm_full))[2,1] #coef diags
results_2categ[i,2]<-coefficients(summary(fm_full))[2,4] #p diags
results_2categ[i,3]<-coefficients(summary(fm_full))[3,1] #coef week
results_2categ[i,4]<-coefficients(summary(fm_full))[3,4] #p week
}
}
results_2categ<-results_2categ[-c(1:4),]
colnames(results_2categ)<-c("coef_diags","p_diags","coef_week","p_week")
rownames(results_2categ)<-colnames(EMR_long_diags_merge)[-c(1:4)]
write.csv(results_2categ,"results_diags.csv")
##adjust for MT
p_val_long_diags_adj<-p.adjust(results_2categ[,2],method = "fdr")
table(p_val_long_diags_adj<0.05) #129
##Extract significant
id_sign<-match(names(which(p_val_long_diags_adj<0.05)),colnames(EMR_long_diags_merge))
EMR_long_diags_merge[,id_sign]
#########################
#### MEDs data ########
########################
##Extract Individual ID for the Patient_index
splitpop <- strsplit(as.character(EMR_long_meds$Patient_index),"_")
EMR_long_meds$Individual_id <- unlist(lapply(splitpop, "[", 1))
EMR_long_meds$Birth_id<- unlist(lapply(splitpop, "[", 2))
##Select all the variables
EMR_long_meds_data<-EMR_long_meds[,6:(ncol(EMR_long_meds)-2)] ##427 meds
table(EMR_long_meds$Term) #426 PTB and 4193 with TERM
matrix<-data.matrix(table(EMR_long_meds$Patient_index,EMR_long_meds$Term))
matrix[which(matrix[,1]!=0 & matrix[,2]!=0),] ##Individuals classify as PTB and Term
dim(matrix[which(matrix[,1]!=0),]) #218 births with PTB
dim(matrix[which(matrix[,2]!=0),]) #2894 births with Term
###Select the lab test that are complete
num_null<-NULL
for (i in 1:ncol(EMR_long_meds_data)){
num_null[i]<-dim(table(EMR_long_meds_data[,i]))
}
###None
EMR_long_meds_merge<-cbind(EMR_long_meds$Term,EMR_long_meds$WeekOfPregnancy,EMR_long_meds$Patient_index,
EMR_long_meds$Individual_id,EMR_long_meds_data)
colnames(EMR_long_meds_merge)[1:4]<-c("Term","WeekOfPregnancy","Patient_birth_id","Patient_id")
EMR_long_meds_merge$Patient_birth_id<-factor(EMR_long_meds_merge$Patient_birth_id)
EMR_long_meds_merge$Term<-factor(EMR_long_meds_merge$Term,levels = c("Term","PTB"))
EMR_long_meds_merge$Patient_id<-factor(EMR_long_meds_merge$Patient_id)
results_meds<-matrix(NA,ncol(EMR_long_meds_merge),4)
for (i in 5:ncol(EMR_long_meds_merge)){
print(i)
fm_full <- try(glmer(Term ~ relevel(factor(EMR_long_meds_merge[,i]),ref="0") + WeekOfPregnancy + (1|Patient_birth_id) +
(1|Patient_id),data=EMR_long_meds_merge, family=binomial))
if(class(fm_full)!="try-error"){
results_meds[i,1]<-coefficients(summary(fm_full))[2,1] #coef diags
results_meds[i,2]<-coefficients(summary(fm_full))[2,4] #p diags
results_meds[i,3]<-coefficients(summary(fm_full))[3,1] #coef week
results_meds[i,4]<-coefficients(summary(fm_full))[3,4] #p week
}
}
results_meds<-results_meds[-c(1:4),]
colnames(results_meds)<-c("coef_meds","p_meds","coef_week","p_week")
rownames(results_meds)<-colnames(EMR_long_meds_merge)[-c(1:4)]
write.csv(results_meds,"results_meds.csv")
##adjust for MT
p_val_long_meds_adj<-p.adjust(results_meds[,2],method = "fdr")
table(p_val_long_meds_adj<0.05) #6
##Extract significant
id_sign<-match(names(which(p_val_long_meds_adj<0.05)),colnames(EMR_long_meds_merge))
EMR_long_meds_merge_sign<-EMR_long_meds_merge[,c(1:4,id_sign)]
results_meds[which(p_val_long_meds_adj<0.05),]
tiff("Progesterone.Vaginal.Insert_MEDS.tiff",res=300,w=2000,h=2500)
ggplot(EMR_long_meds_merge_sign, aes(x=as.character(WeekOfPregnancy))) +
geom_bar(data=EMR_long_meds_merge_sign[EMR_long_meds_merge_sign$Term=="Term",],
aes(y=(Progesterone.Vaginal.Insert)/length(Progesterone.Vaginal.Insert),fill=Term), stat="identity") +
geom_bar(data=EMR_long_meds_merge_sign[EMR_long_meds_merge_sign$Term=="PTB",],
aes(y=-(Progesterone.Vaginal.Insert)/length(Progesterone.Vaginal.Insert),fill=Term), stat="identity") +
geom_hline(yintercept=0, colour="white", lwd=1) +
coord_flip(ylim=c(-0.05,0.05)) +
scale_y_continuous(breaks=seq(-0.05,0.05,0.025), labels=c(0.05,0.025,0,0.025,0.05)) +
labs(y="Percentage of Progesterone.Vaginal.Insert", x="Week of pregnancy") +
ggtitle(" PTB (10/416) Term (91/4102)")
dev.off()
##Interaction
p_value_int_med<-NULL
for (i in 4:ncol(EMR_long_meds_merge)){
print(i)
fm_full <- try(glmer(Term ~ EMR_long_meds_merge[,i]*WeekOfPregnancy + (1|Patient_ID) ,data=EMR_long_meds_merge,
family=binomial))
if(class(fm_full)!="try-error"){
if(dim(coefficients(summary(fm_full)))[1]>3){
p_value_int_med[i]<-coefficients(summary(fm_full))[4,4]
}
}
}
p_val_int_meds_adj<-p.adjust(p_value_int_med,method = "fdr")
table(p_val_int_meds_adj<0.05) #
id_sign<-match(colnames(EMR_long_meds_merge[,which(p_val_int_meds_adj<0.05)]),colnames(EMR_long_meds_merge))
p <- ggplot(fm_full, aes(x = WeekOfPregnancy, y = EMR_long_meds_merge_sign[,i], colour = Term)) +
geom_point(size=1.2) +
geom_smooth(method = "glm", method.args = list(family = "binomial"),size=0.8) +
labs(x = "Weeks of Pregnancy",y = "Meds") + theme_bw() + theme_light()
print(p)
################
### LAB TEST ###
###############
##Extract Individual ID for the Patient_index
splitpop <- strsplit(as.character(EMR_long_labs$Patient_index),"_")
EMR_long_labs$Individual_id <- unlist(lapply(splitpop, "[", 1))
EMR_long_labs$Birth_id<- unlist(lapply(splitpop, "[", 2))
##Select all the variables
EMR_long_labs_data<-EMR_long_labs[,7:(ncol(EMR_long_labs)-2)] ##196 lab test
rownames(EMR_long_labs_data)<-EMR_long_labs$Sample_ID
matrix<-data.matrix(table(EMR_long_labs$Patient_index,EMR_long_labs$Term))
matrix[which(matrix[,1]!=0 & matrix[,2]!=0),] ##Individuals classify as PTB and Term
dim(matrix[which(matrix[,1]!=0),]) #106 births with PTB
dim(matrix[which(matrix[,2]!=0),]) #2136 births with Term
###Select the lab test that are complete
num_null<-NULL
for (i in 1:ncol(EMR_long_labs_data)){
num_null[i]<-dim(table(EMR_long_labs_data[,i]))
}
EMR_long_labs_full<-EMR_long_labs_data[,which(num_null>1)] ##Only 27 lab test are complete
###Obtain the number of categories per lab test
num_categ<-NULL
for (i in 1:ncol(EMR_long_labs_full)){
num_categ[i]<-dim(table(EMR_long_labs_full[,i]))
}
EMR_long_labs_full_2categ<-EMR_long_labs_full[,which(num_categ==2)]
EMR_long_labs_full_3categ<-EMR_long_labs_full[,which(num_categ==3)]
##For those that only have the categories taken vs taken ab
EMR_long_labs_merge_2categ<-cbind(EMR_long_labs$Term,EMR_long_labs$WeekOfPregnancy,EMR_long_labs$Patient_index,EMR_long_labs$Individual_id,EMR_long_labs_full_2categ)
colnames(EMR_long_labs_merge_2categ)[1:4]<-c("Term","WeekOfPregnancy","Pat_birth_id","Patient_id")
EMR_long_labs_merge_2categ$Pat_birth_id<-factor(EMR_long_labs_merge_2categ$Pat_birth_id)
EMR_long_labs_merge_2categ$Term<-factor(EMR_long_labs_merge_2categ$Term,levels = c("Term","PTB"))
EMR_long_labs_merge_2categ$Patient_id<-factor(EMR_long_labs_merge_2categ$Patient_id)
results_2categ<-matrix(NA,ncol(EMR_long_labs_merge_2categ),4)
for (i in 5:ncol(EMR_long_labs_merge_2categ)){
print(i)
fm_full <- try(glmer(Term ~ relevel(EMR_long_labs_merge_2categ[,i],ref="Not_taken") + WeekOfPregnancy + (1|Pat_birth_id) + (1|Patient_id),
data=EMR_long_labs_merge_2categ, family=binomial))
if(class(fm_full)!="try-error"){
results_2categ[i,1]<-coefficients(summary(fm_full))[2,1] #coef not-taken
results_2categ[i,2]<-coefficients(summary(fm_full))[2,4] #p not taken
results_2categ[i,3]<-coefficients(summary(fm_full))[3,1] #coef week
results_2categ[i,4]<-coefficients(summary(fm_full))[3,4] #p week
}
}
results_2categ<-results_2categ[-c(1:4),]
colnames(results_2categ)<-c("coef takenAb","p takenAb","coef week","p week")
rownames(results_2categ)<-colnames(EMR_long_labs_merge_2categ)[-c(1:4)]
write.csv(results_2categ,"results_2categ_labs.csv")
# EMR_long_labs_merge_2categ$Glucose..non.fasting_num<-ifelse(EMR_long_labs_merge_2categ$Glucose..non.fasting=="Not_taken",0,1)
#
# tiff("LABS_Glucose..non.fasting.tiff",res=300,w=2000,h=2500)
# ggplot(EMR_long_labs_merge_2categ, aes(x=as.character(WeekOfPregnancy))) +
# geom_bar(data=EMR_long_labs_merge_2categ[EMR_long_labs_merge_2categ$Term=="Term",],
# aes(y=(Glucose..non.fasting_num)/length(Glucose..non.fasting_num),fill=Term), stat="identity") +
# geom_bar(data=EMR_long_labs_merge_2categ[EMR_long_labs_merge_2categ$Term=="PTB",],
# aes(y=-(Glucose..non.fasting_num)/length(Glucose..non.fasting_num),fill=Term), stat="identity") +
# geom_hline(yintercept=0, colour="white", lwd=1) +
# coord_flip(ylim=c(-0.05,0.05)) +
# scale_y_continuous(breaks=seq(-0.05,0.05,0.025), labels=c(0.05,0.025,0,0.025,0.05)) +
# labs(y="Percentage of Abnormal Glucose..non.fasting", x="Week of pregnancy") +
# ggtitle(" PTB Term")
# dev.off()
##For those that have three categories
# Same mixed-effects analysis as above, but labs with three status levels
# (Not_taken / Taken_normal / Taken_abnormal); reference level is Taken_normal,
# so two lab coefficients are extracted per model.
EMR_long_labs_merge_3categ<-cbind(EMR_long_labs$Term,EMR_long_labs$WeekOfPregnancy,EMR_long_labs$Patient_index,EMR_long_labs$Individual_id,EMR_long_labs_full_3categ)
colnames(EMR_long_labs_merge_3categ)[1:4]<-c("Term","WeekOfPregnancy","Pat_birth_id","Patient_id")
EMR_long_labs_merge_3categ$Pat_birth_id<-factor(EMR_long_labs_merge_3categ$Pat_birth_id)
EMR_long_labs_merge_3categ$Term<-factor(EMR_long_labs_merge_3categ$Term,levels = c("Term","PTB"))
EMR_long_labs_merge_3categ$Patient_id<-factor(EMR_long_labs_merge_3categ$Patient_id)
##The reference is taken normal
results_3categ<-matrix(NA,ncol(EMR_long_labs_merge_3categ),6)
for (i in 5:ncol(EMR_long_labs_merge_3categ)){
  print(i)  # progress indicator
  fm_full <- try(glmer(Term ~ relevel(EMR_long_labs_merge_3categ[,i],ref="Taken_normal") + WeekOfPregnancy + (1|Pat_birth_id) +(1|Patient_id),
                       data=EMR_long_labs_merge_3categ, family=binomial))
  # Robust try-error detection (see note on the 2-category loop above).
  if(!inherits(fm_full, "try-error")){
    results_3categ[i,1]<-coefficients(summary(fm_full))[2,1] #coef not-taken
    results_3categ[i,2]<-coefficients(summary(fm_full))[2,4] #p not taken
    results_3categ[i,3]<-coefficients(summary(fm_full))[3,1] #coef taken-ab
    results_3categ[i,4]<-coefficients(summary(fm_full))[3,4] #p taken-ab
    results_3categ[i,5]<-coefficients(summary(fm_full))[4,1] #coef week
    results_3categ[i,6]<-coefficients(summary(fm_full))[4,4] #p week
  }
}
results_3categ<-results_3categ[-c(1:4),]
colnames(results_3categ)<-c("coef notTaken","p notTaken","coef takenAb","p takenAb","coef week","p week")
rownames(results_3categ)<-colnames(EMR_long_labs_merge_3categ)[-c(1:4)]
write.csv(results_3categ,"results_3categ_labs.csv")
## Plot: Not taken vs. Taken normal for Neutrophil.Absolute.Count.
# FIX: the original overwrote the factor column Neutrophil.Absolute.Count with
# a 0/1 numeric here, which made the second recode below (string comparisons
# on the now-numeric column) produce only NAs.  Store the indicator in a new
# column and leave the original factor intact.
EMR_long_labs_merge_3categ$Neutrophil_notTaken<-ifelse(EMR_long_labs_merge_3categ$Neutrophil.Absolute.Count=="Not_taken",1,
                                                       ifelse(EMR_long_labs_merge_3categ$Neutrophil.Absolute.Count=="Taken_normal",0,NA))
tiff("Neutrophil.Absolute.Count_labs_takenNormvsNotTaken.tiff",res=300,w=2000,h=2500)
# Back-to-back (population-pyramid style) bar chart: Term births to the right,
# PTB to the left, by week of pregnancy.
ggplot(EMR_long_labs_merge_3categ, aes(x=as.character(WeekOfPregnancy))) +
  geom_bar(data=EMR_long_labs_merge_3categ[EMR_long_labs_merge_3categ$Term=="Term",],
           aes(y=(Neutrophil_notTaken)/length(Neutrophil_notTaken),fill=Term), stat="identity") +
  geom_bar(data=EMR_long_labs_merge_3categ[EMR_long_labs_merge_3categ$Term=="PTB",],
           aes(y=-(Neutrophil_notTaken)/length(Neutrophil_notTaken),fill=Term), stat="identity") +
  geom_hline(yintercept=0, colour="white", lwd=1) +
  coord_flip(ylim=c(-0.2,0.2)) +
  scale_y_continuous(breaks=seq(-0.2,0.2,0.1), labels=c(0.2,0.1,0,0.1,0.2)) +
  labs(y="Percentage of Not Taken vs. Taken Normal (Neutrophil.Absolute.Count)", x="Week of pregnancy") +
  ggtitle("                              PTB                                         Term")
dev.off()
## Plot: Taken abnormal vs. Taken normal for Neutrophil.Absolute.Count.
# BUG FIX: at this point the previous plot had already replaced
# Neutrophil.Absolute.Count with 0/1 numerics, so comparing it against
# "Taken_normal"/"Taken_abnormal" yielded only NAs and an empty plot.
# Recode from the untouched source column in EMR_long_labs_full_3categ;
# row order matches because the merged frame was built from it with cbind.
EMR_long_labs_merge_3categ$Neutrophil.Absolute.Count<-ifelse(EMR_long_labs_full_3categ$Neutrophil.Absolute.Count=="Taken_normal",0,
                                                             ifelse(EMR_long_labs_full_3categ$Neutrophil.Absolute.Count=="Taken_abnormal",1,NA))
tiff("Neutrophil.Absolute.Count_labs_takenNormvsTakenAb.tiff",res=300,w=2000,h=2500)
ggplot(EMR_long_labs_merge_3categ, aes(x=as.character(WeekOfPregnancy))) +
  geom_bar(data=EMR_long_labs_merge_3categ[EMR_long_labs_merge_3categ$Term=="Term",],
           aes(y=(Neutrophil.Absolute.Count)/length(Neutrophil.Absolute.Count),fill=Term), stat="identity") +
  geom_bar(data=EMR_long_labs_merge_3categ[EMR_long_labs_merge_3categ$Term=="PTB",],
           aes(y=-(Neutrophil.Absolute.Count)/length(Neutrophil.Absolute.Count),fill=Term), stat="identity") +
  geom_hline(yintercept=0, colour="white", lwd=1) +
  coord_flip(ylim=c(-0.05,0.05)) +
  scale_y_continuous(breaks=seq(-0.05,0.05,0.025), labels=c(0.05,0.025,0,0.025,0.05)) +
  labs(y="Percentage of Taken Abnormal vs. Taken Normal (Neutrophil.Absolute.Count)", x="Week of pregnancy") +
  ggtitle("                              PTB                                         Term")
dev.off()
|
ba671ebd5f5fa24041313dd955edc847211549ba | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/8683_0/rinput.R | d0aa7a1cd48e0386d60c4eef9bc09d6d53c65ae8 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
# Read the Newick tree for alignment 8683_0, remove its root, and write the
# unrooted tree back out -- presumably as input for codeml/PAML, which expects
# unrooted trees (TODO confirm from the pipeline).  Requires the 'ape'
# package to be attached before this point.
testtree <- read.tree("8683_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="8683_0_unrooted.txt")
ec8f57ea6725158cfd1157812f5d786d2d8e15a7 | 775ad47961377bc0deb2aeb7d524bc4d1544e6e6 | /plot4.R | 634ba5c5f5f51257db7aacbe350c89aa265e2324 | [] | no_license | mcatalano/ExData_Plotting2 | 7678ad25d84d405fee5d04818b532ff33279468b | 00e6b2b046235648f05b77817a014be273fa18a4 | refs/heads/master | 2021-01-11T08:33:29.365743 | 2015-03-11T21:44:09 | 2015-03-11T21:44:09 | 32,041,587 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 909 | r | plot4.R | ## Load packages
library(data.table)
library(ggplot2)
## Read data files; store NEI as data table
SCC <- readRDS("Source_Classification_Code.rds")
NEI <- readRDS("summarySCC_PM25.rds")
DT <- data.table(NEI)
## Retrieve SCC codes for entries with 'coal' in 'Short.Name'
SCC_codes <- SCC[grep('[cC]oal', SCC$Short.Name), 'SCC']
## Subset data from NEI with SCC equal to character in SCC_codes list
coal_data <- DT[SCC == as.character(SCC_codes), ]
## Open graphics device
png(file = "plot4.png")
## Bar plot showing total emissions and individual data points as color.
q <- qplot(year, Emissions, data = coal_data, geom = 'bar', stat = 'identity',
fill = Emissions, xlab = 'Year', ylab = 'Total Tons of PM2.5',
main = 'Coal-Related Emissions')
## Scale x axis
q <- q + scale_x_continuous(breaks = c(1999,2002,2005,2008))
## Print plot
print(q)
## Close graphics device
dev.off()
|
da5843f792b5f1d6bdac3cae8e5421c904ec8189 | 14186d0abe0cb632c3c5795e30467b6cd2472c57 | /Taxonomic_profile.R | 57fb3ba63a7692da907d92e18b51f227b0ecc098 | [] | no_license | AnneChao/iNEXT.4step | cb483b070e35c5681118291ac4b6b250747028fd | 1f4035774d1a9e6f6fdace03813fabee1c98b6ef | refs/heads/master | 2022-11-17T00:41:05.090964 | 2020-06-29T05:39:49 | 2020-06-29T05:39:49 | 275,739,070 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 25,290 | r | Taxonomic_profile.R |
# Asymptotic (Chao-style) Hill-number diversity profile for abundance data.
#   x: species abundance counts (zeros are dropped)
#   q: vector of diversity orders
# Returns one diversity estimate per order in q.  q = 0 uses the Chao1
# richness estimator, q = 1 the Chao-Jost Shannon estimator, other integer q
# a closed form, and non-integer q a series expansion over the frequency
# counts plus a singleton correction.
Diversity_profile <- function(x,q){
  x = x[x>0]
  n = sum(x)                 # sample size
  f1 = sum(x==1)             # singletons
  f2 = sum(x==2)             # doubletons
  # p1: estimated mean detection probability of a singleton species.
  p1 = ifelse(f2>0,2*f2/((n-1)*f1+2*f2),ifelse(f1>0,2/((n-1)*(f1-1)+2),1))
  r <- 1:(n-1)
  # Sub computes the estimate for a single (scalar) order q.
  Sub <- function(q){
    if(q==0){
      # Observed richness plus Chao1 estimate of undetected species.
      sum(x>0) + (n-1)/n*ifelse(f2>0, f1^2/2/f2, f1*(f1-1)/2)
    }
    else if(q==1){
      # Shannon diversity: observed part A plus singleton correction B.
      A <- sum(x/n*(digamma(n)-digamma(x)))
      B <- ifelse(f1==0|p1==1,0,f1/n*(1-p1)^(1-n)*(-log(p1)-sum((1-p1)^r/r)))
      exp(A+B)
    }else if(abs(q-round(q))==0){
      # Integer q > 1: unbiased estimator of sum(p_i^q) via falling factorials.
      A <- sum(exp(lchoose(x,q)-lchoose(n,q)))
      #ifelse(A==0,NA,A^(1/(1-q)))
      A^(1/(1-q))
    }else {
      # Non-integer q: general series expansion over the distinct counts.
      sort.data = sort(unique(x))
      tab = table(x)
      term = sapply(sort.data,function(z){
        k=0:(n-z)
        sum(choose(k-q,k)*exp(lchoose(n-k-1,z-1)-lchoose(n,z)))
      })
      r <- 0:(n-1)   # note: shadows the outer r on purpose for this branch
      A = sum(tab*term)
      B = ifelse(f1==0|p1==1,0,f1/n*(1-p1)^(1-n)*(p1^(q-1)-sum(choose(q-1,r)*(p1-1)^r)))
      (A+B)^(1/(1-q))
    }
  }
  sapply(q, Sub)
}
Diversity_profile_MLE <- function(x,q){
  # Empirical (plug-in / MLE) Hill numbers for abundance data.
  #   x: abundance counts; q: vector of diversity orders.
  # Returns one Hill number per order: richness at q = 0, exponential of
  # Shannon entropy at q = 1, and the usual power form otherwise.
  rel_ab <- x[x > 0] / sum(x)
  hill_one <- function(ord) {
    if (ord == 0) {
      sum(rel_ab > 0)
    } else if (ord == 1) {
      exp(-sum(rel_ab * log(rel_ab)))
    } else {
      exp(log(sum(rel_ab^ord)) / (1 - ord))
    }
  }
  vapply(q, hill_one, numeric(1))
}
Diversity_Tsallis <- function(x,q){
  # Asymptotic Tsallis entropies: converts the estimated Hill numbers of
  # orders q into entropies ((D^(1-q) - 1)/(1 - q); log(D) at q = 1).
  hill <- Diversity_profile(x, q)
  out <- numeric(length(hill))
  is_one <- q == 1
  out[is_one] <- log(hill[is_one])
  ord <- q[!is_one]
  out[!is_one] <- (hill[!is_one]^(1 - ord) - 1) / (1 - ord)
  return(out)
}
Diversity_Tsallis_MLE <- function(x,q){
  # Empirical Tsallis entropies, derived from the plug-in Hill numbers
  # ((D^(1-q) - 1)/(1 - q); log(D) at q = 1).
  hill <- Diversity_profile_MLE(x, q)
  out <- numeric(length(hill))
  is_one <- q == 1
  out[is_one] <- log(hill[is_one])
  ord <- q[!is_one]
  out[!is_one] <- (hill[!is_one]^(1 - ord) - 1) / (1 - ord)
  return(out)
}
# Bootstrap standard error and percentile-shifted confidence interval for an
# abundance-based profile statistic.
#   data   : abundance counts
#   B      : number of bootstrap replicates
#   q      : vector of orders passed through to FUNNAME
#   conf   : confidence level (e.g. 0.95)
#   FUNNAME: profile function with signature (data, q) -> numeric vector
# Resamples B multinomial samples from the Chao1-augmented "bootstrap
# assemblage" (observed species with shrunken probabilities plus f0 equally
# likely undetected species).  Returns a matrix with columns
# estimate / sd / LCL / UCL, one row per order.
bootstrap_forq = function(data,B,q,conf,FUNNAME){
  data <- data[data!=0]
  n <- sum(data)
  f1 = sum(data==1); f2 = sum(data==2)
  # Chao1 estimate of the number of undetected species (rounded up).
  f0 = ceiling(ifelse( f2>0, (n-1)*f1^2/n/2/f2, (n-1)*f1*(f1-1)/2/n ))
  # Estimated sample coverage.
  C_hat = ifelse(f2>0, 1-f1*(n-1)*f1/n/((n-1)*f1+2*f2), 1-f1*(n-1)*(f1-1)/n/((n-1)*(f1-1)+2))
  # Shrinkage factor so detected-species probabilities sum to C_hat.
  lamda_hat = (1-C_hat)/sum((data/n)*(1-data/n)^n)
  pi_hat = (data/n)*(1-lamda_hat*((1-data/n)^n))
  # Undetected species share the missing mass (1 - C_hat) equally.
  p_hat = c( pi_hat, rep( (1-C_hat)/f0, f0 ))
  random = rmultinom( B, n, p_hat )
  #Bt_estimate <- sapply(c(1:B),function(i) FUNNAME(random[,i],q))
  Bt_estimate <- apply(random,MARGIN = 2,function(i) FUNNAME(i,q))
  estimate <- FUNNAME(data,q)
  #Interval_mean = apply( Bt_estimate, 1, mean)
  Interval_mean = rowMeans(Bt_estimate)
  Interval_sd = apply(Bt_estimate, 1, sd)
  Interval_quantileL = apply( Bt_estimate, 1, quantile, p=(1-conf)/2)
  Interval_quantileU = apply( Bt_estimate, 1, quantile, p=1-(1-conf)/2)
  # Re-centre the bootstrap quantiles around the point estimate.
  Upper_bound = estimate+Interval_quantileU-Interval_mean
  Lower_bound = estimate+Interval_quantileL-Interval_mean
  result <- cbind("estimate"=estimate,"sd"=Interval_sd,"LCL"=Lower_bound,"UCL"=Upper_bound)
  result
}
MakeTable_Proposeprofile_nose = function(data,q){
  # Tabulate asymptotic diversity estimates (no standard errors).
  est <- Diversity_profile(data, q)
  data.frame(
    "Order.q"  = round(q, 9),
    "Target"   = "Diversity",
    "Estimate" = round(est, 9)
  )
}
MakeTable_Proposeprofile = function(data, B, q, conf){
  # Tabulate asymptotic diversity estimates with bootstrap s.e. and CI bounds.
  boot <- bootstrap_forq(data, B, q, conf, Diversity_profile)
  data.frame(
    "Order.q"  = round(q, 9),
    "Target"   = "Diversity",
    "Estimate" = round(boot[, 1], 9),
    "s.e."     = round(boot[, 2], 9),
    "LCL"      = round(boot[, 3], 9),
    "UCL"      = round(boot[, 4], 9)
  )
}
MakeTable_Empericalprofile_nose = function(data,q){
  # Tabulate empirical (plug-in) diversity estimates (no standard errors).
  emp <- Diversity_profile_MLE(data, q)
  data.frame(
    "Order.q"   = round(q, 9),
    "Target"    = "Diversity",
    "Emperical" = round(emp, 9)
  )
}
MakeTable_Empericalprofile = function(data, B, q, conf){
  # Tabulate empirical diversity estimates with bootstrap s.e. and CI bounds.
  boot <- bootstrap_forq(data, B, q, conf, Diversity_profile_MLE)
  data.frame(
    "Order.q"   = round(q, 9),
    "Target"    = "Diversity",
    "Emperical" = round(boot[, 1], 9),
    "s.e."      = round(boot[, 2], 9),
    "LCL"       = round(boot[, 3], 9),
    "UCL"       = round(boot[, 4], 9)
  )
}
plot_diversity_profile_nose_yhc=function(output){
  # Diversity-profile curves without confidence bands; one colour per
  # community, line type distinguishes the estimation method.
  base <- ggplot(output, aes(x = Order.q, y = Diversity,
                             colour = Community, lty = method))
  base +
    geom_line(size = 1.2) +
    labs(x = "Order q", y = "Diversity") +
    theme(text = element_text(size = 18))
}
plot_diversity_profile_yhc=function(output){
  # Diversity-profile curves; only the asymptotic estimates carry a shaded
  # confidence ribbon.
  asymptotic <- output %>% filter(method == "Asymptotic")
  ggplot(output, aes(x = Order.q, y = Diversity,
                     colour = Community, lty = method)) +
    labs(x = "Order q", y = "Diversity") +
    theme(text = element_text(size = 18)) +
    geom_ribbon(data = asymptotic,
                aes(ymin = LCL, ymax = UCL, fill = Community, colour = NULL),
                alpha = 0.4, linetype = 0) +
    geom_line(size = 1.1)
}
#=======incidence profile==========#
#cpp function in Diversity_profile.inc
# Compiles qDFUN(q, Xi, n) via Rcpp: for each order q it accumulates the
# alternating-sign series sum_k choose(q-1, k) * (-1)^k * delta_k over the
# incidence frequencies Xi with n sampling units (the detected-species part of
# the incidence diversity estimator).  The C++ source string is left
# untouched; note it accumulates in single-precision `float`.
cppFunction(
  "NumericVector qDFUN(NumericVector q,NumericVector Xi,const int n){
  const int length = q.size();
  const int Sobs = Xi.size();
  NumericVector Q(length);
  NumericVector delta(n);
  NumericVector temp(Sobs);
  for(int k=0;k<=(n-1);k++){
  for(int i = 0;i<Sobs;i++){
  temp[i] = (Xi[i]/n)*exp(Rf_lchoose(n-Xi[i],k)-Rf_lchoose(n-1,k));
  }
  delta[k] = sum(temp);
  }
  for(int i=0;i<length;i++){
  float temp = 0;
  for(int k=0;k<=(n-1);k++){
  temp = temp + (Rf_choose(q[i]-1,k)*pow(-1,k)*delta[k]);
  }
  Q[i] = temp;
  }
  return Q;
  }")
#cpp function in Diversity_profile_MLE.inc
# Compiles qD_MLE(q, ai): the plug-in Hill number (sum ai^q)^(1/(1-q)) for
# each order q, given relative incidences ai (caller handles q == 1
# separately in R).
cppFunction(
  "NumericVector qD_MLE(NumericVector q,NumericVector ai){
  const int length = q.size();
  const int S = ai.size();
  NumericVector Q(length);
  NumericVector temp(S);
  for(int j = 0; j<length;j++){
  for(int i = 0 ; i<S;i++){
  temp[i] = pow(ai[i],q[j]);
  }
  Q[j] = pow(sum(temp),1/(1-q[j]));
  }
  return Q;
  }")
AA.inc <- function(data){
  # Estimated probability parameter A for incidence data (used in the
  # singleton corrections of the incidence diversity/coverage estimators).
  #   data: c(T, incidence frequencies), T = number of sampling units.
  # Returns a scalar in (0, 1]; A = 1 when there is no singleton information.
  # Cleanup vs. original: removed the unused total U, used scalar && in the
  # conditions, and flattened the nested if/else into a single chain.
  n_units <- data[1]
  Yi <- data[-1]
  Yi <- Yi[Yi != 0]
  Q1 <- sum(Yi == 1)   # unique (once-detected) species
  Q2 <- sum(Yi == 2)   # duplicate (twice-detected) species
  if (Q2 > 0 && Q1 > 0) {
    2 * Q2 / ((n_units - 1) * Q1 + 2 * Q2)
  } else if (Q2 == 0 && Q1 > 1) {
    # Bias-corrected form when there are singletons but no doubletons.
    2 / ((n_units - 1) * (Q1 - 1) + 2)
  } else {
    1
  }
}
# Asymptotic Hill-number profile for incidence-frequency data.
#   data: c(T, incidence frequencies), T = number of sampling units
#   q   : vector of diversity orders
# Returns one estimate per order: Chao2-corrected richness at q = 0, a
# Shannon-type estimator at q = 1, and the general series form (detected part
# computed by the compiled qDFUN helper, plus singleton correction B)
# otherwise.
Diversity_profile.inc <- function(data,q){
  T = data[1]                 # number of sampling units (shadows base::T)
  Yi = data[-1]
  Yi <- Yi[Yi!=0]
  U <- sum(Yi)                # total number of incidences
  Q1 <- sum(Yi==1)
  Q2 <- sum(Yi==2)
  Sobs <- length(Yi)
  A <- AA.inc(data)           # singleton probability parameter
  # Chao2 estimate of undetected species.
  Q0hat <- ifelse(Q2 == 0, (T - 1) / T * Q1 * (Q1 - 1) / 2, (T - 1) / T * Q1 ^ 2/ 2 / Q2)
  # Singleton correction term for each order q.
  B <- sapply(q,function(q) ifelse(A==1,0,(Q1/T)*(1-A)^(-T+1)*(A^(q-1)-sum(sapply(c(0:(T-1)),function(r) choose(q-1,r)*(A-1)^r)))))
  qD <- (U/T)^(q/(q-1))*(qDFUN(q,Yi,T) + B)^(1/(1-q))
  qD[which(q==0)] = Sobs+Q0hat            # overwrite: richness has closed form
  yi <- Yi[Yi>=1 & Yi<=(T-1)]
  delta <- function(i){
    (yi[i]/T)*sum(1/c(yi[i]:(T-1)))
  }
  if(sum(q %in% 1)>0){
    # Shannon case handled separately (the general formula is singular at q = 1).
    C_ <- ifelse(A==1,0,(Q1/T)*(1-A)^(-T+1)*(-log(A)-sum(sapply(c(1:(T-1)),function(r) (1-A)^r/r))))
    qD[which(q==1)] <- exp((T/U)*( sum(sapply(c(1:length(yi)),function(i) delta(i))) + C_)+log(U/T))
  }
  return(qD)
}
Diversity_profile_MLE.inc <- function(data,q){
  # Empirical (plug-in) incidence-based Hill numbers.
  #   data: c(T, incidence frequencies); q: vector of orders.
  counts <- data[-1]
  total <- sum(counts)             # total incidences, including zero entries
  counts <- counts[counts != 0]
  rel <- counts / total
  out <- qD_MLE(q, rel)                      # compiled helper, q != 1 cases
  out[q == 1] <- exp(-sum(rel * log(rel)))   # Shannon limit at q = 1
  return(out)
}
Diversity_Tsallis.inc <- function(qD,q){
  # Convert incidence-based Hill numbers qD to Tsallis entropies of the
  # matching orders q ((D^(1-q) - 1)/(1 - q); log(D) at q = 1).
  out <- numeric(length(qD))
  one <- q == 1
  out[one] <- log(qD[one])
  ord <- q[!one]
  out[!one] <- (qD[!one]^(1 - ord) - 1) / (1 - ord)
  return(out)
}
Diversity_Tsallis_MLE.inc <- function(qD,q){
  # Empirical counterpart of Diversity_Tsallis.inc: same Hill-to-Tsallis
  # transformation applied to plug-in Hill numbers qD.
  out <- numeric(length(qD))
  one <- q == 1
  out[one] <- log(qD[one])
  ord <- q[!one]
  out[!one] <- (qD[!one]^(1 - ord) - 1) / (1 - ord)
  return(out)
}
# Bootstrap standard error and percentile-shifted CI for an incidence-based
# profile statistic.
#   data   : c(T, incidence frequencies)
#   B      : number of bootstrap replicates
#   q      : vector of orders passed through to FUNNAME
#   conf   : confidence level
#   FUNNAME: profile function with signature (data, q) -> numeric vector
# Replicates are drawn species-by-species as Binomial(T, p_hat) from the
# Chao2-augmented bootstrap assemblage; T is prepended as the first row so
# each replicate has the same layout as `data`.  Returns a matrix with
# columns estimate / sd / LCL / UCL, one row per order.
bootstrap_forq.inc = function(data,B,q,conf,FUNNAME){
  T <- data[1]
  Yi <- data[-1]
  Yi <- Yi[Yi>0]
  Sobs <- sum(Yi > 0)
  Q1 <- sum(Yi == 1)
  Q2 <- sum(Yi == 2)
  Q0.hat <- ifelse(Q2 == 0, (T - 1) / T * Q1 * (Q1 - 1) / 2, (T - 1) / T * Q1 ^ 2/ 2 / Q2) #estimation of unseen species via Chao2
  A <- ifelse(Q1>0, T*Q0.hat/(T*Q0.hat+Q1), 1)
  a <- Q1/T*A                              # estimated undetected incidence mass
  b <- sum(Yi / T * (1 - Yi / T) ^ T)
  w <- a / b                               # shrinkage weight for detected species
  Prob.hat <- Yi / T * (1 - w * (1 - Yi / T) ^ T)
  # Undetected species split the missing mass equally.
  Prob.hat.Unse <- rep(a/ceiling(Q0.hat), ceiling(Q0.hat))
  p_hat =c(Prob.hat, Prob.hat.Unse)
  random = t(sapply(1:length(p_hat), function(i){rbinom(B,T,p_hat[i])}))
  random = rbind(rep(T,B),random)          # first row carries T, as FUNNAME expects
  Bt_estimate <- apply(random,MARGIN = 2,function(i) FUNNAME(i,q))
  estimate <- FUNNAME(data,q)
  Interval_mean = rowMeans(Bt_estimate)
  Interval_sd = apply(Bt_estimate, 1, sd)
  Interval_quantileL = apply( Bt_estimate, 1, quantile, p=(1-conf)/2)
  Interval_quantileU = apply( Bt_estimate, 1, quantile, p=1-(1-conf)/2)
  # Re-centre bootstrap quantiles around the point estimate.
  Upper_bound = estimate+Interval_quantileU-Interval_mean
  Lower_bound = estimate+Interval_quantileL-Interval_mean
  result <- cbind("estimate"=estimate,"sd"=Interval_sd,"LCL"=Lower_bound,"UCL"=Upper_bound)
  result
}
MakeTable_EmpericalDiversityprofile.inc_nose = function(data,q){
  # Tabulate empirical incidence-based diversity estimates (no s.e.).
  emp <- Diversity_profile_MLE.inc(data, q)
  data.frame("Order.q" = q, "Target" = "Diversity", "Emperical" = emp)
}
MakeTable_Proposeprofile.inc_nose = function(data,q){
  # Tabulate asymptotic incidence-based diversity estimates (no s.e.).
  est <- Diversity_profile.inc(data, q)
  # The original wrapped the single data.frame in rbind(), a no-op; build
  # the frame directly.
  data.frame("Order.q" = q, "Target" = "Diversity", "Estimate" = est)
}
MakeTable_EmpericalDiversityprofile.inc = function(data, B, q,conf){
  # Empirical incidence-based diversity table with bootstrap s.e. and CI.
  boot <- bootstrap_forq.inc(data, B, q, conf, Diversity_profile_MLE.inc)
  data.frame(
    "Order.q"   = q,
    "Target"    = "Diversity",
    "Emperical" = boot[, 1],
    "s.e."      = boot[, 2],
    "LCL"       = boot[, 3],
    "UCL"       = boot[, 4]
  )
}
MakeTable_Proposeprofile.inc = function(data, B, q,conf){
  # Asymptotic incidence-based diversity table with bootstrap s.e. and CI.
  boot <- bootstrap_forq.inc(data, B, q, conf, Diversity_profile.inc)
  data.frame(
    "Order.q"  = q,
    "Target"   = "Diversity",
    "Estimate" = boot[, 1],
    "s.e."     = boot[, 2],
    "LCL"      = boot[, 3],
    "UCL"      = boot[, 4]
  )
}
plot_diversity_profile.inc_nose_yhc <- function(data){
  # Incidence-based diversity profiles, one curve per community, no ribbon.
  p <- ggplot(data, aes(x = Order.q, y = Diversity,
                        colour = Community, lty = method))
  p +
    geom_line(size = 1.2) +
    labs(x = "Order q", y = "Diversity") +
    theme(text = element_text(size = 18))
}
plot_diversity_profile.inc_yhc <- function(data){
  # Incidence-based diversity profiles with a shaded confidence ribbon.
  p <- ggplot(data, aes(x = Order.q, y = Diversity,
                        colour = Community, lty = method))
  p +
    labs(x = "Order q", y = "Diversity") +
    theme(text = element_text(size = 18)) +
    geom_ribbon(aes(ymin = LCL, ymax = UCL, fill = Community, colour = NULL),
                alpha = 0.4) +
    geom_line(size = 1.2)
}
#=======Sample Completeness Curve=========#
# Estimated sample completeness profile of order q (Chao et al. framework).
#   freq    : abundance counts, or c(T, incidence frequencies)
#   q       : vector of completeness orders
#   datatype: "abundance" or "incidence_freq"
# Returns one completeness value per order.  q = 0 is the detected fraction
# of estimated richness, q = 1 the classic sample-coverage estimate, q = 2 a
# doubleton-based form, and other q use a general series expansion.
sample_coverage = function(freq, q, datatype = c("abundance","incidence_freq")){
  if(datatype=="abundance"){
    freq = freq[freq>0]
    n = sum(freq)
    f1 = sum(freq==1)
    f2 = sum(freq==2)
    # A: estimated singleton probability parameter (same role as in
    # Diversity_profile above).
    A = ifelse(f2>0,2*f2/((n-1)*f1+2*f2),ifelse(f1>0,2/((n-1)*(f1-1)+2),1))
    c_hat = function(q){
      if (q==0){
        # Completeness of order 0: observed / Chao1-estimated richness.
        S_obs = length(freq)
        # f0_hat = if ( f2 == 0 ){( (n-1)/n ) * ( f1*(f1-1)/2 )} else {( (n-1)/n ) * ( (f1^2) / (2*f2) )}
        # f0_hat_star = ceiling(f0_hat)
        # c_hat = S_obs / (S_obs + f0_hat_star)
        f0_hat = ifelse ( f2 == 0 ,( (n-1)/n ) * ( f1*(f1-1)/2 ), ( (n-1)/n ) * ( (f1^2) / (2*f2) ))
        c_hat = S_obs / (S_obs + f0_hat)
        return(c_hat)
      } else if (q==1){
        # Classic coverage estimate.
        c_hat = 1 - (f1/n)*(1-A)
        return(c_hat)
      } else if (q==2){
        x = freq[freq>=2]
        c_hat = 1 - (f1/n)*( (A*(1-A))/sum( x*(x-1) / (n*(n-1)) ) )
        return(c_hat)
      } else {
        # General (non 0/1/2) order: series expansion over distinct counts.
        r <- 0:(n-1)
        sort.data = sort(unique(freq))
        tab = table(freq)
        term = sapply(sort.data,function(z){
          k=0:(n-z)
          sum(choose(k-q,k)*exp(lchoose(n-k-1,z-1)-lchoose(n,z)))
        })
        lambda_hat = sum(tab*term) + ifelse(f1==0|A==1,0,f1/n*(1-A)^(1-n)*(A^(q-1)-sum(choose(q-1,r)*(A-1)^r)))
        c_hat = 1 - ((f1/n)*(A^(q-1))*(1-A)/lambda_hat)
        return(c_hat)
      }
    }
  } else {
    # Incidence data: first element is the number of sampling units t.
    t = freq[1]
    freq = freq[-1]; freq = freq[freq>0]
    u = sum(freq)
    Q1 = sum(freq==1)
    Q2 = sum(freq==2)
    # B: incidence analogue of the singleton parameter A above.
    B = ifelse(Q2>0,2*Q2/((t-1)*Q1+2*Q2),ifelse(Q1>0,2/((t-1)*(Q1-1)+2),1))
    c_hat = function(q){
      if (q==0){
        # Completeness of order 0: observed / Chao2-estimated richness.
        S_obs = length(freq)
        # Chao2 = S_obs + ceiling(if ( Q2 == 0 ){( (t-1)/t ) * ( Q1*(Q1-1)/2 )} else {( (t-1)/t ) * ( (Q1^2) / (2*Q2) )})
        Q0_hat = ifelse( Q2 == 0,( (t-1)/t ) * ( Q1*(Q1-1)/2 ), ( (t-1)/t ) * ( (Q1^2) / (2*Q2) ))
        c_hat = S_obs / (S_obs + Q0_hat)
        return(c_hat)
      } else if (q==1){
        c_hat = 1 - (Q1/u)*(1-B)
        return(c_hat)
      } else if (q==2){
        x = freq[freq>=2]
        c_hat = 1 - (t-1)*Q1*( (B*(1-B))/sum( x*(x-1) ) )
        return(c_hat)
      } else {
        # General order: series expansion over distinct incidence counts.
        r <- 0:(t-1)
        sort.data = sort(unique(freq))
        tab = table(freq)
        term = sapply(sort.data,function(z){
          k=0:(t-z)
          sum(choose(k-q,k)*exp(lchoose(t-k-1,z-1)-lchoose(t,z)))
        })
        phi_hat = sum(tab*term) + ifelse(Q1==0|B==1,0,Q1/t*(1-B)^(1-t)*(B^(q-1)-sum(choose(q-1,r)*(B-1)^r)))
        c_hat = 1 - ((Q1/t)*(B^(q-1))*(1-B)/phi_hat)
        return(c_hat)
      }
    }
  }
  sapply(q,c_hat)
}
bootstrap_sample = function(freq, B, datatype = c("abundance","incidence_freq")){
  # Draw B bootstrap replicate samples from the estimated ("bootstrap")
  # assemblage: observed species probabilities are shrunk toward their
  # detection-adjusted values and ceiling(f0_hat) undetected species share
  # the estimated missing probability mass.
  #   freq: abundance counts, or c(T, incidence frequencies)
  #   B   : number of replicates
  # Returns a count matrix with one column per replicate; for incidence data
  # the first row is fixed at the number of sampling units T, matching the
  # layout the profile functions expect.
  if(datatype=="abundance"){
    freq = freq[freq>0]
    n = sum(freq)
    f1 = sum(freq == 1)
    f2 = sum(freq == 2)
    S_obs = length(freq)
    # Chao1 estimate of the number of undetected species (rounded up).
    f0_hat = if ( f2 == 0 ){( (n-1)/n ) * ( f1*(f1-1)/2 )} else {( (n-1)/n ) * ( (f1^2) / (2*f2) )}
    f0_hat_star = ceiling(f0_hat)
    # Estimated sample coverage.
    # BUG FIX: the f2 == 0 branch used "+ 2*f2" (i.e. + 0) in the denominator;
    # the bias-corrected formula (cf. sample_coverage() above and the iNEXT
    # package) uses "+ 2", which also avoids a 0/0 NaN when f1 == 1, f2 == 0.
    c_hat = if ( f2 != 0 ){ 1 - (f1/n)*((n-1)*f1/(((n-1)*f1)+2*f2))
    } else if (f1 != 0) { 1 - (f1/n)*((n-1)*(f1-1)/(((n-1)*(f1-1))+2)) } else { 1 }
    # Shrink detected-species probabilities so they sum to c_hat.
    lambda_hat = (1-c_hat) / sum((freq/n)*(1-freq/n)^n )
    p_i_hat_obs = (freq/n) * (1-lambda_hat* (1-freq/n)^n )
    # Undetected species share the missing mass equally.
    p_i_hat_unobs = rep( (1-c_hat)/ f0_hat_star, f0_hat_star )
    bootstrap_population = c(p_i_hat_obs,p_i_hat_unobs)
    bootstrap_sample = rmultinom(n=B, size=n, prob=bootstrap_population)
    return(bootstrap_sample)
  } else {
    t = freq[1]
    freq = freq[-1]; freq = freq[freq>0]
    u = sum(freq)
    Q1 = sum(freq==1)
    Q2 = sum(freq==2)
    S_obs = length(freq)
    # Chao2 estimate of the number of undetected species (rounded up).
    Q_0_hat = if ( Q2 == 0 ){( (t-1)/t ) * ( Q1*(Q1-1)/2 )} else {( (t-1)/t ) * ( (Q1^2) / (2*Q2) )}
    Q_0_hat_star = ceiling(Q_0_hat)
    c_hat = if ( Q2 > 0 ){ 1 - (Q1/u)*((t-1)*Q1/(((t-1)*Q1)+2*Q2))
    } else { 1 - (Q1/u)*((t-1)*(Q1-1)/(((t-1)*(Q1-1))+2)) }
    tau_hat = (u/t) * (1-c_hat) / sum((freq/t)*(1-freq/t)^t )
    pi_i_hat_obs = (freq/t) * (1-tau_hat* (1-freq/t)^t )
    pi_i_hat_unobs = rep( (u/t) * (1-c_hat)/ Q_0_hat_star, Q_0_hat_star )
    # Leading probability 1 pins the first row of every replicate at t.
    bootstrap_population = c(1,pi_i_hat_obs,pi_i_hat_unobs)
    bootstrap_sample = sapply(1:length(bootstrap_population), function(i) rbinom(n=B, size=t, prob=bootstrap_population[i]))
    bootstrap_sample = if(B==1) {as.matrix(bootstrap_sample)} else {t(bootstrap_sample)}
    return(bootstrap_sample)
  }
}
sc_profile.nose = function(freq, q, datatype = c("abundance","incidence_freq")) {
  # Sample-completeness profile without standard errors.
  sc <- sample_coverage(freq, q, datatype)
  data.frame(Order.q = q, Estimate = sc)
}
sc_profile = function(freq, q, B, conf, datatype = c("abundance","incidence_freq")) {
  # Sample-completeness profile with bootstrap s.e. and normal-approximation
  # confidence limits, clamped to the valid range [0, 1].
  # PERF: the original recomputed sample_coverage(freq, q, datatype) three
  # times; it is deterministic, so compute it once.
  est <- sample_coverage(freq, q, datatype)
  bootstrap_samples <- bootstrap_sample(freq, B, datatype)
  sc_bs <- sapply(seq_len(B), function(i) sample_coverage(bootstrap_samples[, i], q, datatype))
  se <- apply(sc_bs, 1, sd)
  z <- qnorm(1 - (1 - conf) / 2)
  LCL <- pmax(est - z * se, 0)   # completeness cannot be negative
  UCL <- pmin(est + z * se, 1)   # ... nor exceed 1
  data.frame(Order.q = q, Estimate = est, s.e. = se, LCL = LCL, UCL = UCL)
}
plot_sc_profile_nose <- function(data){
  # Sample-completeness curves (no ribbon), legend placed underneath.
  p <- ggplot(data, aes(x = Order.q, y = Estimate, colour = Community))
  p +
    geom_line(size = 1.2) +
    labs(x = "Order q", y = "sample completeness") +
    theme(text = element_text(size = 18), legend.position = "bottom")
}
plot_sc_profile <- function(data){
  # Sample-completeness curves with confidence ribbon; legends suppressed.
  p <- ggplot(data, aes(x = Order.q, y = Estimate, colour = Community))
  p +
    labs(x = "Order q", y = "sample completeness") +
    theme(text = element_text(size = 18)) +
    geom_ribbon(aes(ymin = LCL, ymax = UCL, fill = Community, colour = NULL),
                alpha = 0.4, show.legend = FALSE) +
    geom_line(size = 1.1, show.legend = FALSE)
}
#191018 yhc add ggiNEXT
# S3 plot method for iNEXT objects (ggplot2).
#   x         : an iNEXT object (list with DataInfo and iNextEst components)
#   type      : 1 = sample-size-based R/E curve, 2 = sample-coverage curve,
#               3 = coverage-based R/E curve
#   se        : draw the confidence ribbon (forced FALSE when the fortified
#               data carry no interval columns)
#   facet.var : "none"/"order"/"site"/"both" -- facetting variable
#   color.var : same choices -- variable mapped to colour/shape
#   grey      : use a greyscale theme
# Returns a ggplot object.
ggiNEXT.iNEXT <- function(x, type=1, se=TRUE, facet.var="none", color.var="site", grey=FALSE){
  TYPE <- c(1, 2, 3)
  SPLIT <- c("none", "order", "site", "both")
  # Validate arguments; pmatch allows abbreviated facet/colour choices.
  if(is.na(pmatch(type, TYPE)) | pmatch(type, TYPE) == -1)
    stop("invalid plot type")
  if(is.na(pmatch(facet.var, SPLIT)) | pmatch(facet.var, SPLIT) == -1)
    stop("invalid facet variable")
  if(is.na(pmatch(color.var, SPLIT)) | pmatch(color.var, SPLIT) == -1)
    stop("invalid color variable")
  type <- pmatch(type, 1:3)
  facet.var <- match.arg(facet.var, SPLIT)
  color.var <- match.arg(color.var, SPLIT)
  # When facetting by one variable, colour by the other so panels stay readable.
  if(facet.var=="order") color.var <- "site"
  if(facet.var=="site") color.var <- "order"
  # NOTE(review): warnings are suppressed around fortify() and then warn is
  # reset to 0 unconditionally, clobbering any user-set warn level -- an
  # on.exit(options(old)) restore would be safer; confirm before changing.
  options(warn = -1)
  z <- fortify(x, type=type)
  z$order <- factor(paste0("q = ",z$order),levels = paste0("q = ",unique(z$order)))
  options(warn = 0)
  if(ncol(z) ==7) {se <- FALSE}   # no y.lwr/y.upr columns -> cannot draw ribbon
  datatype <- unique(z$datatype)
  # Derive the colour/shape grouping column `col`, downgrading impossible
  # color.var choices with a warning.
  if(color.var=="none"){
    if(levels(factor(z$order))>1 & "site"%in%names(z)){
      warning("invalid color.var setting, the iNEXT object consists multiple sites and orders, change setting as both")
      color.var <- "both"
      z$col <- z$shape <- paste(z$site, z$order, sep="-")
    }else if("site"%in%names(z)){
      warning("invalid color.var setting, the iNEXT object consists multiple orders, change setting as order")
      color.var <- "site"
      z$col <- z$shape <- z$site
    }else if(levels(factor(z$order))>1){
      warning("invalid color.var setting, the iNEXT object consists multiple sites, change setting as site")
      color.var <- "order"
      z$col <- z$shape <- factor(z$order)
    }else{
      z$col <- z$shape <- rep(1, nrow(z))
    }
  }else if(color.var=="order"){
    z$col <- z$shape <- factor(z$order)
  }else if(color.var=="site"){
    if(!"site"%in%names(z)){
      warning("invalid color.var setting, the iNEXT object do not consist multiple sites, change setting as order")
      z$col <- z$shape <- factor(z$order)
    }
    z$col <- z$shape <- z$site
  }else if(color.var=="both"){
    if(!"site"%in%names(z)){
      warning("invalid color.var setting, the iNEXT object do not consist multiple sites, change setting as order")
      z$col <- z$shape <- factor(z$order)
    }
    z$col <- z$shape <- paste(z$site, z$order, sep="-")
  }
  zz=z   # keep a copy with the original method labels for the observed points
  # Collapse methods to Interpolated/Extrapolated for the line-type legend.
  z$method[z$method=="observed"]="Interpolated"
  z$method[z$method=="interpolated"]="Interpolated"
  z$method[z$method=="extrapolated"]="Extrapolated"
  z$lty <- z$lty <- factor(z$method, levels=unique(c("Interpolated", "Extrapolated")))
  z$col <- factor(z$col)
  data.sub <- zz[which(zz$method=="observed"),]   # observed points get markers
  g <- ggplot(z, aes_string(x="x", y="y", colour="col")) + theme_bw() + 
    geom_point(aes_string(shape="shape"), size=3, data=data.sub)
  g <- g + geom_line(aes_string(linetype="lty"), lwd=1.1) +
    guides(linetype=guide_legend(title="Method"),
           colour=guide_legend(title="Guides"), 
           fill=guide_legend(title="Guides"), 
           shape=guide_legend(title="Guides")) +
    theme(legend.position = "bottom", 
          legend.title=element_blank(),
          text=element_text(size=18),
          legend.key.width = unit(1.2,"cm"))
  # Axis labels depend on the plot type and on abundance vs. incidence data.
  if(type==2L) {
    g <- g + labs(x="Number of sampling units", y="Sample coverage")
    if(datatype=="abundance") g <- g + labs(x="Number of individuals", y="Sample coverage")
  }
  else if(type==3L) {
    g <- g + labs(x="Sample coverage", y="Species diversity")
  }
  else {
    g <- g + labs(x="Number of sampling units", y="Species diversity")
    if(datatype=="abundance") g <- g + labs(x="Number of individuals", y="Species diversity")
  }
  if(se)
    g <- g + geom_ribbon(aes_string(ymin="y.lwr", ymax="y.upr", fill="factor(col)", colour="NULL"), alpha=0.4)
  # Facetting: each branch warns and skips when the object lacks the
  # requested dimension.
  if(facet.var=="order"){
    if(length(levels(factor(z$order))) == 1 & type!=2){
      warning("invalid facet.var setting, the iNEXT object do not consist multiple orders.")      
    }else{
      g <- g + facet_wrap(~order, nrow=1)
      if(color.var=="both"){
        g <- g + guides(colour=guide_legend(title="Guides", ncol=length(levels(factor(z$order))), byrow=TRUE),
                        fill=guide_legend(title="Guides"))
      }
    }
  }
  if(facet.var=="site"){
    if(!"site"%in%names(z)) {
      warning("invalid facet.var setting, the iNEXT object do not consist multiple sites.")
    }else{
      g <- g + facet_wrap(~site, nrow=1)
      if(color.var=="both"){
        g <- g +  guides(colour=guide_legend(title="Guides", nrow=length(levels(factor(z$order)))),
                         fill=guide_legend(title="Guides"))
      }
    }
  }
  if(facet.var=="both"){
    if(length(levels(factor(z$order))) == 1 | !"site"%in%names(z)){
      warning("invalid facet.var setting, the iNEXT object do not consist multiple sites or orders.")
    }else{
      g <- g + facet_wrap(site~order) 
      if(color.var=="both"){
        g <- g + guides(colour=guide_legend(title="Guides", nrow=length(levels(factor(z$site))), byrow=TRUE),
                        fill=guide_legend(title="Guides"))
      }
    }
  }
  if(grey){
    g <- g + theme_bw(base_size = 18) +
      scale_fill_grey(start = 0, end = .4) +
      scale_colour_grey(start = .2, end = .2) +
      guides(linetype=guide_legend(title="Method"), 
             colour=guide_legend(title="Guides"), 
             fill=guide_legend(title="Guides"), 
             shape=guide_legend(title="Guides")) +
      theme(legend.position="bottom",
            legend.title=element_blank())
  }
  # g <- g + theme(legend.box = "vertical")
  return(g)
}
#' Fortify method for classes from the iNEXT package.
#'
#' @name fortify.iNEXT
#' @param model \code{iNEXT} to convert into a dataframe.
#' @param data not used by this method
#' @param type three types of plots: sample-size-based rarefaction/extrapolation curve (\code{type = 1});
#' sample completeness curve (\code{type = 2}); coverage-based rarefaction/extrapolation curve (\code{type = 3}).
#' @param ... not used by this method
#' @export
#' @examples
#' data(spider)
#' # single-assemblage abundance data
#' out1 <- iNEXT(spider$Girdled, q=0, datatype="abundance")
#' ggplot2::fortify(out1, type=1)
# Flatten an iNEXT object's estimation tables into one long data frame for
# plotting: adds x/y (and y.lwr/y.upr when intervals exist) columns chosen
# according to the requested plot type, plus datatype/plottype/site columns.
fortify.iNEXT <- function(model, data = model$iNextEst, type = 1, ...) {
  # Second DataInfo column is "n" for abundance data, otherwise incidence.
  datatype <- ifelse(names(model$DataInfo)[2]=="n","abundance","incidence")
  z <- data
  # Multi-assemblage objects store one table per site in a list; stack them
  # and record the site name.  NOTE(review): `class(z) == "list"` would be
  # more robustly written as inherits(z, "list") -- confirm no classed lists
  # reach this point before changing.
  if(class(z) == "list"){
    z <- data.frame(do.call("rbind", z), site=rep(names(z), sapply(z, nrow)))
    rownames(z) <- NULL
  }else{
    z$site <- ""
  }
  # 6 columns means no confidence-interval columns were computed.
  if(ncol(z)==6) {
    warning("invalid se setting, the iNEXT object do not consist confidence interval")
    se <- FALSE
  }else if(ncol(z)>6) {
    se <- TRUE
  }
  if(type==1L) {
    # Size-based curve: x = sample size, y = diversity.
    z$x <- z[,1]
    z$y <- z$qD
    if(se){
      z$y.lwr <- z[,5]
      z$y.upr <- z[,6]
    }
  }else if(type==2L){
    # Coverage curve: drawn for a single order only.
    if(length(unique(z$order))>1){
      z <- subset(z, order==unique(z$order)[1])
    }
    z$x <- z[,1]
    z$y <- z$SC
    if(se){
      z$y.lwr <- z[,8]
      z$y.upr <- z[,9]
    }
  }else if(type==3L){
    # Coverage-based curve: x = sample coverage, y = diversity.
    z$x <- z$SC
    z$y <- z$qD
    if(se){
      z$y.lwr <- z[,5]
      z$y.upr <- z[,6]
    }
  }
  z$datatype <- datatype
  z$plottype <- type
  if(se){
    data <- z[,c("datatype","plottype","site","method","order","x","y","y.lwr","y.upr")]
  }else{
    data <- z[,c("datatype","plottype","site","method","order","x","y")]
  }
  data
}
|
f6e71482013f83887f7ef564b57d20c66d223ff1 | 72052673e90cdd9391fd52be11b5da93441ddf43 | /DAT204x/DAT204x/02.Vectors.R | 4630abf23cba8a294e16591479b8a3d8322662fe | [] | no_license | KrishnaGMohan/Projects-R | d348d387d38bf5a9143d26391e9672a6db2e9bdf | 337d3601c4deb1285b969248cd7facc8f98e0e58 | refs/heads/master | 2021-01-24T07:31:19.783812 | 2017-08-07T20:42:32 | 2017-08-07T20:42:32 | 55,706,529 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,319 | r | 02.Vectors.R | #---------------------------------------------------------------------------------------------
#
# Sequence of data elements of the same basic type
#
#---------------------------------------------------------------------------------------------
# Vectors are ordered collections of elements sharing one basic type.
numeric_vector   <- c(1, 10, 49)
character_vector <- c("a", "b", "c")
boolean_vector   <- c(TRUE, FALSE, TRUE)   # logical vector

# Daily casino winnings for each game, Monday through Friday.
poker_vector    <- c(140, -50, 20, -120, 240)
roulette_vector <- c(-24, -50, 100, -350, 10)
days_vector     <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")

# Label each day's result with its weekday name.
names(poker_vector)    <- days_vector
names(roulette_vector) <- days_vector
#---------------------------------------------------------------------------------------------
# Element-wise arithmetic on two equal-length vectors.
A_vector <- c(1, 2, 3)
B_vector <- c(4, 5, 6)

total_vector <- A_vector + B_vector   # element-wise sum
print(total_vector)

diff_vector <- A_vector - B_vector    # element-wise difference
print(diff_vector)
#---------------------------------------------------------------------------------------------
# Daily winnings per game, labelled by weekday.
poker_vector    <- c(140, -50, 20, -120, 240)
roulette_vector <- c(-24, -50, 100, -350, 10)
days_vector     <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
names(poker_vector)    <- days_vector
names(roulette_vector) <- days_vector

# Combined take per day across both games (the names carry over).
total_daily <- poker_vector + roulette_vector
#---------------------------------------------------------------------------------------------
# Daily winnings per game, labelled by weekday.
poker_vector    <- c(140, -50, 20, -120, 240)
roulette_vector <- c(-24, -50, 100, -350, 10)
days_vector     <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
names(poker_vector)    <- days_vector
names(roulette_vector) <- days_vector

# Weekly totals per game, then the overall result.
total_poker    <- sum(poker_vector)
total_roulette <- sum(roulette_vector)
total_week     <- total_poker + total_roulette
print(total_week)
#---------------------------------------------------------------------------------------------
# Daily winnings per game, labelled by weekday.
poker_vector    <- c(140, -50, 20, -120, 240)
roulette_vector <- c(-24, -50, 100, -350, 10)
days_vector     <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
names(poker_vector)    <- days_vector
names(roulette_vector) <- days_vector

# Day-by-day comparison (logical vector) and overall decision.
poker_better   <- poker_vector > roulette_vector
total_poker    <- sum(poker_vector)
total_roulette <- sum(roulette_vector)
choose_poker   <- total_poker > total_roulette
print(choose_poker)
#---------------------------------------------------------------------------------------------
# NOTE(review): poker_past, roulette_past and poker_present are supplied by the
# original exercise environment; they are not defined anywhere in this excerpt.
# Calculate total gains for your entire past week: total_past
total_past <- sum(poker_past) + sum(roulette_past)
# Difference of past to present performance: diff_poker
diff_poker <- poker_present - poker_past
#---------------------------------------------------------------------------------------------
#
# Vector Subsetting
#
#---------------------------------------------------------------------------------------------
# Daily casino results, Monday through Friday, as named vectors.
days_vector <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
poker_vector <- setNames(c(140, -50, 20, -120, 240), days_vector)
roulette_vector <- setNames(c(-24, -50, 100, -350, 10), days_vector)

# Single elements selected by name (the result keeps its name).
poker_wednesday <- poker_vector["Wednesday"]
roulette_friday <- roulette_vector["Friday"]
#---------------------------------------------------------------------------------------------
# Several elements selected by position.
poker_midweek <- poker_vector[2:4]        # Tuesday through Thursday
roulette_endweek <- roulette_vector[4:5]  # Thursday and Friday
#---------------------------------------------------------------------------------------------
# A contiguous positional range: Tuesday through Friday inclusive.
roulette_subset <- roulette_vector[2:5]
# Echo the subset at the console (top-level auto-printing).
roulette_subset
#---------------------------------------------------------------------------------------------
# More single-element selection by name.
roulette_thursday <- roulette_vector["Thursday"]
poker_tuesday <- poker_vector["Tuesday"]
#---------------------------------------------------------------------------------------------
# The first three days of poker, and their average result.
poker_start <- poker_vector[1:3]
avg_poker_start <- mean(poker_start)
#---------------------------------------------------------------------------------------------
# Odd-numbered days of roulette, and the first three poker days again.
roulette_subset <- roulette_vector[c(1, 3, 5)]
poker_start <- poker_vector[1:3]
#---------------------------------------------------------------------------------------------
# Daily casino results, Monday through Friday, as named vectors.
days_vector <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
poker_vector <- setNames(c(140, -50, 20, -120, 240), days_vector)
roulette_vector <- setNames(c(-24, -50, 100, -350, 10), days_vector)

# Logical mask of profitable poker days, then the winnings on exactly
# those days (subsetting a vector with a logical vector of equal length).
selection_vector <- poker_vector > 0
poker_profits <- poker_vector[selection_vector]
#---------------------------------------------------------------------------------------------
# Re-create the named winnings vectors (each exercise starts from scratch).
days_vector <- c("Monday", "Tuesday", "Wednesday", "Thursday", "Friday")
poker_vector <- setNames(c(140, -50, 20, -120, 240), days_vector)
roulette_vector <- setNames(c(-24, -50, 100, -350, 10), days_vector)

# Roulette winnings on profitable days only, their sum, and a count of those
# days (sum() over a logical vector counts the TRUE values).
roulette_profits <- roulette_vector[roulette_vector > 0]
roulette_total_profit <- sum(roulette_profits)
num_profitable_days <- sum(roulette_vector > 0)
#---------------------------------------------------------------------------------------------
# Blackjack scores for seven games: the player's hand vs. the house's hand.
player <- c(14, 17, 20, 21, 20, 18, 14)
house <- c(20, 15, 21, 20, 20, 17, 19)
# Select the player's score for the third game: player_third
player_third <- player[3]
# Select the scores where player exceeds house: winning_scores
winning_scores <- player[player > house]
# Count number of times player < 18: n_low_score
# (fixed: the original final line carried trailing extraction residue -- a
# stray "|" -- that made the script unparseable from this point on)
n_low_score <- sum(player < 18)
e2c7b31427e8e1900ad15c6045f9e863d95c2c23 | 5c2350f172e1a7b7f61e1047d515357735e5895e | /R/pkg_options.R | db98355b7b4418ffd937b0a1bb86931305578adf | [
"CC-BY-4.0",
"MIT",
"LicenseRef-scancode-public-domain",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | richarddmorey/Morey_Hoekstra_StatCognition | 4da5b3f205d1038b850fa701354bd59b62a05eed | 373b9ac75219d84d7b5a6454296e80aa4f34ea54 | refs/heads/master | 2022-12-06T14:50:55.198542 | 2022-11-30T18:23:58 | 2022-11-30T18:23:58 | 189,821,493 | 4 | 1 | null | null | null | null | UTF-8 | R | false | false | 840 | r | pkg_options.R | # Variable, global to package's namespace.
# This function is not exported to user space and does not need to be documented.
# MYPKGOPTIONS is a getter/setter closure created once at package load time:
# called with no arguments it returns all options, with option names it
# returns their values, and with key = value pairs it sets them (see
# settings::options_manager for the exact semantics).
MYPKGOPTIONS <- settings::options_manager(
  ggplot_family = "Lato Light",
  base_family = "lato"
)
# User function that gets exported:
#' Set or get options for my package
#'
#' @param ... Option names to retrieve option values or \code{[key]=[value]} pairs to set options.
#'
#' @return The value(s) of the requested option(s); see
#'   \code{settings::options_manager} for the behaviour of setter calls.
#'
#' @section Supported options:
#' The following options are supported
#' \itemize{
#'  \item{\code{ggplot_family}}{(\code{character};"Lato Light") Font family to use for ggplot2 graphs }
#'  \item{\code{base_family}}{(\code{character};"lato") Font family to use for base graphs }
#' }
#'
#' @export
pkg_options <- function(...){
  # protect against the use of reserved words.
  # stop_if_reserved() raises an error if a caller tries to use one of the
  # settings package's reserved option names -- see its documentation.
  settings::stop_if_reserved(...)
  MYPKGOPTIONS(...)
}
0c174adf36a0cf24b11904c5298edc4ae7c19c6f | fff2d145dc2160fad90eec9d780d503b8868d5b2 | /scripts/model_DecisionTree.R | 2e4abd1bcedfa6b50a162eaf3eb11bce6449ab7a | [] | no_license | Sponghop/dalex_tidymodels_example | dbbb17354b575118dc27eeb195ba1c8121da1f3a | 81ac52dad2bb83c230af599d7a49ee8b0c08f3e8 | refs/heads/master | 2022-11-21T23:41:21.717657 | 2020-07-31T10:17:25 | 2020-07-31T10:17:25 | 284,003,676 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,084 | r | model_DecisionTree.R | ##Model Training/Tuning Decision Tree
# Model spec: a CART decision tree (rpart engine) for classification, with
# the tree depth left as a parameter to be tuned.
tree_mod <-
  decision_tree(tree_depth = tune()) %>%
  set_engine("rpart") %>%
  set_mode("classification")
# Bundle the preprocessing recipe and the model spec into one workflow.
# NOTE(review): mod_rec, pid_train and pid_test are created elsewhere in the
# project; confirm they are in scope before sourcing this script.
ml_wflow <-
  workflow() %>%
  add_recipe(mod_rec) %>%
  add_model(tree_mod)
# Keep out-of-sample predictions from each resample fit.
# NOTE(review): tune_grid() conventionally takes control_grid(); passing a
# control_resamples object may be rejected by newer versions of tune -- verify.
ctrl <- control_resamples(save_pred = TRUE)
# 2-fold cross-validation repeated 3 times.
# NOTE(review): no set.seed() beforehand, so the folds (and tuning results)
# are not reproducible run to run.
folds <- vfold_cv(pid_train, v = 2, repeats = 3)
# Candidate tree depths to evaluate: every integer from 3 to 25.
grid <- expand.grid(tree_depth = 3:25)
# Parallel backend: one worker per logical core, leaving one core free.
all_cores <- parallel::detectCores(logical = TRUE) - 1
registerDoFuture()
cl <- makeCluster(all_cores)
plan(future::cluster, workers = cl)
# Grid search over tree depths, evaluated across all resamples.
res <-
  ml_wflow %>%
  tune_grid(resamples = folds, control = ctrl, grid = grid)
# NOTE(review): after stopCluster() the future plan still points at the dead
# cluster; consider following this with plan(sequential).
stopCluster(cl)
# Summarise the performance metrics across resamples (auto-printed).
res %>%
  tune::collect_metrics()
# Pick the depth with the best cross-validated accuracy.
best_params <-
  res %>%
  tune::select_best(metric = "accuracy")
best_params
## Validation
reg_res_decisiontree <-
  ml_wflow %>%
  # Attach the best tuning parameters to the model
  finalize_workflow(best_params) %>%
  # Fit the final model to the training data
  fit(data = pid_train)
# Holdout evaluation: predict on the test set and report accuracy.
reg_res_decisiontree %>%
  predict(new_data = pid_test) %>%
  bind_cols(pid_test, .) %>%
  select(diabetes, .pred_class) %>%
  accuracy(diabetes, .pred_class)
442051a0e686aff6f6dfa16ab95220904a48def4 | f742e300d0d886a2093acc43a37dc0d65cf6e877 | /man/node_locations.Rd | d8e2e72c0377ea8d533bddc59c6f5f1054f8969f | [] | no_license | cran/whitechapelR | 347608622b3828dcade330f4cf25f7c3fe4cab9e | 35986d29898717d2cc5f7343d02584af9c2a1725 | refs/heads/master | 2020-03-27T04:02:00.429566 | 2018-10-02T16:40:03 | 2018-10-02T16:40:03 | 145,906,933 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 780 | rd | node_locations.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/data.R
\docType{data}
\name{node_locations}
\alias{node_locations}
\title{x,y coordinates of node points from the game board}
\format{A data frame with 195 rows and 4 variables
\describe{
\item{id}{An artifact of the computer vision process used to obtain coordinates}
\item{x}{The number of pixels from the left edge of the board to the center of the node}
\item{y}{The number of pixels from the top edge of the board to the center of the node}
\item{name}{The integer assigned to the node on the game board}
}}
\usage{
node_locations
}
\description{
Data used to place nodes in graphical output according to their relative positions on the game board
}
\keyword{datasets}
|
b40726076457a115f14cc5b2018da2998b94b42d | 7c0d885b9960f7adaf44e4e7db7f79b5f87d1697 | /some tests/human tissue atlas.R | 24b097a349a47d0844d30d103fede4e8bf428d46 | [] | no_license | gheger11/Comparison-of-batch-effect-correction-methods | 0990eeb3f9e63133ff203a2bb463e5f5b40a09a8 | 91a3cad82ab42e1bfc657f3c55368c0a1439bb20 | refs/heads/master | 2022-05-03T08:24:36.593085 | 2019-09-24T18:23:10 | 2019-09-24T18:23:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,748 | r | human tissue atlas.R | library(magrittr)
library(stringr)
library(purrr)
library(ggplot2)
library(ExpressionAtlas)
library(igraph)
batch_data<-"Human Expression Atlas/batch data/"
experiments<-list();for(filename in dir(batch_data)){
experiments[[filename %>% str_split("-") %>% extract2(1) %>% extract(2:3) %>% paste(collapse="")]] <- get(load(paste0(batch_data,filename)))$rnaseq
}
genes<-experiments %>% map(rownames)
common.genes<-genes %>% purrr::reduce(intersect)
data<-NULL;batch<-NULL;tissue<-NULL; for(i in experiments %>% seq_along){
data%<>%cbind(experiments[[i]] %>% assays %$% counts %>% extract(common.genes,))
batch%<>%c(names(experiments)[[i]] %>% rep(dim(experiments[[i]])[2]))
tissue%<>%c(experiments[[i]]$organism_part)
}; batch%<>%factor
tissues <- tissue %>% split(batch)
# Pairwise counts of tissue types shared by every pair of experiments.
# Nested sapply() builds the full n-by-n matrix directly: the outer index
# forms the columns, the inner index the rows, and dimnames are inherited
# from names(tissues) automatically. This replaces the original nested
# for-loops that grew a vector with c() on every iteration and then set
# dimensions and dimnames by hand; the result (a symmetric, fully named
# integer matrix) is identical.
intersections <- sapply(tissues, function(study_a) {
  sapply(tissues, function(study_b) length(intersect(study_a, study_b)))
})
# Visualise how many tissue types each pair of experiments shares.
graph_from_adjacency_matrix(intersections) %>% plot
library(GGally)
library(network)
# Same overlap graph, drawn with ggnet2 and edge widths from the counts.
intersections %>% network(ignore.eval=FALSE,names.eval='weights') %>% ggnet2(label=TRUE, edge.size = 'weights')
#"Human Expression Atlas/intersections graph.png" %>% png; graph_from_adjacency_matrix(intersections) %>% plot; dev.off()
#no isolated experiment
# data %>% gPCA(batch) -> gdata
# gdata %>% viz_gpca(guided=FALSE)
#
# data %>% log1p %>% gPCA(batch) -> gdata
# gdata %>% viz_gpca(guided=FALSE)
# Log-transform the raw counts (log(1 + x) keeps zeros finite).
data%<>%log1p
#filter<-TRUE %>% rep(nrow(data))
#filter <- data %>% t %>% data.frame %>% split(tissue) %>% map(~colSums(.)!=0) %>% purrr::reduce(`&`)
# Keep only genes with a non-zero total in every batch.
# NOTE(review): this binds the name `filter`, shadowing stats::filter (and
# dplyr::filter if attached) for the rest of the session.
filter <- data %>% t %>% data.frame %>% split(batch) %>% map(~colSums(.)!=0) %>% purrr::reduce(`&`)
filtered<-data[filter,]
#filtered[filtered==0]<-1
# Batch-effect diagnostics on the uncorrected (filtered) data.
filtered %>% gPCA(batch,nperm=100) -> gfilt
filtered %>% eigenangles(batch,tissue) -> angfilt
# --- Correction method 1: pamr.batchadjust ---
library(pamr)
filtered %>% list(x=.,batchlabels=batch) %>% pamr.batchadjust %$% x -> pam
pam %>% eigenangles(batch,tissue)->angpam
pam %>% t %>% prcomp(rank.=2) -> pampca
# PCA of the pamr-adjusted data, samples coloured by tissue.
ggplot()+aes(x=pampca$x[,1],y=pampca$x[,2],colour=tissue)+geom_point()+stat_ellipse()+
  scale_colour_manual(values=c('#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080', '#ffffff', '#000000') %>% rep(3))
# --- Correction method 2: ComBat, preserving the tissue signal ---
library(sva)
filtered %>% ComBat(batch,model.matrix(~tissue)) -> combat
combat %>% eigenangles(batch,tissue)->angcomb
combat %>% save(file='Human Expression Atlas/Human Expression Atlas across tissues (ComBat).Rdata')
combat %>% t %>% prcomp -> pcacombat
pcacombat %>% save(file='Human Expression Atlas/ComBat PCA.Rdata')
# PCA scatter plots of the ComBat-corrected data, written to PNG files.
'Human Expression Atlas/ComBat PCA.png' %>% png
ggplot()+aes(x=pcacombat$x[,1],y=pcacombat$x[,2],colour=tissue)+geom_point()+stat_ellipse()+
  scale_colour_manual(values=c('#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080', '#ffffff', '#000000') %>% rep(3))
dev.off()
'Human Expression Atlas/ComBat PCA text.png' %>% png(800,800)
# Same projection, with tissue names drawn as text instead of points.
ggplot()+aes(x=pcacombat$x[,1],y=pcacombat$x[,2],label=tissue,colour=tissue)+
  geom_text(size=3)+theme(legend.position = 'none')+
  scale_colour_manual(values=c('#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080', '#ffffff', '#000000') %>% rep(3))
dev.off()
# Guided PCA with tissue as the grouping variable, on the corrected data.
combat %>% gPCA(tissue,scaleY=TRUE) -> gtissuepcacombat
gtissuepcacombat %>% viz_gpca + scale_colour_manual(values=c('#e6194b', '#3cb44b', '#ffe119', '#4363d8', '#f58231', '#911eb4', '#46f0f0', '#f032e6', '#bcf60c', '#fabebe', '#008080', '#e6beff', '#9a6324', '#fffac8', '#800000', '#aaffc3', '#808000', '#ffd8b1', '#000075', '#808080', '#ffffff', '#000000') %>% rep(3))
# --- Correction method 3: RUVs, using tissue replicate groups ---
library(RUVSeq)
filtered %>% RUVs(cIdx=rownames(filtered),k=1,scIdx=makeGroups(tissue),isLog=TRUE) -> ruv
ruv$normalizedCounts %>% eigenangles(batch,tissue) -> angruv
# Collect the eigenangle diagnostics of all methods, regrouped per batch.
list(filtered=angfilt,combat=angcomb,pam=angpam,ruv=angruv) %>% transpose -> ang
ang %>% save(file='Human Expression Atlas/angles.Rdata')
# One PDF page per batch comparing the methods' eigenangles (log scale).
'Human Expression Atlas/ang.pdf' %>% pdf;for(b in ang){
  plot(
    ggplot()+aes(x=seq_along(b$filtered))+
      geom_point(aes(y=b$filtered),colour='red')+
      geom_point(aes(y=b$combat),colour='green')+
      geom_point(aes(y=b$pam),colour='blue')+
      geom_point(aes(y=b$ruv),colour='orange')+scale_y_log10()
  )
};dev.off()
f2cf6536920e03eae6bfba650c84c09385df623a | 8da8d9e15d5294ebed03dc0bec6a7727f3a9fec1 | /3_R_scripts/3_R_script_two_point_assay/Yeaman_et_al_2010.R | a0835324a3449fbc627b6cbf8ae0e5159f5ac391 | [] | no_license | SarthakMalusareISEM/2020_04_01_SM_Temperature_niche_review | 5e4972ab37f6dd745aa0e5bfe64f46631234e045 | 7abc17f6f5269e78ff86ffd58272d7fad7c7bce6 | refs/heads/master | 2023-05-03T20:59:48.289049 | 2021-05-26T08:43:58 | 2021-05-26T08:43:58 | 370,953,471 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 643 | r | Yeaman_et_al_2010.R | library(ggplot2)
library(reshape)
library(dplyr)
library(ggpubr)
df<-read.table(file="Yeaman_et_al_2010.txt" ,header= TRUE,sep = " ")
df0 <- df%>% slice(2:5)
a<- ggplot(df0, aes(x=Assay_temperature, y=Mean_female,fill= )) +
geom_line(aes(group=Control_or_selection_line, color=Control_or_selection_line)) +geom_point(aes(color=Control_or_selection_line))+
ggtitle("No Effect of Environmental Heterogeneity on the Maintenance
of Genetic Variation in Wing Shape in Drosophila Melanogaster ") +
ylab(" progeny productivity ")
#geom_errorbar(aes(ymin=Mean_female-Calc_CI, ymax=Mean_female+Calc_CI), width=.2,)
print(a)
|
8228a42a550c2b72e6881abdad4c403c72f364f4 | 29585dff702209dd446c0ab52ceea046c58e384e | /phenology/R/phenology-package.R | 63f8c6b889ff3f5b101da3bdaf8b61309213aa5a | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,747 | r | phenology-package.R | #' Fit a parametric function that describes phenology
#'
#' \tabular{ll}{
#' Package: \tab phenology\cr
#' Type: \tab Package\cr
#' Version: \tab 5.1 build 354\cr
#' Date: \tab 2016-05-02\cr
#' License: \tab GPL (>= 2)\cr
#' LazyLoad: \tab yes\cr
#' }
#' @title Tools to Manage a Parametric Function that Describes Phenology
#' @author Marc Girondot \email{marc.girondot@@u-psud.fr}
#' @docType package
#' @name phenology-package
#' @description Functions used to fit and test the phenology of species based on counts.\cr
#' Note that only the most significant changes are reported in the NEWS.\cr
#' To do:\cr
#' * There are problems with SE for fitRMU().\cr
#' * Auto-scaling for optim during fitRMU search.\cr
#' * I must adapt TCF (total clutch frequency) fit from OCF-ECF (observed clutch frequency-estimated cluth frequency) table based on:\cr
#' Briane, J.-P., Rivalan, P., Girondot, M., 2007. The inverse problem applied to the Observed Clutch Frequency of Leatherbacks from Yalimapo beach, French Guiana. Chelonian Conservation and Biology 6, 63-69.\cr
#' Until now it is an Excel spreadsheet.\cr
#' * Fit tag-loss rate based on:\cr
#' Rivalan, P., Godfrey, M.H., Prévot-Julliard, A.-C., Girondot, M., 2005. Maximum likelihood estimates of tag loss in leatherback sea turtles. Journal of Wildlife Management 69, 540-548.\cr
#' Until now it is a RealBasic software.\cr
#' The lastest version of this package can always been installed using:\cr
#' install.packages("http://www.ese.u-psud.fr/epc/conservation/CRAN/phenology.tar.gz", repos=NULL, type="source")
#' @references Girondot, M. 2010. Estimating density of animals during
#' migratory waves: application to marine turtles at nesting site.
#' Endangered Species Research, 12, 85-105.
#' @references Girondot M. and Rizzo A. 2015. Bayesian framework to integrate
#' traditional ecological knowledge into ecological modeling: A case
#' study. Journal of Ethnobiology, 35, 339-355.
#' @references Girondot, M. 2010. Editorial: The zero counts. Marine
#' Turtle Newsletter, 129, 5-6.
#' @keywords Seasonality Phenology Ecology
#' @seealso Girondot, M., Rivalan, P., Wongsopawiro, R., Briane, J.-P., Hulin, V.,
#' Caut, S., Guirlet, E. & Godfrey, M. H. 2006. Phenology of marine turtle
#' nesting revealed by a statistical model of the nesting season. BMC Ecology,
#' 6, 11.
#' @seealso Delcroix, E., Bédel, S., Santelli, G., Girondot, M., 2013. Monitoring
#' design for quantification of marine turtle nesting with limited human
#' effort: a test case in the Guadeloupe Archipelago. Oryx 48, 95-105.
#' @seealso Weber, S.B., Weber, N., Ellick, J., Avery, A., Frauenstein, R.,
#' Godley, B.J., Sim, J., Williams, N., Broderick, A.C., 2014. Recovery
#' of the South Atlantic’s largest green turtle nesting population.
#' Biodiversity and Conservation 23, 3005-3018.
#' @examples
#' \dontrun{
#' library(phenology)
#' # Get the lastest version at:
#' # install.packages("http://www.ese.u-psud.fr/epc/conservation/CRAN/phenology.tar.gz",
#' repos=NULL, type="source")
#' # Read a file with data
#' data(Gratiot)
#' # Generate a formatted list nammed data_Gratiot
#' data_Gratiot<-add_phenology(Gratiot, name="Complete",
#' reference=as.Date("2001-01-01"), format="%d/%m/%Y")
#' # Generate initial points for the optimisation
#' parg<-par_init(data_Gratiot, parametersfixed=NULL)
#' # Run the optimisation
#' result_Gratiot<-fit_phenology(data=data_Gratiot,
#' parametersfit=parg, parametersfixed=NULL, trace=1)
#' data(result_Gratiot)
#' # Plot the phenology and get some stats
#' output<-plot(result_Gratiot)
#' }
NULL
|
3c2d11721f04cb577bb57fb99fec7c01868cb1d9 | dca8f298d746b2c915031bf6ed8552d67798e864 | /protein.R | 2a0a85a7c61581261d9efdb5eae349a59949e164 | [] | no_license | kozenumezawa/ccm | 63fec04fd47c2765736cd84ba66bb734638f7907 | dc2344f7c3ff94221ba66c4baf99df1daae20f4d | refs/heads/master | 2020-12-24T07:59:08.176048 | 2016-12-25T09:38:42 | 2016-12-25T09:38:42 | 73,350,988 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,955 | r | protein.R | library(rEDM)
# Run simplex projection over a range of embedding dimensions and plot
# forecast skill (rho) against E, so the best dimension can be read off the
# curve. `data` is the time series handed straight to rEDM's simplex().
determineEmbeddingDimension <- function(data) {
  library_range <- c(1, 30)
  prediction_range <- c(50, 90)
  out <- simplex(data, library_range, prediction_range)
  plot(out$E, out$rho,
       type = "l",
       xlab = "Embedding Dimension (E)",
       ylab = "Forecast Skill (rho)")
}
# Plot forecast skill (rho) as a function of prediction horizon (tp = 1..10)
# at a fixed embedding dimension Em, using simplex projection.
# NOTE(review): the name is a typo for "predictionDecay"; kept as-is because
# the driver script below calls it under this name.
predictionDeacy <- function(data, Em) {
  lib <- c(1, 30)
  pred <- c(50, 90)
  simplex_output <- simplex(data, lib, pred, E = Em, tp = 1:10)
  par(mar = c(4, 4, 1, 1))
  plot(simplex_output$tp, simplex_output$rho, type = "l", xlab = "Time to Prediction (tp)", ylab = "Forecast Skill (rho)")
}
# Run S-maps over a range of the nonlinearity parameter theta at fixed
# embedding dimension Em, and plot forecast skill (rho) against theta:
# skill improving with theta suggests state-dependent (nonlinear) dynamics.
identifyingNonlinearity <- function(data, Em) {
  library_range <- c(1, 30)
  prediction_range <- c(50, 90)
  fit <- s_map(data, library_range, prediction_range, E = Em)
  par(mar = c(4, 4, 1, 1), mgp = c(2.5, 1, 0))
  plot(fit$theta, fit$rho,
       type = "l",
       xlab = "Nonlinearity (theta)",
       ylab = "Forecast Skill (rho)")
}
# Convergent cross mapping between two series: plots cross-map skill (rho)
# for A-xmap-B (red) and B-xmap-A (blue) against library size on one axis.
# Em is the embedding dimension and TAU the time lag, both passed to
# rEDM::ccm(); convergence of a curve with library size is conventionally
# read as evidence of causal forcing (see the rEDM documentation).
drawCCM <- function(Accm, Bccm, Em, TAU) {
  Accm_Bccm <- data.frame(Accm=Accm, Bccm=Bccm)
  # Cross map from A's reconstructed state space to B, at growing library sizes.
  Accm_xmap_Bccm <- ccm(Accm_Bccm, E = Em, lib_column = "Accm", tau = TAU,
                        target_column = "Bccm", lib_sizes = seq(10, 200, by = 10), random_libs = TRUE)
  # And the reverse direction, B to A.
  Bccm_xmap_Accm <- ccm(Accm_Bccm, E = Em, lib_column = "Bccm", tau = TAU,
                        target_column = "Accm", lib_sizes = seq(10, 200, by = 10), random_libs = TRUE)
  # Average skill per library size across the random library draws.
  Accm_xmap_Bccm_means <- ccm_means(Accm_xmap_Bccm)
  Bccm_xmap_Accm_means <- ccm_means(Bccm_xmap_Accm)
  par(mar = c(4, 4, 1, 1), mgp = c(2.5, 1, 0))
  # Negative rho values are clamped to 0 for display.
  plot(Accm_xmap_Bccm_means$lib_size, pmax(0, Accm_xmap_Bccm_means$rho), type = "l", col = "red", xlab = "Library Size", ylab = "Cross Map Skill (rho)", ylim = c(0, 1))
  lines(Bccm_xmap_Accm_means$lib_size, pmax(0, Bccm_xmap_Accm_means$rho), col = "blue")
  legend(x = "topleft", bty = "n", legend = c("Y xmap X", "X xmap Y"), col = c("red", "blue"), lwd = 1, inset = 0.02, cex = 1.5)
}
# Driver script: load the protein time series and run the EDM workflow
# (embedding dimension, prediction decay, nonlinearity, CCM) on the two
# series `ratio` and `Round`.
inputdata <- read.csv("./csv/protein.csv", header=TRUE)
time <- inputdata$time
ratio <- inputdata$ratio
# NOTE(review): `round` and `t` shadow base::round and base::t for the rest
# of this script; kept as-is to avoid renaming throughout.
round <- inputdata$Round
TIMESTEP <- 100
t <- 1:TIMESTEP
# show inputdata: both series overlaid on one plot.
plot(t, ratio, type = "l",xlim=c(0, TIMESTEP), ylim=c(0,2), xlab = "t", ylab = "X(t)", col = "black")
lines(t, round, type = "l", xlab = "t", ylab = "Y(t)", col = "red")
legend("topleft", c("ratio", "round"), lty=c(1,2), col=c(1,2), lwd=2, bty="n", cex=1.2)
# The two series handed to the CCM analysis.
Accm <- as.numeric(ratio)
Bccm <- as.numeric(round)
# create ARmodel
# ratio_normalized <- Accm - 1
# plot(t, ratio_normalized, type = "l",xlim=c(0, TIMESTEP), ylim=c(-1,1), xlab = "t", ylab = "X(t)", col = "red")
# ratio <- ar(ratio_normalized, aic=TRUE)
# ratio$ar
# determine Embedding Dimension
determineEmbeddingDimension(Accm)
determineEmbeddingDimension(Bccm)
# Embedding dimension chosen from the plots above.
E <- 7
# Prediction Decay
predictionDeacy(data = Accm, Em = E)
predictionDeacy(data = Bccm, Em = E)
# Time lag for CCM (idiomatic `<-` instead of top-level `=`).
TAU <- 1
# Identifying Nonlinearity
identifyingNonlinearity(data = Accm, Em = E)
identifyingNonlinearity(data = Bccm, Em = E)
# draw CCM
# Fixed: the original passed `E = E`, which only worked through R's partial
# matching of argument names against drawCCM's `Em` parameter.
drawCCM(Accm = Accm, Bccm = Bccm, Em = E, TAU = TAU)
e71d739e7ecfbdaf9b877a231e286a6788b0ae39 | 44f71f6997183e7769ecc77391aa4bfd61c35707 | /man/SIRbirths.Rd | 52a69c3eb3cbc6b323f3fbb68776f4c0f7c607e1 | [] | no_license | ictr/shinySIR | 71bb2e76f0cce10a6548a5acc72553168c2920a6 | e42c3b9b73a1f9148c67d290d3187ab2b8745ada | refs/heads/master | 2023-03-06T22:16:25.771833 | 2021-02-22T19:22:35 | 2021-02-22T19:22:35 | 341,286,082 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 450 | rd | SIRbirths.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/models.R
\name{SIRbirths}
\alias{SIRbirths}
\title{SIR model with demography}
\usage{
SIRbirths(t, y, parms)
}
\arguments{
\item{t}{numeric vector of time points.}
\item{y}{numeric vector of variables.}
\item{parms}{named vector of model parameters.}
}
\value{
equation list
}
\description{
These equations describe the classic SIR model with equal births and deaths.
}
|
65abc02a58fc9442eb57bd2a07e5d58a66fc024f | 9f36da07d28413b16ad416857331c9eb77156800 | /Twitter Senitimental Analysis/ui.r | 18d573d36994483944f6363db53d4f764df081dc | [] | no_license | harpratap/R | 5a634160f47b5eaba513355f9035b43703877619 | 16b568d63c311d153df6ae83c9b9d27f3a922998 | refs/heads/master | 2021-01-20T21:12:16.496625 | 2016-07-04T06:27:54 | 2016-07-04T06:27:54 | 62,534,580 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 580 | r | ui.r | # 11-ui.r
# Shiny UI: file-upload app showing line/word statistics, sentiment tables
# and plots for the uploaded text. The output/input ids used here must match
# the corresponding render*/reactive definitions in the companion server.R.
library(shiny)
shinyUI(fluidPage(
  titlePanel("Uploading Files"),
  # NOTE(review): sidebarPanel/mainPanel sit directly inside fluidPage without
  # a sidebarLayout() wrapper; confirm the intended layout renders correctly.
  sidebarPanel(
    # File chooser rendered server-side, plus summary statistics of the file.
    uiOutput("inpFileName"),
    tags$hr(),
    tags$b(p("File Info")),
    textOutput('LineCountText'),
    textOutput('TotalWordCount'),
    textOutput('AvgWordsPerLine'),
    tags$hr(),
    # Per-category line counts from the sentiment classification.
    tableOutput('LineCountEmotions'),
    tableOutput('LineCountPolarity')
  ),
  mainPanel(
    # Raw data preview followed by the word-frequency and sentiment plots.
    dataTableOutput('ShowData'),
    plotOutput('HistSigWords'),
    plotOutput('CloudSigWords'),
    plotOutput('HistEmotions'),
    plotOutput('HistPolarity')
  )
))
bddf18d938fb6d5d156690d554e9c834d5482c73 | f96af69ed2cd74a7fcf70f0f63c40f7725fe5090 | /MonteShaffer/humanVerseWSU/humanVerseWSU/man/removeAllColumnsBut.Rd | e085744c402833d75daf25b3b75b7df9ca21321c | [
"MIT"
] | permissive | sronchet/WSU_STATS419_2021 | 80aa40978698305123af917ed68b90f0ed5fff18 | e1def6982879596a93b2a88f8ddd319357aeee3e | refs/heads/main | 2023-03-25T09:20:26.697560 | 2021-03-15T17:28:06 | 2021-03-15T17:28:06 | 333,239,117 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 665 | rd | removeAllColumnsBut.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions-dataframe.R
\name{removeAllColumnsBut}
\alias{removeAllColumnsBut}
\alias{removeAllColumnsExcept}
\title{removeAllColumnsBut}
\usage{
removeAllColumnsBut(df, mycols)
}
\arguments{
\item{df}{dataframe}
\item{mycols}{names of cols to keep}
}
\value{
dataframe, updated
}
\description{
Remove all columns in the data frame but (except for)
those columns listed.
}
\details{
Useful when you have a df with lots of columns.
}
\examples{
library(datasets);
data(iris);
head(iris);
dim(iris);
ndf = removeAllColumnsBut(iris, c("Petal.Length","Petal.Width"));
head(ndf);
dim(ndf);
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.