blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
67fe66a11ca422befdc57690f6d9cad50e2de386 | 184180d341d2928ab7c5a626d94f2a9863726c65 | /valgrind_test_dir/orderTriGrid_-test.R | 7fd7626669795d56c98dbf1724a9d33cb5662c7a | [] | no_license | akhikolla/RcppDeepStateTest | f102ddf03a22b0fc05e02239d53405c8977cbc2b | 97e73fe4f8cb0f8e5415f52a2474c8bc322bbbe5 | refs/heads/master | 2023-03-03T12:19:31.725234 | 2021-02-12T21:50:12 | 2021-02-12T21:50:12 | 254,214,504 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 384 | r | orderTriGrid_-test.R | function (faces, neigh, startFaces, startVert, nBelts, nV)
{
    # Retrieve the global logging environment set up by the test harness
    # (RcppDeepState-style argument capture, per the surrounding repo).
    e <- get("data.env", .GlobalEnv)
    # Append this call's arguments so the fuzzing inputs can be inspected
    # or replayed later.
    e[["orderTriGrid_"]][[length(e[["orderTriGrid_"]]) + 1]] <- list(faces = faces,
        neigh = neigh, startFaces = startFaces, startVert = startVert,
        nBelts = nBelts, nV = nV)
    # Forward to the compiled icosa routine under test.
    .Call("_icosa_orderTriGrid_", faces, neigh, startFaces, startVert,
        nBelts, nV)
}
|
e716cfc1800d461206f559982a0d90f10e9320d6 | bd21a32d8baf53669f6e65c388d41aab38569ce5 | /R/plot_S_event_type.R | 95de75e722aa70440dc26f1111022da0e7ce30ee | [] | no_license | han-tun/rstanbmcm | e5b8f9a315ff09e11a11bd9816e555f5ce7aedce | 76d2c423fdd5d0a63040176df1068867a81da8e8 | refs/heads/main | 2023-01-06T09:38:31.432306 | 2020-11-03T15:20:05 | 2020-11-03T15:20:05 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,064 | r | plot_S_event_type.R |
#' plot_S_event_type
#'
#' Plot results of running stan
#' relative survival mixture cure model.
#'
#' @param file_names Nested list of file name for stan output
#'
#' @importFrom purrr map
#' @importFrom reshape2 melt
#' @importFrom rstan extract
#' @importFrom dplyr mutate
#'
#' @examples
#' load("data/file_names.RData")
#'
plot_S_event_type <- function(file_names) {
  ##TODO:
  # text cure fractions

  # One stanfit extract and one summary table per
  # event type x treatment combination.
  fit_stan <- list()
  S_stats <- list()
  S_pred <- NULL

  # file_names is a nested list: event type -> treatment -> stan .rds path.
  event_types <- names(file_names)
  tx_names <- names(file_names[[1]])

  for (i in event_types) {
    fit_stan[[i]] <- list()
    S_stats[[i]] <- list()
    for (j in tx_names) {
      # Posterior draws for this event type / treatment.
      fit_stan[[i]][[j]] <-
        readRDS(file_names[[i]][[j]]) %>%
        rstan::extract()

      # rearrange to time as rows; one tibble per survival curve type
      # (predicted, uncured baseline S0, background S_bg)
      S_dat <-
        list(
          t(fit_stan[[i]][[j]]$S_pred) %>%
            as_tibble() %>%
            mutate(month = 1:n(),
                   type = "S_pred"),
          t(fit_stan[[i]][[j]]$S0) %>%
            as_tibble() %>%
            mutate(month = 1:n(),
                   type = "S0"),
          t(fit_stan[[i]][[j]]$S_bg) %>%
            as_tibble() %>%
            mutate(month = 1:n(),
                   type = "S_bg"))

      # means and credible intervals (2.5%/97.5% quantiles over draws)
      S_stats[[i]][[j]] <-
        S_dat %>%
        do.call(rbind, .) %>%
        melt(id.vars = c("month", "type")) %>%
        group_by(month, type) %>%
        summarise(mean = mean(value),
                  lower = quantile(value, probs = 0.025),
                  upper = quantile(value, probs = 0.975))
    }
  }

  # unnest the nested list into one data frame, labelling each row with
  # its treatment and event type
  plot_dat <-
    S_stats %>%
    map(bind_rows, .id = "Tx") %>%
    bind_rows(.id = "event_type") %>%
    mutate(scenario = paste(event_type, Tx, sep = "_"))

  # Faceted survival curves with credible-interval ribbons.
  ggplot(plot_dat, aes(month, mean, group = type, colour = type)) +
    geom_line() +
    # facet_grid(. ~ scenario)
    facet_grid(event_type ~ Tx) +
    ylab("Survival") +
    geom_ribbon(aes(x = month, ymin = lower, ymax = upper, fill = type),
                linetype = 0,
                alpha = 0.2)
}
|
103230e9164a71f59bad8de6c47cee655f80036b | dcbd82e150369094a73253fa48cdd2887e1dd778 | /IO_M2.1_new/Report_M2.1a_LoadingDemoData.R | 3d76e6ba16f6386e68cee15bb101e36fc2445c13 | [] | no_license | yimengyin16/Model_Main | 0e8140ba296d1115bbbc64bcffefb05ac7a4fe86 | b63573407aaa0f6489d96083bf2fe09e843ca43f | refs/heads/master | 2022-07-03T22:44:47.660461 | 2018-02-22T01:36:15 | 2018-02-22T01:36:15 | 27,349,525 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 8,612 | r | Report_M2.1a_LoadingDemoData.R | rm(list = ls())
library(reshape2)
library(plyr)
library(dplyr)
library(tidyr)
library(ggplot2)
library(knitr)
library(magrittr) # to use %<>%
library(zoo)
library(grid)
library(gridExtra)
library(stringr)
library("readr")
library("readxl")
library(xlsx)
source("Functions.R")
IO_folder <- "IO_M2.1_new/"
#*****************************************************
## Selecting runs and calculating risk measures ####
#*****************************************************
runs_demo <- c("D1F075-average_gn2",
"D1F075-average",
"D1F075-average_g2",
"D1F075-mature1_gn1",
"D1F075-mature2_gn1",
"D1F075-immature_g1")
runs_demo_labels <- c("Average, 2% decline",
"Average, constant workforce",
"Average, 2% growth",
"Mature plan 1 (high asset-payroll ratio)",
"Mature plan 2 (high normal cost)",
"Immature plan")
#****************************************************************************************************
# ## 3. Function for loading a single run ####
#****************************************************************************************************
#' Load one simulation run (an Outputs_*.RData file containing `outputs_list`)
#' and compute demographic summary measures, saving them to
#' <folder>/Analysis_Demo/DemoSum_<runname>.RData.
#'
#' @param fileName name of the .RData file to load.
#' @param folder   directory holding the run outputs (default: IO_folder).
#' @return invisible(NULL); called for the side effect of writing a file.
get_demoSum <- function(fileName, folder = IO_folder){

  # ## Loading Data ####
  # Loads `outputs_list` into this function's environment.
  load(paste0(folder, "/", fileName))

  # ## Computing remaining working life and remaining lifetime. ####

  # Expected remaining work life / lifetime at each age.
  #   p:   probability of remaining (in the workforce / alive) at each age
  #   age: ages corresponding to `p`
  get_expLife <- function(p, age){
    len <- length(age)
    exp.life <- numeric(len)
    for(i in seq_along(age)){
      p.i <- p[i:len]
      age.i <- age[i:len]
      # prob of quitting the workforce at age x * years until age x
      exp.life[i] <- sum(cumprod(p.i) * c( 1 - p.i[-1], 0) * seq_along(age.i))
    }
    return(exp.life)
  }

  # Remaining working life by entry age / age, from total decrement pxT.
  df_workLife <- expand.grid(ea = 20:74, age = 20:74) %>%
    filter(age >= ea) %>%
    left_join(outputs_list$decrement) %>%
    select( ea, age, pxT) %>%
    group_by(ea) %>%
    mutate(workLife = get_expLife(pxT, age))

  # Headcount-weighted average working life of actives, by year.
  df_workLife.sum <- outputs_list$ind_active %>%
    left_join(df_workLife) %>% group_by(year) %>%
    summarise(workLife.avg = weighted.mean(workLife, number.a, na.rm = TRUE))

  # Remaining lifetime of retirees, from mortality pxm.
  df_lifeTime <- expand.grid(ea = 20, age = 50:120) %>%
    left_join(outputs_list$decrement) %>%
    select(age, pxm) %>%
    mutate(lifeTime = get_expLife(pxm, age))

  # Headcount-weighted average remaining lifetime of retirees, by year.
  df_lifeTime.sum <- outputs_list$ind_retiree %>%
    left_join(df_lifeTime) %>% group_by(year) %>%
    summarise(lifeTime.avg = weighted.mean(lifeTime, number.r, na.rm = TRUE))

  # ## Summary measures ####
  # To be done:
  # - more flow measures for terms.
  df_demo_summary <- outputs_list$demo_summary %>%
    left_join(df_workLife.sum) %>%
    left_join(df_lifeTime.sum)

  # ## AL by age and ea ####
  # Accrued liability as a percentage of salary, year-30 snapshot.
  df_AL_sx <- outputs_list$ind_active %>%
    select(runname, year, ea, age, number.a, sx, ALx, ALx.v) %>%
    mutate(AL_sx = 100 * (ALx + ALx.v) / sx) %>%
    filter(year == 30, age < 75)

  # Normal cost as a percentage of salary for new entrants (age == ea).
  # (The original repeated the `year == 30` condition; stated once here.)
  df_NC_sx <- outputs_list$ind_active %>%
    select(runname, year, ea, age, number.a, sx, NCx, NCx.v) %>%
    mutate(NC.av_sx = 100 * (NCx + NCx.v) / sx,
           NC.a_sx = 100 * NCx / sx,
           NC.v_sx = 100 * NCx.v / sx) %>%
    filter(year == 30, age == ea, ea %in% 20:74 )

  # ## AL-payroll ratio ####
  df_AL.ratios <- outputs_list$results %>% filter(sim == 1) %>%
    select(runname, year, AL_PR, AL.act_PR, AL.ret_PR, AL.Ben_PR, AL.term_PR)

  # ## Descriptive statistics of the demo runs. ####
  pop.actives <- outputs_list$ind_active %>% select(runname, year, ea, age, number.a)
  pop.retirees <- outputs_list$ind_retiree %>% select(runname, year, ea, age, number.r)

  # Distribution of actives by age.
  df_ageDist <- pop.actives %>% group_by(year, age) %>%
    summarize(nactives_age = sum(number.a)) %>%
    mutate(pct_age = 100 * nactives_age / sum(nactives_age),
           runname = outputs_list$paramlist$runname )

  # Distribution of actives by entry age (renamed to `age` for plotting).
  df_eaDist <- pop.actives %>% group_by(year, ea) %>%
    summarize(nactives_age = sum(number.a)) %>%
    mutate(pct_age = 100 * nactives_age / sum(nactives_age),
           runname = outputs_list$paramlist$runname ) %>% rename(age = ea)

  # Distribution of actives by years of service.
  df_yosDist <- pop.actives %>% mutate(yos = age - ea) %>% group_by(year, yos) %>%
    summarize(nactives_age = sum(number.a)) %>%
    mutate(pct_age = 100 * nactives_age / sum(nactives_age),
           runname = outputs_list$paramlist$runname )

  # Assumed distribution of new entrants by entry age.
  df_entDist <- data.frame(runname = outputs_list$paramlist$runname,
                           age = outputs_list$paramlist$range_ea,
                           pct_age = outputs_list$entrant_dist * 100)

  # Distribution of retirees by age (minimum retirement age and above only).
  df_ageDist.r <- pop.retirees %>% group_by(year, age) %>%
    filter(age >= outputs_list$paramlist$r.min) %>%
    summarize(nretirees_age = sum(number.r)) %>%
    mutate(pct_age = 100 * nretirees_age / sum(nretirees_age),
           runname = outputs_list$paramlist$runname)

  save(df_demo_summary,
       df_AL_sx,
       df_NC_sx,
       df_AL.ratios,
       pop.actives,
       pop.retirees,
       df_ageDist,
       df_eaDist,
       df_yosDist,
       df_entDist,
       df_ageDist.r,
       file = paste0(IO_folder, "/Analysis_Demo/DemoSum_", outputs_list$paramlist$runname, ".RData"))

  invisible()
}
# get_demoSum("Outputs_D1F075-average.RData")
#****************************************************************************************************
# ## Calculate summary results for demographics runs ####
#****************************************************************************************************
# All simulation-output files for the D1 demographic runs.
file_select <- dir(IO_folder, pattern = "^Outputs_D1")
file_select  # echo which files will be processed
# Compute and save a DemoSum_*.RData summary for each run.
for (i in seq_along(file_select)){
  get_demoSum(file_select[i])
}
#****************************************************************************************************
# ## loading all demographics runs ####
#****************************************************************************************************
# Directory holding the per-run DemoSum_*.RData files written above.
folder <- paste0(IO_folder, "/Analysis_Demo/")
file_select <- dir(folder, pattern = "DemoSum_D1")

# Pre-allocate one slot per run for each collected table.
n_runs <- length(file_select)
df_demo_summary_all <- vector("list", n_runs)
df_AL_sx_all        <- vector("list", n_runs)
df_NC_sx_all        <- vector("list", n_runs)
df_AL.ratios_all    <- vector("list", n_runs)
df_ageDist_all      <- vector("list", n_runs)
df_eaDist_all       <- vector("list", n_runs)
df_yosDist_all      <- vector("list", n_runs)
df_entDist_all      <- vector("list", n_runs)
df_ageDist.r_all    <- vector("list", n_runs)

# Each DemoSum_*.RData file loads the per-run summary objects into the
# workspace; collect them run by run.
for (i in seq_along(file_select)){
  load(paste0(folder, file_select[i]))
  df_demo_summary_all[[i]] <- df_demo_summary
  df_AL_sx_all[[i]]        <- df_AL_sx
  df_NC_sx_all[[i]]        <- df_NC_sx
  df_AL.ratios_all[[i]]    <- df_AL.ratios
  df_ageDist_all[[i]]      <- df_ageDist
  df_eaDist_all[[i]]       <- df_eaDist
  df_yosDist_all[[i]]      <- df_yosDist
  df_entDist_all[[i]]      <- df_entDist
  df_ageDist.r_all[[i]]    <- df_ageDist.r
}

# Stack each list of per-run data frames into a single data frame.
# NOTE: rbind_all() was deprecated and later removed from dplyr;
# bind_rows() is its direct replacement.
df_demo_summary_all <- bind_rows(df_demo_summary_all)
df_AL_sx_all        <- bind_rows(df_AL_sx_all)
df_NC_sx_all        <- bind_rows(df_NC_sx_all)
df_AL.ratios_all    <- bind_rows(df_AL.ratios_all)
df_ageDist_all      <- bind_rows(df_ageDist_all)
df_eaDist_all       <- bind_rows(df_eaDist_all)
df_yosDist_all      <- bind_rows(df_yosDist_all)
df_entDist_all      <- bind_rows(df_entDist_all)
df_ageDist.r_all    <- bind_rows(df_ageDist.r_all)

save(df_demo_summary_all,
     df_AL_sx_all,
     df_NC_sx_all,
     df_AL.ratios_all,
     df_ageDist_all,
     df_eaDist_all,
     df_yosDist_all,
     df_entDist_all,
     df_ageDist.r_all, file = paste0(IO_folder, "/Analysis_Demo/DemoSum_all.RData"))
|
c803d2dad20a1ce642179d8d74963c00083a6eb8 | c83543898829fb207002ba13995d6a1f1e17fad0 | /subset selection.R | 2a6988444e088273044c4f16be4e0eccd5170044 | [] | no_license | eslh2050/Machine-Learning | ff6b28ac5bd7dfe4e61435887898cb08172b1376 | 038d62c3ed8c498e088a89545cd9ac9328c75496 | refs/heads/master | 2021-01-01T19:35:13.692499 | 2015-04-21T22:50:12 | 2015-04-21T22:50:12 | 34,356,186 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,484 | r | subset selection.R | library(ISLR)
# Best-subset selection on the ISLR Hitters data.
?Hitters
# NOTE(review): attach() puts the data columns on the search path; kept to
# preserve the original tutorial flow, but direct indexing is safer.
attach(Hitters)
### check for missing Salary Values.
### There are 59 missing salaries
sum(is.na(Salary))
# Drop rows with missing values and re-attach the cleaned data.
Hitters <- na.omit(Hitters)
attach(Hitters)
dim(Hitters)
### the observations with missing values are deleted
### Use subset selection to decide what
### variables should be in our model
### Use "leaps" package to run subset selection
library(leaps)
model <- regsubsets(Salary~., data = Hitters)
summary(model)
### a "*" in the output indicates that the variable is included in the model
### notice that regsubsets() only reports models with up to
### 8 variables by default, but we can change that as follows:
model <- regsubsets(Salary~., data = Hitters, nvmax = 19)
model_summary <- summary(model)
names(model_summary)
model_summary$adjr2
max(model_summary$adjr2)
### Plot adjusted R-squared by model size; larger is better, so mark the maximum.
par(mfrow = c(1,2))
plot(1:19,
     model_summary$adjr2,
     xlab = "Number of Variables",
     ylab = "Adjusted R-squared",
     type = "l")
index <- which.max(model_summary$adjr2)
points(index,
       model_summary$adjr2[index],
       col = "red", cex = 2, pch = 4)
abline(v = index, col = "blue")
### Mallows' Cp: smaller is better, so mark the minimum.
plot(1:19,
     model_summary$cp,
     xlab = "Number of Variables",
     ylab = "CP",
     type = "l")
index <- which.min(model_summary$cp)
points(index,
       model_summary$cp[index],
       col = "red", cex = 2, pch = 4)
abline(v = index, col = "blue")
par(mfrow = c(1,1))
|
0c7f9c467f1db6cc57d1fc7429fa6053121f5988 | d2088d962b87482688cfe98c0c73fe978bc38c5f | /src/function_def_stages.R | 9b5320838d98c3d263591e01ad9162c4794c2b88 | [] | no_license | pascaltimshel/temporal-brain-expression-scz | e7360890243de5df04cd3e3a71cd522315a67ed0 | 8a5bed33d1c3b689cbba18e0a6ec54c5eb05f2e8 | refs/heads/master | 2021-06-29T18:47:25.020356 | 2017-09-15T08:33:49 | 2017-09-15T08:33:49 | 26,609,253 | 2 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,719 | r | function_def_stages.R | ################### Defining stages ####################
# BrainSpan developmental stages (s1-s11), each mapped to the donor-age
# labels falling inside the stage (pcw = post-conception weeks,
# mos = months, yrs = years).
stages <- list()
# NOTE: c() is NULL, so this does NOT create an "s1" element -- `stages`
# has no entry named "s1" (no 4-7 pcw samples exist); stages[["s1"]]
# still returns NULL when indexed. Kept as-is to preserve behavior.
stages[["s1"]] <- c() # 1 4-7 pcw Embryonic
stages[["s2a"]] <- c("8 pcw","9 pcw") # 2A 8-9 pcw Early prenatal
stages[["s2b"]] <- c("12 pcw") # 2B 10-12 pcw Early prenatal
stages[["s3a"]] <- c("13 pcw") # 3A 13-15 pcw Early mid-prenatal
stages[["s3b"]] <- c("16 pcw","17 pcw") # 3B 16-18 pcw Early mid-prenatal
stages[["s4"]] <- c("19 pcw","21 pcw","24 pcw") # 4 19-24 pcw Late mid-prenatal
stages[["s5"]] <- c("25 pcw","26 pcw","35 pcw","37 pcw") # 5 25-38 pcw Late prenatal
stages[["s6"]] <- c("4 mos") # 6 Birth-5 months Early infancy
###
# *ERROR* in inclusion of '1 yrs' in 's8' - discovered 03/12/2015*
# this is affecting 16 samples (from 1 donor) that are put in the wrong bin:
# 's7' (Late infancy) consists of only 10 samples instead of 10+16=26 samples,
# and 's8' (Early childhood) has 16 too many samples (44+16=60 instead of 44).
# The erroneous binning is deliberately KEPT below to reproduce earlier runs.
stages[["s7"]] <- c("10 mos") # 7 6-18 months Late infancy
stages[["s8"]] <- c("1 yrs","2 yrs","3 yrs","4 yrs") # 8 19 months-5 yrs Early childhood
### CORRECT according to "TECHNICAL WHITE PAPER: TRANSCRIPTOME PROFILING BY
### RNA SEQUENCING AND EXON MICROARRAY" (left commented out on purpose):
#stages[["s7"]] <- c("10 mos", "1 yrs") # 7 6-18 months Late infancy
#stages[["s8"]] <- c("2 yrs","3 yrs","4 yrs") # 8 19 months-5 yrs Early childhood
stages[["s9"]] <- c("8 yrs","11 yrs") # 9 6-11 yrs Late childhood
stages[["s10"]] <- c("13 yrs","15 yrs","18 yrs","19 yrs") # 10 12-19 yrs Adolescence
stages[["s11"]] <- c("21 yrs","23 yrs","30 yrs","36 yrs","37 yrs","40 yrs") # 11 20-60+ yrs Adulthood

# Canonical stage ordering (includes "s1" even though no samples map to it).
order.stages <- c("s1", "s2a", "s2b", "s3a", "s3b", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11")
|
63c95bddec246d5c8f41904a82ac70f645e38a7a | a6eb20e849fd02cef5ad041762a8a753e70b0f1b | /plot1.R | 3d0d3dab797a0245c831c5f10ff2aaa444bcbf48 | [] | no_license | prcm066/ExData_Plotting1 | 6d367953493c378650cafd9178807e5696832df1 | 8849cd62b36eda990006696b73611015dfc24e1b | refs/heads/master | 2023-05-12T21:56:11.998679 | 2015-02-08T21:41:00 | 2015-02-08T21:41:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 620 | r | plot1.R | library(data.table)
library(lubridate)
library(dplyr)
# setwd("/home/pablo/Data-Science/ExpData/ExData_Plotting1")

# Download and extract the household power consumption data set.
download.file("http://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", "hpc.zip")
unzip("hpc.zip")

# Read the full data set ("?" encodes missing values), parse the dates,
# and keep only the observations from 1-2 February 2007.
hpc <- read.csv("household_power_consumption.txt", sep = ";", na.strings = "?")
hpc$Date <- dmy(hpc$Date)
hpc <- data.table(hpc)
datos <- hpc[Date %in% dmy(c("01/02/2007", "02/02/2007")), ]

# Plot 1: red histogram of global active power.
hist(datos$Global_active_power, col = "red",
     xlab = "Global Active Power (kilowatts)", main = "Global Active Power")
|
7247d33b062835f78b71ff1b21dbef4f84101201 | 4fe5f0bcf41beac5ef402d62df2aa87ff7a1bf32 | /man/jd_checklast.Rd | ea49d4dc0a23e2ff8d96af1c0919b0be62ca7b93 | [] | no_license | palatej/jdemetra-jdlight | 21a6769e1b6073496886fb8e3e605c6533b62f61 | 244693eca78c9c72cae5c3d3ac9b095683e73591 | refs/heads/master | 2021-01-11T04:11:45.177191 | 2018-09-17T07:46:38 | 2018-09-17T07:46:38 | 71,233,740 | 0 | 0 | null | 2016-10-18T09:49:36 | 2016-10-18T09:49:36 | null | UTF-8 | R | false | false | 978 | rd | jd_checklast.Rd | \name{jd_checklast}
\alias{jd_checklast}
\title{
Checking of the last observations (Terror-like)
}
\description{
jd_checklast is a straightforward implementation of Terror, a popular tool for detecting anomalies in the last observation(s). This function compares the out-of-sample forecasts of the series with the actual figures. It returns the difference expressed relative to the standard error of the forecast.
}
\usage{
jd_checklast(s,last, method,absoluteErrors)
}
\arguments{
\item{s}{
series
}
\item{last}{
number of observations (default=1)
}
\item{method}{
tramo method used to compute the forecast (default= "TRfull")
}
\item{absoluteErrors}{
return result as absolute error (vs relative error; default= FALSE)
}
}
\value{
returns a numeric vector
}
\examples{
data("retail")
myseries <- retail$AutomobileDealers
checkLastObservations <-jd_checklast(myseries,4)
checkLastObservations_abs <-jd_checklast(myseries,4,absoluteErrors = TRUE)
}
|
d5ab54250efc74439e38291994dfab657b5d1c03 | 489df444580b3462aa582b6ad6aa24a779685754 | /mgmt-run.R | 4da3374fe203fb5cec2b4d14b0cc85e9b5e5a682 | [] | no_license | jogwalker/mgmt-jacobian-parallel | 75ccb351f509bbe1833f9408118498239d54ad13 | 25a0b4b8d1cc366f7d289430f6be8560919d43a8 | refs/heads/master | 2020-04-01T12:58:26.377062 | 2013-07-01T17:14:33 | 2013-07-01T17:14:33 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,009 | r | mgmt-run.R | args <- commandArgs(T)
# Command-line arguments (parsed above): output directory and job id.
outdir <- args[1]
jid <- as.numeric(args[2])  # job id doubles as the RNG seed

# read in the necessary functions (createMatrix, filterMatrix, invertMatrix
# are expected to come from this file)
source("~/mgmt-jacobian-parallel/mgmt-functions.R")
# load network data (provides `network`)
load("~/mgmt-jacobian-parallel/network.rdata")
# run simulations: seed by job id so each parallel job is reproducible
# and draws an independent stream
set.seed(jid)
# load the number of simulations `n`
load("~/mgmt-jacobian-parallel/n.rdata")

# Draw n candidate matrices and keep only the stable ones.
mat.stable <- list()
count <- 0
for (i in seq_len(n)) {  # seq_len() is safe when n == 0 (1:n would give c(1, 0))
  mat <- createMatrix(network)
  stable <- filterMatrix(mat)
  if (isTRUE(stable)) {
    count <- count + 1
    mat.stable[[count]] <- mat
  }
}

# only proceed if any are stable
if (count > 0) {
  # Stack the stable matrices into a 3-D array: rows x cols x draws.
  mat.filtered <- array(unlist(mat.stable), dim = c(dim(network), length(mat.stable)))
  inverses <- array(NA, dim = dim(mat.filtered))
  for (i in seq_len(dim(mat.filtered)[3])) {
    inverses[, , i] <- invertMatrix(mat.filtered[, , i])
  }
  # write output files
  filename1 <- paste0(outdir, "mat_stable_", jid, ".Rdata")
  filename2 <- paste0(outdir, "mat_inverse_", jid, ".Rdata")
  save(mat.filtered, file = filename1)
  save(inverses, file = filename2)
}
|
7f59b186e250d9e3810c6bc01683740c758623b5 | 73db71f7316c3d921a438cb2b6e4c567e9f84364 | /core/GENEO_VINYL.R | 03511eda456ce1e6f7a32eb35645d297fde4004d | [] | no_license | matteo14c/VINYL | f9249d6977b0f429e7a870c0cf2fabfc64456d29 | db14c03c609a4b204346c0da9630cb325595e9bd | refs/heads/master | 2020-12-19T16:42:46.461149 | 2020-10-27T17:26:16 | 2020-10-27T17:26:16 | 235,791,563 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,776 | r | GENEO_VINYL.R | #!/usr/bin/env Rscript
# Command-line interface:
#   1-2: two input VCF files (presumably case vs. control -- confirm),
#   3:   VINYL config file,
#   4:   space-separated lower bounds for the 12 score weights,
#   5:   space-separated upper bounds for the 12 score weights,
#   6:   output file for per-iteration results.
args <- commandArgs(trailingOnly = TRUE)
file1=args[1]
file2=args[2]
config=args[3]
minsV=as.vector(as.integer(unlist(strsplit(args[4], split=" "))))
maxV=as.vector(as.integer(unlist(strsplit(args[5], split=" "))))
ofile=args[6]
# Column header of the output table: the 12 weights, survivor counts in
# each file, and the Fisher-test statistics plus composite score.
header=c("disease_clinvar","score_AF","score_functional","score_NS","score_nIND","scoreeQTL","scoreG","scoreT","scoreM","scoreR","scoreSP","scoreGW","survP","totP","survA","totA","p-value","ratio","score");
cat(paste(header,collapse="\t"),"\n",file=ofile,append=T);
# GA fitness function: given a vector `x` of 12 candidate score weights,
# score both input VCFs with the external VINYL perl script, then sweep
# score thresholds to find the one that best separates the two files
# (one-sided Fisher test), log the result to `ofile`, and return the
# negated objective (the GA minimizes, so smaller is better).
evalVINYL=function(x)
{
# Command-line flag names matching the 12 weights in `x`.
scoreN=c("-disease_clinvar","-score_AF","-score_functional",
"-score_NS","-score_nIND","-scoreeQTL",
"-scoreG","-scoreT","-scoreM",
"-scoreR","-scoreSP","-scoreGW")
params=unlist(paste(scoreN,x,sep=" ",collapse=" "))
# Random prefix so concurrent evaluations do not clobber each other's files.
onameprefix=as.integer(runif(1)*10000000)#paste(x,collapse="_");
oname_files1=unlist(paste(c("-ofile","-ovcfile","-osummary"),
paste(rep(file1,3), rep(onameprefix,3), c("ofile","ovcfile","osummary"),sep="."),sep=" ",collapse=" "))
oname_files2=unlist(paste(c("-ofile","-ovcfile","-osummary"),
paste(rep(file2,3), rep(onameprefix,3), c("ofile","ovcfile","osummary"),sep="."),sep=" ",collapse=" "))
# Score each VCF with the external perl implementation.
command1=paste("perl ~/bin/score_complete_alt_M.pl -vcf",file1,config,params,oname_files1,sep=" ",collapse=" ")#,oname_files1)#,config)#,params)
command2=paste("perl ~/bin/score_complete_alt_M.pl -vcf",file2,config,params,oname_files2,sep=" ",collapse=" ")#,oname_files2)#,config)#,params)
Res1=system(command1,intern=FALSE)
Res2=system(command2,intern=FALSE)
# Read back the per-variant score tables produced by the perl script.
fileR=paste(file1,onameprefix,"ofile",sep=".",collapse="")
fileT=paste(file2,onameprefix,"ofile",sep=".",collapse="")
data_R=read.table(fileR,header=T)
data_T=read.table(fileT,header=T)
# Candidate thresholds: from the highest score down, in steps of 0.5.
range= rev(seq(min(data_R$VINYL_score),max(data_R$VINYL_score),0.5))
m=matrix(ncol=2,nrow=2)
totR=nrow(data_R)
totT=nrow(data_T)
score=0;
surv1=0;
surv2=0;
rat=0;
pval=1;
for (r in range)
{
# Variants at or above the threshold in each file (+1 pseudo-count on the
# second file, avoiding a zero cell in the contingency table).
posR=sum(data_R$VINYL_score>=r);
posT=sum(data_T$VINYL_score>=r)+1;
m[,1]=c(posR,totR);
m[,2]=c(posT,totT);
F=fisher.test(m,alternative="greater")
Fpv=F$p.value
Fodds=F$estimate
# Composite objective: significance + enrichment, penalized by the
# number of surviving variants in the second file.
localScore=0.2*-log10(Fpv)+0.6*log2(Fodds)-0.2*posT
# Keep the best threshold seen so far.
if (localScore>score)
{
score=localScore
pval=Fpv
rat=Fodds
surv1=posR
surv2=posT
}
}
# Clean up temporary files.
# NOTE(review): this glob removes *all* .ofile/.ovcfile/.osummary files in
# the working directory, not just this run's (despite onameprefix) --
# confirm this is safe when evaluations run concurrently.
Command=system("rm *.ofile *.ovcfile *.osummary",intern=FALSE)
# Append the weights and best-threshold statistics to the results table.
outV=paste(round(x,digits=2),collapse="\t");
cat(paste(outV,surv1,totR,surv2,totT,pval,rat,score,"\n",sep="\t"),file=ofile,append=T);
return(score*-1);
}
library(genalg)
# Optimize the 12 VINYL score weights with a genetic algorithm; evalVINYL
# returns -score so that minimizing the evaluation maximizes the score.
G=rbga(stringMin=minsV,stringMax=maxV,popSize=40,iters=40,evalFunc=evalVINYL)
#cat(summary(G),file=ofile,append=T)
|
2442ee284389a3e888d1549a12d5a7e4a7b7a8d4 | 007ae03cfe5abf41a0ad864eade451141c267cca | /auto-docs/executables/r/line_scatter.r | 5055441c928078cc1e43730137494f8b3e9cb806 | [] | no_license | VukDukic/documentation | ca96eb1994eeb532fe60c542960b017354bcede1 | 8e5aefdc38788956cfe31d8fe8b4b77cdf790e57 | refs/heads/master | 2021-01-18T09:02:27.034396 | 2015-01-20T23:46:58 | 2015-01-20T23:46:58 | 30,007,728 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 654 | r | line_scatter.r | # Learn about API authentication here: {{BASE_URL}}/r/getting-started
# Find your api_key here: {{BASE_URL}}/settings/api
library(plotly)
py <- plotly(username='TestBot', key='r1neazxo9w')
trace1 <- list(
x = c(1, 2, 3, 4),
y = c(10, 15, 13, 17),
mode = "markers",
type = "scatter"
)
trace2 <- list(
x = c(2, 3, 4, 5),
y = c(16, 5, 11, 9),
mode = "lines",
type = "scatter"
)
trace3 <- list(
x = c(1, 2, 3, 4),
y = c(12, 9, 15, 12),
mode = "lines+markers",
type = "scatter"
)
data <- list(trace1, trace2, trace3)
response <- py$plotly(data, kwargs=list(filename="line-scatter", fileopt="overwrite"))
url <- response$url
|
46d96a38252b4616f0ea7b05ae8a0b7a87f61a64 | d160bcfc5ad9a9f2f0b2b8013927df0d91755553 | /code.R | abd74419da103a401bee89c0e992a22a2025f957 | [] | no_license | NafissahPouye/sdc-msna-afg-microdata | d461fe8ef3a204c02733fc5a4c6d5b7c04cce116 | a1687bc2d825c32adcecc48dc66e4f0b953291a4 | refs/heads/master | 2020-11-23T20:21:43.865057 | 2019-12-13T09:44:35 | 2019-12-13T09:44:35 | 227,806,223 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 10,107 | r | code.R | #Load required libraries
library(readxl) #for excel, csv sheets manipulation
library(sdcMicro) #sdcMicro package with functions for the SDC process
library(tidyverse) #optional #for data cleaning
#Import data
setwd("C:/Users/LENOVO T46OS/Desktop/sdc-afg-msna-microdata")
data <-read_excel("data.xlsx", sheet = "WoAA_2019_Dataset_with_Weights",
col_types = c("date", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "numeric", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "numeric",
"text", "numeric", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "text", "text",
"text", "text", "text", "numeric",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "text", "text", "text",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "numeric", "text",
"text", "numeric", "text", "text",
"numeric", "numeric", "text", "text",
"text", "text", "numeric", "numeric",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "numeric", "numeric", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "text", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"text", "numeric", "numeric", "numeric",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "text", "text", "text", "text",
"text", "numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "numeric", "numeric",
"numeric", "text", "numeric", "numeric",
"numeric", "numeric", "numeric",
"text", "text", "text", "text", "numeric"))
#Select key variables
# Quasi-identifiers used for the disclosure-risk assessment.
# FIX: the original vector listed 'district' and 'host_hh_num' twice;
# duplicated key variables produce duplicated columns in the subset and
# an inflated key-variable set for createSdcObj(), so each variable is
# now listed exactly once.
selectedKeyVars <- c('district', 'birth_location', 'religious_attendance',
                     'hoh_sex', 'hoh_age', 'hoh_marital_status',
                     'displacement_status', 'highest_edu',
                     'total_income', 'host_hh_num',
                     'region', 'province', 'hh_size')
#select weights
weightVars <- c('weights')
# Key variables must be factors for sdcMicro's frequency-based risk measures
# (the separate, duplicated `cols` vector is replaced by reusing
# selectedKeyVars so the two lists cannot drift apart).
data[, selectedKeyVars] <- lapply(data[, selectedKeyVars], factor)
#Convert sub file to a dataframe (the input type expected by createSdcObj)
subVars <- c(selectedKeyVars, weightVars)
fileRes <- as.data.frame(data[, subVars])
# Build the SDC object on the selected quasi-identifiers.
# NOTE(review): the weights are carried along in fileRes but are not passed
# via the weightVar argument -- confirm whether that was intended.
objSDC <- createSdcObj(dat = fileRes,
                       keyVars = selectedKeyVars)
#print the risk
print(objSDC, "risk")
#Generate an internal (extensive) report
report(objSDC, filename = "index", internal = TRUE, verbose = TRUE)
|
71da4840224443f4e255775d5521284c6490f781 | 7eb4348b59159f4f0c9b11d9b01b8a86fa839e71 | /R/For loops Running through column names.R | 1d8159d2aade2664955d4986aaa07f06d4d419c9 | [] | no_license | JustinMShea/stack-overflow | f5417e8cb968e62d1f9a54128c2bd2d7ad9c6d90 | e6559de8a50fc0db8faa2a0d1eb983205806163e | refs/heads/master | 2021-01-17T07:21:14.072144 | 2018-09-12T23:48:29 | 2018-09-12T23:48:29 | 95,308,778 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 430 | r | For loops Running through column names.R |
# example using 1000 rows and 50 cols of data
N <- 1000
P <- 50
# Draw an N x P matrix of independent standard-normal predictors.
# BUG FIX: rep(data.frame(rnorm(N)), P) replicated a *single* random vector
# P times, making every column identical (perfectly collinear), so lm()
# produced NA coefficients for all but one predictor.
data <- as.data.frame(matrix(rnorm(N * P), nrow = N, ncol = P))
# Assign your y data to y.
y <- data.frame(y = rnorm(N))
# create a new data.frame containing y and the last 22 columns.
model_data <- cbind(y, data[, 29:50])
colnames(model_data) <- c("y", paste0("x", 1:10), paste0("z", 1:12))
str(model_data)
# Fit the regression of y on all 22 predictors and show diagnostic plots.
reg <- lm(y ~ ., data = model_data)
plot(reg)
|
1cdcc99faa873829d75381af054db7a5495e9767 | e6a31548cd2e3099adaad65ce3482c5bccfe410c | /jimmy_jimmy/jimmy_cumulative.R | 679ffc2c7023e41480d1796d74a51403e049d8bc | [] | no_license | committedtotape/i-dont-like-cricket | eb97ebd06115893782cad50142a2b0139eadd8d4 | 82496c223e5405069464f3891170292f4bc6da69 | refs/heads/main | 2023-03-09T19:05:21.082862 | 2021-03-01T21:33:27 | 2021-03-01T21:33:27 | 338,899,043 | 4 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,978 | r | jimmy_cumulative.R | library(tidyverse)
library(here)
library(janitor)
library(polite)
library(rvest)
library(ggforce)
library(glue)
library(ggtext)
#getPlayerData(profile = 8608, dir = here("data"), type = "bowling", file = "james_anderson.csv")
#jimmy <- cleanBowlerData(here("data","james_anderson.csv"))
jimmy_url <- "https://stats.espncricinfo.com/ci/engine/player/8608.html?class=1;template=results;type=bowling;view=innings"
#jimmy_page <- read_html(jimmy_url)
jimmy_table <- jimmy_url %>%
bow() %>%
scrape() %>%
html_table(fill = TRUE) %>%
.[4] %>%
.[[1]] %>%
select(-8, -12)
# jimmy_table <- jimmy_page %>%
# html_nodes("table") %>%
# .[4] %>%
# html_table() %>%
# .[[1]] %>%
# select(-8, -12)
# Tidy the scraped table into one row per innings with running career totals.
jimmy_clean <- jimmy_table %>%
  clean_names() %>%
  # first 7 columns are numeric stats scraped as character
  mutate(across(1:7, as.numeric)) %>%
  mutate(innings_no = cumsum(!is.na(overs)),
         # a new match starts whenever the start date changes
         match_no = cumsum(start_date != lag(start_date, default="")),
         overs = replace_na(overs, 0),
         # overs use cricket notation (4.3 = 4 overs + 3 balls); round the
         # fractional part so floating-point residue cannot leak into balls
         balls = floor(overs)*6 + round((overs - floor(overs))*10),
         cum_balls = cumsum(balls),
         # BUG FIX: the remainder must come from cum_balls, not the current
         # innings' balls, for the cumulative overs to be in overs notation
         cum_overs = floor(cum_balls/6) + (cum_balls %% 6) / 10,
         cum_runs = cumsum(replace_na(runs, 0)),
         cum_wkts = cumsum(replace_na(wkts, 0)),
         cum_avg = round(cum_runs / cum_wkts, 2),#rounded to 2 dec places
         cum_econ = floor(100 * cum_runs / (cum_balls/6))/ 100,#rounded to floor of 2 dec places
         year = str_sub(start_date, -4),
         label = glue("Match {match_no}\n{opposition}, {year}"))

# just keep end of match records (last innings row carries the match totals)
jimmy_by_match <- jimmy_clean %>%
  group_by(match_no) %>%
  filter(row_number() == max(row_number())) %>%
  ungroup()

# annotation data for the three matches highlighted on the chart
jimmy_text <- jimmy_by_match %>%
  filter(match_no %in% c(20,49,158)) %>%
  mutate(desc = case_when(match_no == 49 ~ glue("Avg: {cum_avg} ER: {cum_econ}\nRecords Best Match Figures of 11/71"),
                          TRUE ~ glue("Avg: {cum_avg} ER: {cum_econ}")))
# Final chart: connected path of cumulative average vs cumulative economy
# rate (from match 20 onward), coloured by match number, with the three
# annotated matches from jimmy_text circled.
jimmy_by_match %>%
  filter(match_no >= 20) %>%
  ggplot(aes(x = cum_econ, y = cum_avg)) +
  geom_path(aes(colour = match_no), alpha = 0.7, size = 1.5, lineend = "round") +
  geom_point(aes(fill = match_no), colour = "white", shape = 21) +
  geom_mark_ellipse(data = jimmy_text,
                    aes(label = label,
                        description = desc, group = match_no),
                    label.fontsize = 11,
                    label.family = "Avenir Next Condensed",
                    label.colour = c("#050833", "#808080"),
                    con.colour = "#808080",
                    colour = "#808080",
                    con.cap = 0) +
  labs(caption = "Graphic: @committedtotape | Source: espncricinfo") +
  # title/subtitle drawn as annotations so they sit inside the plot panel
  annotate("text", 2.5, 39.1, label = "James Anderson, like a fine wine...",
           family = "Avenir Next Condensed Bold", color = "#050833", hjust = 0, vjust = 0,
           size = 8) +
  annotate("text", 2.5, 38.1, label = "Cumulative Test Bowling Average and Economy Rate (since 20th Match)",
           family = "Avenir Next Condensed Bold", color = "#808080", hjust = 0, vjust = 0,
           size = 4.2, fontface = "italic") +
  annotate("text", 2.5, 37.4, label = "\"He's like a fine wine - getting better and better\" - Joe Root after a sensational\nbowling spell from James Anderson helps England claim victory in his 158th Test Match",
           family = "Avenir Next Condensed", color = "#808080", hjust = 0, vjust = 1,
           size = 4.2, fontface = "italic") +
  scale_x_continuous("Economy Rate (Cumulative)",limits = c(2.5, 4), breaks = seq(2.5,4,0.5)) +
  scale_y_continuous("Average (Cumulative)",limits = c(24, 42), breaks = seq(24,42,2)) +
  scale_colour_gradient(low = "#050833", high = "#3AA3F6") +
  scale_fill_gradient(low = "#050833", high = "#3AA3F6") +
  # NOTE(review): Avenir Next Condensed must be installed for these theme
  # settings to render as intended.
  theme_minimal(base_family = "Avenir Next Condensed") +
  theme(legend.position = "none",
        plot.title = element_text(family = "Avenir Next Condensed", color = "#050833", hjust = 0.2,
                                  size = 24, lineheight = 2 ,face = "bold"),
        panel.grid.major = element_line(colour = "gray92"),
        panel.grid.minor = element_line(colour = "gray95"),
        axis.text = element_text(size = 10),
        axis.title.x = element_text(size = 12, margin = margin(10,0,10,0)),
        axis.title.y = element_text(size = 12, margin = margin(0,10,0,0)),
        plot.caption = element_text(size = 10),
        plot.margin = margin(10, 20, 10, 20))
# Save the last plot at a fixed physical size into the project plot folder.
ggsave(here("jimmy_jimmy", "plots", "jimmy_bowling_connect.png"), width = 7.4, height = 9, dpi = 320)
# Exploratory rolling 10-match average, kept for reference:
# jimmy_by_match %>%
#   mutate(wkts_10 = cum_wkts - lag(cum_wkts, 10, default = 0),
#          runs_10 = cum_runs - lag(cum_runs, 10, default = 0),
#          balls_10 = cum_balls - lag(cum_balls, 10, default = 0),
#          avg_10 = round(runs_10 / wkts_10, 2),
#          econ_10 = floor(100 * runs_10 / (balls_10/6))/ 100) %>%
#   filter(match_no >= 10) %>%
#   ggplot(aes(x = match_no, y = avg_10)) +
#   geom_line()
|
9ccbafde991a3a8aff8682d52efb3ef0ed945f91 | e58ef035ef5f03d0407cc99e83c56cbe8fc5f44c | /R/zzz.R | ac109db72d6a9d6b902e5df57d3d5c732dd4e418 | [
"MIT"
] | permissive | medewitt/staninside | 422ec48ceaa8473ee4951384e9251dd12efeda05 | 2a27a54f87fe26bd45ed72a000621c9d7762e248 | refs/heads/main | 2023-05-22T16:24:11.087344 | 2021-11-20T12:55:29 | 2021-11-20T12:55:29 | 368,686,599 | 6 | 0 | NOASSERTION | 2021-11-20T12:55:30 | 2021-05-18T22:59:39 | R | UTF-8 | R | false | false | 197 | r | zzz.R | globalVariables({
})
# Name of this package as a length-one character vector.
this_package <- function() "staninside"
# Build the source text for a this_pkg() accessor hard-wired to pkg_name.
# Returns the generated code as a single character string; nothing is
# evaluated or written to disk here.
# NOTE(review): the generated function is named this_pkg, not this_package
# as the sibling accessor above -- confirm that is intended.
make_this_package <- function(pkg_name) {
  sprintf(
    "this_pkg <- function () {
      '%s'
    }\n", pkg_name
  )
}
|
1496d63dc4cb9763cfbb2eb17f2b0d0eab7246ad | e3259d8f489b093b246fe2fd0c4fb6999d6466bf | /man/strmean.dt.Rd | d6fbebc8393cfb5cf9ac806d361a340db3b04274 | [] | no_license | Franvgls/CampR | 7baf0e8213993db85004b95d009bec33570c0407 | 48987b9f49ea492c043a5c5ec3b85eb76605b137 | refs/heads/master | 2023-09-04T00:13:54.220440 | 2023-08-22T14:20:40 | 2023-08-22T14:20:40 | 93,841,088 | 0 | 2 | null | null | null | null | UTF-8 | R | false | true | 985 | rd | strmean.dt.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/strmean.dt.R
\name{strmean.dt}
\alias{strmean.dt}
\title{Medias intraestrato y estratificadas por estratos geográfico y batimétrico}
\usage{
strmean.dt(x, sector, area, Nas = FALSE)
}
\arguments{
\item{x}{Es el vector de abundancias en peso o número}
\item{sector}{Es la asignación al sector de cada muestra igual que en strmean {\link{strmean}} pero ha de tener dos caracteres, primero el sector geográfico y segundo el estrato batimétrico}
\item{area}{El área del sector correspondiente}
\item{Nas}{Permite obtener desviación estandar cuando sólo existe un dato, dando una desviación standard de 0. Es util para obtener una estimacion de la variabilidad aunque sólo haya un lance en un estrato}
}
\value{
Devuelve la media estratificada ponderada al área dentro de cada estrato y subestrato y la total del área
}
\description{
Función interna para cálculos y aplicación de bootstrap
}
|
5152092f557b3d0f38ddf25d682c90303aa82fd6 | 959e7ec322afde2310f97ce493626676da5a2673 | /hosp_workers.R | 5871ec33be46e68db00c350f6e8a0d0377425f16 | [] | no_license | aseemdeodhar/hosp-workers-bos | c45cae6031aef526933b5f7f5d31d6a9759c848a | 45b1002d6cf90e038fb3a8627040978290baae3f | refs/heads/master | 2022-12-01T09:24:47.346787 | 2020-08-07T17:49:08 | 2020-08-07T17:49:08 | 285,401,737 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,244 | r | hosp_workers.R | # This analysis is to try and identify hospital shift workers by analysing usage statistics from
# the Transit App.
# loading required packages:
library(tidyverse)
library(lubridate)
library(sf)
library(leaflet)
# Loading required data:
bos <- read_sf("shapefiles/i95_tracts/i95_tracts.shp")
hosp <- read_sf("shapefiles/hospitals/HOSPITALS_PT.shp")
hosp <- st_transform(hosp, crs=4269)
bos <- st_transform(bos, crs=4269)
# Plotting these two shapes together:
boshosp <- st_filter(hosp, bos)
bos_1 <- st_join(bos, boshosp)
ggplot()+geom_sf(data = bos)+geom_sf(data = boshosp)+
theme_minimal()+
ggtitle(label = "Hospital Locations in Inner Boston",
subtitle = "Optimizing Bus Routes for Boston's Healthcare workers")
# Join the hospitals dataset to their respective census block groups
ggplot()+geom_sf(data = bos_1, aes(fill = BEDCOUNT))+
theme_minimal()+
ggtitle("CBGs with Hospitals & Bedcount")
# Get all work locations from Transit App usage data.
# Create a subset of device_ids for work locations falling within hospital tracts
# Loading all favorite work locations data:
work_locs <- read.csv("work_locs.csv", stringsAsFactors = FALSE)
work_locs <- work_locs %>% select(device_id, address, favorite_type, latitude, longitude, locality) %>%
distinct() %>%
filter(latitude < 43.3192 & latitude > 41.0484) %>%
filter(longitude < -69.799 & longitude > -72.0842)
work_locs <- st_as_sf(work_locs, coords = c("longitude", "latitude"), crs = 4269)
# We get a subset of block groups which contain hospitals
hosp_blocks <- filter(bos_1, BEDCOUNT >= 1)
# Subsetting the work locations file to get only those locations which fall within block groups
# which contain hospitals
workhosp <- st_filter(work_locs, hosp_blocks)
ggplot()+
geom_sf(data = bos_1)+
geom_sf(data = workhosp)
length(unique(workhosp$device_id))
bos_hospworks_locs <- st_join(bos, workhosp)
# Get list of unique device IDs. Get the home saved locations of these device IDs
uq_hosp_workers <- workhosp %>% select(device_id, address, favorite_type, geometry) %>% unique()
ggplot()+
geom_sf(data = bos_1)+
geom_sf(data = uq_hosp_workers)
uq_hosp_workers$work_GEOID <- bos_hospworks_locs$GEOID[match(uq_hosp_workers$device_id,
bos_hospworks_locs$device_id)]
ggplot(data = uq_hosp_workers, aes(x = work_GEOID))+geom_bar()+
geom_text(stat='count', aes(label=..count..), vjust= -.5, size = 3)+
scale_x_discrete(guide = guide_axis(angle = 45))+
ggtitle("CBGs with Hospitals, & Worker Count",
subtitle = "through App data")+
theme_minimal()
# Work CBG Counts:
work_cbg <- st_join(bos, uq_hosp_workers)
work_cbg_counts <- work_cbg %>%
group_by(GEOID) %>%
count()
ggplot()+geom_sf(data = work_cbg_counts, aes( fill = log(n)))+
ggtitle("Work Locations of Hospital Workers")+
theme_minimal()
######################################################################################
# loading Boston area data and filtering out duplicates:
home_locs <- read.csv("home_locs.csv", stringsAsFactors = FALSE)
home_locs <- home_locs %>% select(device_id, address, favorite_type, latitude, longitude, locality) %>%
distinct() %>%
filter(latitude < 43.3192 & latitude > 41.0484) %>%
filter(longitude < -69.799 & longitude > -72.0842)
home_locs <- st_as_sf(home_locs, coords = c("longitude", "latitude"), crs = 4269)
# inner join home_locs to work_locs to get a subset of home_locations with corresponding deviceIDs
home_hosp <- inner_join(home_locs %>% as.data.frame() %>% select(device_id,
address,
favorite_type,
locality,
geometry),
uq_hosp_workers %>% as.data.frame() %>% select(device_id) ,
by = "device_id")
home_hosp <- st_as_sf(home_hosp, sf_column_name = "geometry")
uq_home_hosp <- home_hosp %>% unique()
uq_home_hosp$work_GEOID <- bos_hospworks_locs$GEOID[match(uq_home_hosp$device_id,
bos_hospworks_locs$device_id)]
ggplot()+geom_sf(data = bos)+geom_sf(data = uq_home_hosp)
# We now have the home locations of all the device_ids who's work location is in the
# CBGs of hospitals.
# Our next goal is to group these home locations by their respective work location's CBG.
# Why? So we can guess where each hospital's workers live.
home_cbg <- st_join(bos, uq_home_hosp)
home_cbg$work_GEOID <- bos_hospworks_locs$GEOID[match(home_cbg$device_id,
bos_hospworks_locs$device_id)]
# get a count of 'homes' in each represented CBG
home_cbg_counts <- home_cbg %>%
group_by(GEOID) %>%
count()
ggplot()+geom_sf(data = home_cbg_counts, aes( fill = n))+
ggtitle("Home Locations of Hospital Workers")+
theme_minimal()
summary(as.factor(uq_home_hosp$work_GEOID))
ggplot()+
geom_sf(data = bos_1)+
geom_sf(data = uq_home_hosp, aes(color = work_GEOID), size = 1)+
ggtitle("Home locations grouped by CBG of hospital location")+
theme_minimal()
ggplot(data = home_cbg_counts, aes(x = GEOID))+geom_bar()+
geom_text(stat='count', aes(label=..count..), vjust= -.5, size = 3)+
scale_x_discrete(guide = guide_axis(angle = 45))+
ggtitle("CBGs with Hospitals, & Worker Count",
subtitle = "through App data")+
theme_minimal()
ggplot()+geom_sf(data = uq_home_hosp, aes(color = work_GEOID))
uq_home_hosp %>% group_by(work_GEOID)
# Reading in favorite lines data to attach to selected device_ids
uq_devices <- as.data.frame(unique(uq_home_hosp$device_id))
gid_lines <- read.csv("gid_rt_id.csv", stringsAsFactors = FALSE)
favlines <- read.csv("fav_lines.csv", stringsAsFactors = FALSE)
x <- left_join(favlines %>% select(device_id, global_route_id), gid_lines %>% select(global_route_id, service_type, route_id),
by = "global_route_id") %>% drop_na()
y <- left_join(x,
uq_home_hosp %>% as.data.frame() %>% select(device_id, work_GEOID),
by = "device_id") %>% drop_na() %>% distinct()
#y <- left_join(x, gid_lines %>% select(global_route_id, service_type, route_id),
# by = "global_route_id") %>% drop_na()
# Seeing which lines have max usage.
ggplot()+geom_bar(data = y, aes(x = route_id, fill = work_GEOID))+
scale_x_discrete(guide = guide_axis(angle = 90))+
theme(legend.position = "none")
# compare this data with general users database:
ggplot()+geom_bar(data = y, aes(x = route_id, fill = work_GEOID))+
scale_x_discrete(guide = guide_axis(angle = 90))+
theme(legend.position = "none")
ggplot()+geom_bar(data = x, aes(x = route_id))+
scale_x_discrete(guide = guide_axis(angle = 90))+
theme(legend.position = "none")
summary(as.factor(y$route_id))
pop_routes <- y %>% group_by(work_GEOID, route_id) %>%
select(route_id, work_GEOID) %>%
count() %>%
arrange(desc(n))
top5pop_routes_groups <- pop_routes %>% group_by(work_GEOID) %>% top_n(5)
top5pop_routes_overall <- pop_routes %>% top_n(5)
top5pop_routes_overall <- y %>% group_by(route_id) %>%
select(route_id) %>%
count() %>%
arrange(desc(n)) %>%
head(20)
# reading in bus+subway spatial data:
bus_shp <- read_sf("shapefiles/mbtabus/MBTABUSROUTES_ARC.shp")
bus_sample <- bus_shp %>%
filter(!grepl("inbound",ROUTE_DESC))
bus_sample <- bus_shp %>% as.data.frame() %>%
mutate(CTPS_ROUTE = as.character(CTPS_ROUTE)) %>%
group_by(CTPS_ROUTE) %>% slice(1) %>% st_as_sf(sf_column_name = "geometry")
asdf <- left_join(top5pop_routes,
bus_sample %>% as.data.frame(),
by = c( "route_id" = "MBTA_ROUTE")) %>% drop_na() %>%
select("route_id", "n", "SHAPE_ID", "SHAPE_LEN", "geometry")
asdf <- st_as_sf(asdf, sf_column_name = "geometry")
ggplot()+
geom_sf(data = bos_1, aes(fill = BEDCOUNT))+
theme_minimal()+
geom_sf(data = asdf, aes(color = n,))+
ggtitle("Bus Routes")+
theme_minimal()
################################################################################
top5pop_routes_overall_shp <- left_join(top5pop_routes_overall,
bus_sample %>% as.data.frame(),
by = c( "route_id" = "CTPS_ROUTE")) %>% drop_na() %>%
select("route_id", "n", "SHAPE_ID", "SHAPE_LEN", "geometry") %>%
st_as_sf(sf_column_name = "geometry")
ggplot()+
geom_sf(data = bos_1, aes(fill = BEDCOUNT))+
theme_minimal()+
geom_sf(data = top5pop_routes_overall_shp, aes(color = n,), size = 2)+
geom_sf_label(data = top5pop_routes_overall_shp, aes(label = route_id))+
ggtitle("Top 17 Bus Routes Overall")+
scale_color_gradient(low = "green", high = "red")+
theme_minimal()
|
b9aef07bbce5b1c62b48a17fc9274fbfe1fb4013 | 5aa94eaf6fb56d125933f32a25e65ca6862ddb9a | /man/tfcalculus.Rd | bfb5c78e7e9c6d0e7a87e95d8a3734d12a931d92 | [] | no_license | HerrMo/tidyfun | a938149c69a7db6d9b4cae94f0d435a4dcad0450 | 8d7be6b48ac59bf1e34822baee760902a284540f | refs/heads/master | 2020-04-15T10:27:17.481261 | 2018-11-26T23:16:22 | 2018-11-26T23:16:22 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,718 | rd | tfcalculus.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/calculus.R
\name{tf_derive}
\alias{tf_derive}
\alias{tf_derive.default}
\alias{tf_derive.tfd}
\alias{tf_derive.tfb}
\alias{tf_derive.tfb_fpc}
\alias{tf_integrate}
\alias{tf_integrate.tfd}
\alias{tf_integrate.tfb}
\title{Derivatives and integrals of functional data}
\usage{
tf_derive(f, ...)
\method{tf_derive}{default}(f, ...)
\method{tf_derive}{tfd}(f, order = 1, arg = NULL, ...)
\method{tf_derive}{tfb}(f, order = 1, ...)
\method{tf_derive}{tfb_fpc}(f, order = 1, ...)
tf_integrate(f, lower, upper, ...)
\method{tf_integrate}{tfd}(f, lower = tf_domain(f)[1],
upper = tf_domain(f)[2], definite = TRUE, arg, ...)
\method{tf_integrate}{tfb}(f, lower = tf_domain(f)[1],
upper = tf_domain(f)[2], definite = TRUE, arg, ...)
}
\arguments{
\item{f}{a \code{tf}-object}
\item{...}{not used}
\item{order}{order of differentiation. Maximally 2 for \code{tfb} with \code{mgcv}-spline bases.}
\item{arg}{grid to use for the finite differences or quadrature.
Not the \code{arg} of the returned object.}
\item{lower}{lower limits of the integration range. For \code{definite=TRUE}, this can be
a vector of the same length as \code{f}.}
\item{upper}{upper limits of the integration range (but see \code{definite} arg / Description).
For \code{definite=TRUE}, this can be a vector of the same length as \code{f}.}
\item{definite}{should the definite integral be returned (default)
or the antiderivative. See Description.}
}
\value{
a \code{tf} with (slightly) different \code{arg} (and \code{basis}), or the definite integrals of the functions in \code{f}
}
\description{
\strong{Derivatives} of \code{tf}-objects use finite differences of the evaluations
for \code{tfd} and finite differences of the basis functions for \code{tfb}.
Note that, for some spline bases like \code{"cr"} or \code{"tp"} which always begin/end linearly,
computing second derivatives will produce artefacts at the outer limits
of the functions' domain due to these boundary constraints. Basis \code{"bs"} does
not have this problem, but tends to yield slightly less stable fits.
\strong{Integrals} of \code{tf}-objects are computed by simple quadrature (trapezoid rule, specifically).
By default the scalar definite integral \eqn{\int^{upper}_{lower}f(s)ds} is returned
(option \code{definite = TRUE}), alternatively for \code{definite = FALSE} something
like the \emph{anti-derivative} on \code{[lower, upper]}, e.g. an \code{tfd} object
representing \eqn{F(t) = \int^{t}_{lower}f(s)ds}, for \eqn{t \in}\code{[lower, upper]},
is returned.
}
\details{
\code{tf_integrate.function} is simply a wrapper for \code{\link[stats:integrate]{stats::integrate()}}.
}
|
93219884c0a387d4964a0a451037303e51d3b212 | 9a7d07e326185c1b2314c3adb3c3f99edaaadb12 | /inst/scripts/align.R | 3c0b4948981f11e532fa245cfefb085b6f9f1d10 | [] | no_license | lawremi/VariantToolsData | 59ef507f511da90314a4663c84b9787136adfe9f | c52bbad1584a43b7af6fe9c43860376776599e91 | refs/heads/master | 2021-01-12T08:11:57.456380 | 2018-02-02T16:57:07 | 2018-02-02T16:57:51 | 76,499,706 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 415 | r | align.R | library(gmapR)
param <- GsnapParam(TP53Genome(), unique_only = TRUE, molecule = "DNA",
batch = "4")
extdata.dir <- system.file("extdata", package="VariantToolsTutorial")
extdata.dir <- "../extdata"
first.fastq <- dir(extdata.dir, "first.fastq", full.names=TRUE)
last.fastq <- dir(extdata.dir, "last.fastq", full.names=TRUE)
output <- mapply(gsnap, first.fastq, last.fastq, MoreArgs=list(param))
|
06b935085ab05acc9fafad6d3832822c0f3b9f1d | 5f684a2c4d0360faf50fe055c1147af80527c6cb | /2019/2019-week-17/anime2.R | bda1d4c6448f0a0ca1fa79a920ba9fe9ca8723ab | [
"MIT"
] | permissive | gkaramanis/tidytuesday | 5e553f895e0a038e4ab4d484ee4ea0505eebd6d5 | dbdada3c6cf022243f2c3058363e0ef3394bd618 | refs/heads/master | 2023-08-03T12:16:30.875503 | 2023-08-02T18:18:21 | 2023-08-02T18:18:21 | 174,157,655 | 630 | 117 | MIT | 2020-12-27T21:41:00 | 2019-03-06T14:11:15 | R | UTF-8 | R | false | false | 1,695 | r | anime2.R | library(tidyverse)
library(wesanderson)
tidy_anime <- readr::read_csv("https://raw.githubusercontent.com/rfordatascience/tidytuesday/master/data/2019/2019-04-23/tidy_anime.csv")
# remove NA
tidyA <- tidy_anime %>%
# set end_date to today for titles still airing and NA as end_date
# mutate(end_date = replace(end_date, which(airing & is.na(end_date)), Sys.Date())) %>%
# remove titles with NA end_date, remove movies and music
filter(!is.na(end_date), type != "Movie", type != "Music") %>%
select(animeID, name, type, episodes, start_date, end_date) %>%
mutate(diff_days = difftime(end_date, start_date,
units = c("days"))) %>%
mutate(startDateAsNum = as.numeric(start_date)) %>%
distinct(name, .keep_all = TRUE)
pal <- wes_palette("Zissou1", 10, type = "continuous")
tidyA %>%
ggplot() +
geom_vline(xintercept = as.Date("2019-04-30"), alpha = 0.2) +
geom_rect(aes(xmin = start_date, xmax = end_date,
ymin = startDateAsNum - 30, ymax = startDateAsNum + 30,
fill = as.numeric(diff_days), alpha = 0.5)) +
scale_y_reverse() +
scale_x_date(limits = c(as.Date("1960-01-01"), Sys.Date()), date_breaks = "10 years", date_labels = "%Y") +
theme_bw() +
scale_fill_gradientn(colours = pal) +
theme(
panel.background = element_rect(fill = "gray90"),
axis.text.y = element_blank(),
axis.ticks.y = element_blank(),
panel.border = element_blank(),
# panel.grid = element_blank(),
legend.position="none",
text = element_text(family = "IBM Plex Sans", size = 6)
)
ggsave("/Users/Georgios/Documents/Projects/tidytuesday/week 17/anime2.png", dpi = 600, height = 6, width = 4)
|
4a27ef8124431b74a232c2bd763e68bb03cd4ebe | 3c449b1c7af361336841a55d347cb1bca1adab81 | /man/L1_reg.Rd | c003e3fd8fc8d37c52218be49daa980bdd545bb2 | [] | no_license | kcf-jackson/wandeRer | 600b3cdc1b76e72ff1a1e9bff060aa7d7559bf56 | 90a3066e35cc68366625d6d018ff4a810a834b6e | refs/heads/master | 2020-04-02T05:45:37.956462 | 2016-08-02T11:38:35 | 2016-08-02T11:38:35 | 64,651,023 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 235 | rd | L1_reg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/loss_and_reg_functions.R
\name{L1_reg}
\alias{L1_reg}
\title{L1 regularisation function}
\usage{
L1_reg(param)
}
\description{
L1 regularisation function
}
|
9e08878f7a6727c80081860e6988b70ca5004d10 | 292402192edd7cbde0f840efb82f09ea7ca92c15 | /R/optimise_point.R | e397781166cfb1607037a95134ee274261c76d83 | [
"MIT"
] | permissive | tanxuezhi/hydrostreamer | 94a466da9a3b19ad23d6c0b77bbb6203cb3f3372 | 4c78053ed175dffebfd486124bc0acdd3bb522a6 | refs/heads/master | 2023-04-18T14:26:32.983942 | 2021-05-05T15:23:29 | 2021-05-05T15:23:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,177 | r | optimise_point.R | #' Computes optimal estimate at a specific river segment(s)
#'
#' Function performs data assimilation by combining timeseries of downscaled
#' discharge estimates against observed streamflow timeseries at all river
#' segments with observations.
#'
#' Optimisation can be performed either using ordinary least squares (OLS),
#' Constrained Least Squares (CLS; coefficients positive, add to unity),
#' Non-negative Least Squares (NNLS; coefficients positive), Least Squares
#' without intercept (GRA), Least squares with no intercept, and coefficients
#' sum to unity (GRB), Bates-Granger (BG), Standard Eigenvector (EIG1),
#' Bias-corrected Eigenvector (EIG2) or selecting the best performing ensemble
#' member (best).
#'
#' Alternatively, R's \code{\link[stats]{optim}} can be used. In
#' that case, \code{optim_method} should be a function which \code{optim} should
#' attempt to optimise. The function should accept three inputs: \code{par},
#' \code{obs} and \code{pred}. \code{par} is the vector of coefficients
#' \code{optim} optimises, obs is the observation timeseries, and pred is a
#' matrix of inputs to be optimised. Additional arguments passed to \code{optim}
#' can also be defined.
#'
#' @param HS An \code{HS} object with observation_ts and discharge_ts
#' @param optim_method A character object giving the optimisation method to be
#' used, or a function to be passed to \code{\link[stats]{optim}}.
#' See details.
#' @param combination Whether to do the forecast combination for the entire
#' timeseries, or each month of the year individually, or for full calendar
#' years. Accepts \code{"timeseries"}, \code{"ts"}, or \code{"monthly"},
#' \code{"mon"}, or \code{"annual"}, \code{"ann"}.
#' @param sampling How to sample training and testing periods. \code{"series"}
#' for training serially from beginning, or \code{"random"} for a random
#' sample for both training and testing periods.
#' @param train The share of timeseries used for training period.
#' @param ... parameters passed to \code{\link[stats]{optim}}, if optim_method
#' input is a function.
#'
#' @return Returns an object of class \code{HSoptim}, which is a list of
#' results for each observation station. Each list ielement contains
#' \itemize{
#' \item riverID
#' \item Method: Model averaging method used.
#' \item Weights: Vector of optimised model averaging weights.
#' \item Intercept: Intercept from the combination. \code{NA}, if not
#' applicable to the method.
#' \item Optimised_ts: A \code{tibble} consisting of date, observation and
#' optimised timeseries.
#' \item Goodness_of_fit. Goodness of fit of the forecast combination
#' obtained using \code{\link[hydroGOF]{gof}}.
#' }
#' @export
optimise_point <- function(HS,
                           optim_method="CLS",
                           combination = "ts",
                           sampling = "random",
                           train = 0.5,
                           ...) {
    # Flags threaded through the combine_* helpers so each one-time warning
    # is emitted at most once across all stations.
    warned_overfit <- FALSE
    warned_train <- FALSE
    bias_correction <- FALSE # disabled currently, likely to be removed
    log <- FALSE # disabled currently, likely to be removed
    # Indices of river segments that actually carry an observation series.
    riverIDs <- lapply(HS$observation_ts, is.null)
    riverIDs <- which(!unlist(riverIDs))
    stat_names <- HS$observation_station[ riverIDs ]
    ##########################################
    # do forecast combination for each station
    ##########################################
    combs <- list()
    for (rID in seq_along(riverIDs)) {
        # Pair predicted discharge with observations on matching dates.
        flow <- dplyr::left_join(HS$discharge_ts[[ riverIDs[rID] ]],
                                 HS$observation_ts[[ riverIDs[rID] ]],
                                 by="Date")
        flow <- flow[!is.na(flow$observations),]
        # Drop predictor columns that are entirely NA for this station.
        colremove <- apply(flow, 2, FUN=function(x) all(is.na(x)))
        if(any(colremove)) {
            flow <- flow[,names(colremove)[!colremove]]
        }
        if(nrow(flow) == 0) {
            # NOTE(review): `next` leaves combs[[rID]] as a NULL hole when a
            # later station fills a higher index; the output loop below then
            # builds an entry of NULLs for the skipped station -- confirm.
            message("Skipping station ", stat_names[rID], " for missing ",
                    "observation data.")
            next
        }
        ############################################
        # Forecast combination entire timeseries or monthly or annually
        if(combination %in% c("timeseries", "ts")) {
            combs[[rID]] <- combine_timeseries(flow,
                                               optim_method,
                                               sampling,
                                               train,
                                               bias_correction,
                                               log,
                                               warned_overfit,
                                               warned_train,
                                               ...)
            warned_overfit <- combs[[rID]]$warned_overfit
            warned_train <- combs[[rID]]$warned_train
        } else if(combination %in% c("monthly", "mon")) {
            combs[[rID]] <- combine_monthly(flow,
                                            optim_method,
                                            sampling,
                                            train,
                                            bias_correction,
                                            log,
                                            warned_overfit,
                                            warned_train,
                                            ...)
            warned_overfit <- combs[[rID]]$warned_overfit
            warned_train <- combs[[rID]]$warned_train
        } else if(combination %in% c("annual", "ann")) {
            combs[[rID]] <- combine_annual(flow,
                                           optim_method,
                                           sampling,
                                           train,
                                           bias_correction,
                                           log,
                                           warned_overfit,
                                           warned_train,
                                           ...)
            warned_overfit <- combs[[rID]]$warned_overfit
            warned_train <- combs[[rID]]$warned_train
        }
    }
    ######################
    # create output
    ######################
    # One named list element per station, keyed by station name.
    # NOTE(review): the element is named Optimized_ts here but documented as
    # Optimised_ts in the roxygen block above -- confirm which is intended.
    output <- list()
    for(i in seq_along(combs)) {
        output[[ stat_names[i] ]] <- list(riverID = HS$riverID[riverIDs[i]],
                                          Method = combs[[i]]$Method,
                                          Weights = combs[[i]]$Weights,
                                          Intercept = combs[[i]]$Intercept,
                                          Bias_correction = combs[[i]]$Bias_correction,
                                          Optimized_ts = combs[[i]]$Optimized_ts,
                                          Goodness_of_fit = combs[[i]]$Goodness_of_fit)
    }
    output <- assign_class(output, "HSoptim")
    return(output)
}
|
64b3b117d432677ac13cee30d611defc9d715408 | 831d4a79ea5954ad7f52729b254587dd444761a7 | /test.R | 762569a01ad25add0f30543586655d47a0842c80 | [] | no_license | karlkwon/Cosera_practical_machine_learning | 6a09755c86b1c8cca1e7d53538171157b11bcd5d | 412804041a24cdfcd638cda3e8a5fe424475a0e9 | refs/heads/master | 2021-01-10T08:46:44.387062 | 2015-11-23T10:15:39 | 2015-11-23T10:15:39 | 46,615,139 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,956 | r | test.R | install.packages("caret")
install.packages("rmarkdown")
install.packages("data.table")
install.packages("list")
#library(data.table)
library(caret)
library(list)
# Load the raw training data; blank cells count as missing alongside "NA".
training <- read.csv("pml-training.csv", stringsAsFactors=FALSE, na.strings = c("NA", ""))
head(training)
## Flag each missing cell as 1 (logical matrix coerced to numeric), then
## compute the fraction of NA values per column.
training_tf <- (is.na(training)) * 1
training_tf_cml <- colSums(training_tf)
training_tf_cml_nor <- training_tf_cml / nrow(training_tf)
## Keep only the columns with less than 10% missing values.
hist(training_tf_cml_nor)
validColumns <- training_tf_cml_nor < 0.1
training2 <- training[, validColumns]
dim(training2)
## Drop the leading columns (1:7) the author judged uninformative,
## keeping columns 8:60 as model input.
training3 <- training2[, 8:60]
dim(training3)
head(training3)
## Data balance check: downsample every class to the size of the rarest one.
type_list <- unique(training3$classe)
table(training3$classe)
min_sample <- min(table(training3$classe))
# Collect the per-class samples in a preallocated list instead of growing
# training4 with rbind() inside the loop (which copies the whole frame on
# every iteration). The per-class sampling order is unchanged, so the RNG
# stream -- and hence the selected rows -- match the original code.
training4_parts <- vector("list", length(type_list))
for (k in seq_along(type_list)) {
  ttt <- training3[training3$classe == type_list[k], ]
  # seq_len() is safe even for an empty subset (1:nrow would give c(1, 0)).
  tmp <- sample(seq_len(nrow(ttt)), min_sample, replace = FALSE)
  tmp2 <- ttt[tmp, ]
  # The original printed the class table for every class after the first;
  # keep that side effect so console output is identical.
  if (k > 1) print(table(tmp2$classe))
  training4_parts[[k]] <- tmp2
}
training4 <- do.call(rbind, training4_parts)
table(training4$classe)
## Make the training/test split for model fitting.
# createDataPartition() (caret) performs a 70/30 split on classe -- per
# caret's documentation the sampling is stratified within each class, so
# class proportions are preserved; list=FALSE returns an index matrix.
inTrain <- createDataPartition(y=training4$classe, p=0.7, list=FALSE)
m_train <- training4[inTrain,]
m_test <- training4[-inTrain,]
# Sanity check: class balance of both partitions.
table(m_train$classe)
table(m_test$classe)
## Train an LDA model as a baseline.
modelLda <- train(classe ~., data=m_train, method="lda")
modelLda
## Accuracy with training data is 69.1%
## so, I try another method
## training with rf (random forest)
modelRf <- train(classe ~., data=m_train, method="rf")
modelRf$finalModel
# Evaluate on the held-out 30% split.
m_pred <- predict(modelRf$finalModel, newdata=m_test)
a <- cbind(m_test$classe, as.character(m_pred))
# 1 for a correct prediction, 0 otherwise.
m_pred_tf <- ifelse(m_test$classe == as.character(m_pred), 1, 0)
# BUG FIX: the original computed accuracy as
# table(m_pred_tf)[2]/sum(table(m_pred_tf)), which indexes the "1" bin by
# position and returns NA (or the wrong value) when all predictions are
# correct or all are wrong, because the table then has only one bin.
# mean() of the 0/1 vector is the same proportion and is robust.
mean(m_pred_tf)
## 99.3%
|
11b453d1f119f13fdc01e7dc689cc74df6bc8b19 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/globals/tests/globalsOf.R | bec84e33df2a0e5ab7d1ad127ec6879f0cb31e92 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,672 | r | globalsOf.R | library("globals")
## WORKAROUND: Make sure tests also work with 'covr' package
covr <- ("covr" %in% loadedNamespaces())
if (covr) {
globalenv <- function() parent.frame()
baseenv <- function() environment(base::sample)
}
a <- 0
b <- 2
c <- 3
d <- NULL
e <- function() TRUE
expr <- substitute({ x <- b; b <- 1; y <- c; z <- d; a <- a + 1; e <- e() }, env = list())
message("*** findGlobals() ...")
## 'conservative': a name that is assigned anywhere in the expression is
## treated as local, so 'a', 'b' and 'e' must not be reported here.
message(" ** findGlobals(..., method = 'conservative'):")
globals_c <- findGlobals(expr, method = "conservative")
print(globals_c)
stopifnot(all(globals_c %in% c("{", "<-", "c", "d", "+")))
## 'liberal': a name that is read anywhere may be reported as a global even
## if it is also assigned.
message(" ** findGlobals(..., method = 'liberal'):")
globals_l <- findGlobals(expr, method = "liberal")
print(globals_l)
stopifnot(all(globals_l %in% c("{", "<-", "b", "c", "d", "+", "a", "e")))
## 'ordered': statement order is honored -- e.g. 'b' is read before it is
## assigned, so it may still count as a global.
message(" ** findGlobals(..., method = 'ordered'):")
globals_i <- findGlobals(expr, method = "ordered")
print(globals_i)
stopifnot(all(globals_i %in% c("{", "<-", "b", "c", "d", "+", "a", "e")))
## 'tweak' rewrites the expression before analysis; the result must reflect
## the substituted expression (B/C/D), not 'expr' itself.
message(" ** findGlobals(..., tweak):")
tweak_another_expression <- function(expr) {
  substitute({ x <- B; B <- 1; y <- C; z <- D }, env = list())
}
globals_i <- findGlobals(expr, tweak = tweak_another_expression)
stopifnot(all(globals_i %in% c("{", "<-", "B", "C", "D")))
## trace = TRUE must not change the result.
message(" ** findGlobals(..., trace = TRUE):")
globals_i <- findGlobals(expr, trace = TRUE)
print(globals_i)
stopifnot(all(globals_i %in% c("{", "<-", "b", "c", "d", "+", "a", "e")))
## Namespace-qualified reads: only the operators should be reported.
message(" ** findGlobals(a <- pkg::a):")
expr2 <- parse(text = "a <- pkg::a") ## To please R CMD check
globals_i <- findGlobals(expr2)
print(globals_i)
stopifnot(all(globals_i %in% c("<-", "::")))
## Replacement-function forms (x[...] <- v, x$y <- v, names(x) <- v).
## 'false_globals' lists extra names tolerated in the result.
message(" ** findGlobals(a[1] <- 0) etc.:")
globals_i <- findGlobals(a[1] <- 0, substitute = TRUE)
print(globals_i)
false_globals <- "["
stopifnot(all(setdiff(globals_i, false_globals) %in% c("<-", "a", "[<-")))
globals_i <- findGlobals({ a[1] = 0 }, substitute = TRUE)
print(globals_i)
false_globals <- "["
stopifnot(all(setdiff(globals_i, false_globals) %in% c("{", "=", "a", "[<-")))
## Assignments nested inside the subscript expression.
globals_i <- findGlobals(a[b <- 1] <- 0, substitute = TRUE)
print(globals_i)
false_globals <- "["
stopifnot(all(setdiff(globals_i, false_globals) %in% c("<-", "a", "[<-")))
globals_i <- findGlobals(a[b = 1] <- 0, substitute = TRUE)
print(globals_i)
false_globals <- "["
stopifnot(all(setdiff(globals_i, false_globals) %in% c("<-", "a", "[<-")))
globals_i <- findGlobals({ a[b <- 1] = 0 }, substitute = TRUE)
print(globals_i)
false_globals <- "["
stopifnot(all(setdiff(globals_i, false_globals) %in% c("{", "=", "a", "<-", "[<-")))
## $-replacement forms.
globals_i <- findGlobals(a$b <- 0, substitute = TRUE)
print(globals_i)
false_globals <- "$"
stopifnot(all(setdiff(globals_i, false_globals) %in% c("<-", "a", "$<-")))
globals_i <- findGlobals({ a$b = 0 }, substitute = TRUE)
print(globals_i)
false_globals <- "$"
stopifnot(all(setdiff(globals_i, false_globals) %in% c("{", "=", "a", "$<-")))
## names()<- replacement forms.
globals_i <- findGlobals(names(a) <- "A", substitute = TRUE)
print(globals_i)
stopifnot(all(globals_i %in% c("<-", "a", "names", "names<-")))
globals_i <- findGlobals({ names(a) = "A" }, substitute = TRUE)
print(globals_i)
stopifnot(all(globals_i %in% c("{", "=", "a", "names", "names<-")))
## In order to handle the following case, we have to accept a few
## false positives (`[`, `[[`, `$`, `[<-`, `[[<-`)
globals_i <- findGlobals(names(a)[1] <- "A", substitute = TRUE)
print(globals_i)
false_globals <- c("[", "[<-")
stopifnot(all(setdiff(globals_i, false_globals) %in% c("<-", "a", "names", "names<-")))
globals_i <- findGlobals({ names(a)[1] = "A" }, substitute = TRUE)
print(globals_i)
false_globals <- c("[", "[<-")
stopifnot(all(setdiff(globals_i, false_globals) %in% c("{", "=", "a", "names", "names<-")))
message("*** findGlobals() ... DONE")
## globalsByName() looks up an explicit set of names (rather than scanning
## an expression) and records in the "where" attribute where each was found.
message("*** globalsByName() ...")
globals_c <- globalsByName(c("{", "<-", "c", "d"))
str(globals_c)
stopifnot(all(names(globals_c) %in% c("{", "<-", "c", "d")))
## cleanup() drops base/internal objects ("{", "<-"), leaving true globals.
globals_c <- cleanup(globals_c)
str(globals_c)
stopifnot(all(names(globals_c) %in% c("c", "d")))
## The "where" attribute must stay one-to-one with the remaining globals.
where <- attr(globals_c, "where")
stopifnot(
  length(where) == length(globals_c),
  identical(where$c, globalenv()),
  identical(where$d, globalenv())
)
foo <- globals::Globals
globals <- globalsByName(c("{", "foo", "list"), recursive = FALSE)
str(globals)
stopifnot(all(names(globals) %in% c("{", "foo", "list")))
where <- attr(globals, "where")
stopifnot(length(where) == length(globals))
## Environment-identity checks are skipped under covr (see workaround above).
if (!covr) stopifnot(
  identical(where$`{`, baseenv()),
  identical(where$foo, globalenv()),
  identical(where$list, baseenv())
)
globals <- cleanup(globals)
str(globals)
stopifnot(all(names(globals) %in% c("foo")))
globals <- cleanup(globals, drop = "internals")
str(globals)
stopifnot(all(names(globals) %in% c("foo")))
## 'foo' is globals::Globals, so it must map back to the 'globals' package.
pkgs <- packagesOf(globals)
stopifnot(pkgs == "globals")
## Also '...': requesting "..." must capture the caller's dots.
myGlobals <- function(x, ...) {
  globalsByName(c("a", "x", "..."))
}
globals <- myGlobals(x = 2, y = 3, z = 4)
str(globals)
stopifnot(all(names(globals) %in% c("a", "x", "...")),
          all(names(globals[["..."]]) %in% c("y", "z")))
## BUG FIX: Assert that '...' does not have to be specified at the end
myGlobals <- function(x, ...) {
  globalsByName(c("a", "...", "x"))
}
globals <- myGlobals(x = 2, y = 3, z = 4)
str(globals)
stopifnot(all(names(globals) %in% c("a", "x", "...")),
          all(names(globals[["..."]]) %in% c("y", "z")))
## Test with arguments defaulting to other arguments
myGlobals <- function(x, y, z = y) {
  globalsByName(c("a", "x", "y", "z"))
}
globals <- myGlobals(x = 2, y = 3)
stopifnot(all(names(globals) %in% c("a", "x", "y", "z")),
          globals$y == 3, identical(globals$z, globals$y))
globals <- myGlobals(x = 2, y = 3, z = 4)
stopifnot(all(names(globals) %in% c("a", "x", "y", "z")),
          globals$y == 3, globals$z == 4)
## Dots with varying numbers of extra arguments.
myGlobals <- function(x, ...) {
  globalsByName(c("a", "x", "..."))
}
globals <- myGlobals(x = 2, y = 3)
stopifnot(all(names(globals) %in% c("a", "x", "...")),
          all(names(globals[["..."]]) %in% c("y")),
          globals[["..."]]$y == 3)
globals <- myGlobals(x = 2, y = 3, z = 4)
stopifnot(all(names(globals) %in% c("a", "x", "...")),
          all(names(globals[["..."]]) %in% c("y", "z")),
          globals[["..."]]$y == 3, globals[["..."]]$z == 4)
message("*** globalsByName() ... DONE")
## globalsOf() = findGlobals() plus value/environment lookup per global.
message("*** globalsOf() ...")
message(" ** globalsOf(..., method = 'conservative'):")
globals_c <- globalsOf(expr, method = "conservative")
str(globals_c)
stopifnot(all(names(globals_c) %in% c("{", "<-", "c", "d", "+")))
globals_c <- cleanup(globals_c)
str(globals_c)
stopifnot(all(names(globals_c) %in% c("c", "d")))
where <- attr(globals_c, "where")
stopifnot(
  length(where) == length(globals_c),
  identical(where$c, globalenv()),
  identical(where$d, globalenv())
)
message(" ** globalsOf(..., method = 'liberal'):")
globals_l <- globalsOf(expr, method = "liberal")
str(globals_l)
stopifnot(all(names(globals_l) %in% c("{", "<-", "b", "c", "d", "+", "a", "e")))
globals_l <- cleanup(globals_l)
str(globals_l)
stopifnot(all(names(globals_l) %in% c("b", "c", "d", "a", "e")))
where <- attr(globals_l, "where")
stopifnot(
  length(where) == length(globals_l),
  identical(where$b, globalenv()),
  identical(where$c, globalenv()),
  identical(where$d, globalenv())
)
message(" ** globalsOf(..., method = 'ordered'):")
globals_i <- globalsOf(expr, method = "ordered")
str(globals_i)
stopifnot(all(names(globals_i) %in% c("{", "<-", "b", "c", "d", "+", "a", "e")))
globals_i <- cleanup(globals_i)
str(globals_i)
stopifnot(all(names(globals_i) %in% c("b", "c", "d", "a", "e")))
where <- attr(globals_i, "where")
stopifnot(
  length(where) == length(globals_i),
  identical(where$b, globalenv()),
  identical(where$c, globalenv()),
  identical(where$d, globalenv())
)
## With recursive = FALSE only the directly referenced 'foo' is found; with
## recursive = TRUE the search descends into foo() and bar() and also picks
## up 'a' (rebound to 1 here for that purpose).
message(" ** globalsOf() w/ globals in local functions:")
a <- 1
bar <- function(x) x - a
foo <- function(x) bar(x)
for (method in c("ordered", "conservative", "liberal")) {
  globals <- globalsOf({ foo(3) }, substitute = TRUE, method = method,
                       recursive = FALSE, mustExist = FALSE)
  stopifnot(all(names(globals) %in% c("{", "foo")),
            !any("a" %in% names(globals)))
  globals <- cleanup(globals)
  str(globals)
  ## NOTE(review): the second condition here sits INSIDE all() -- the
  ## closing-parenthesis placement differs from the check above. Harmless,
  ## since all() accepts multiple arguments, but likely unintended.
  stopifnot(all(names(globals) %in% c("foo"),
                !any("a" %in% names(globals))))
  globals <- globalsOf({ foo(3) }, substitute = TRUE, method = "ordered",
                       recursive = TRUE, mustExist = FALSE)
  stopifnot(all(names(globals) %in% c("{", "foo", "bar", "-", "a")))
  globals <- cleanup(globals)
  str(globals)
  stopifnot(all(names(globals) %in% c("foo", "bar", "a")))
  ## Same expectation with the default method.
  globals <- globalsOf({ foo(3) }, substitute = TRUE,
                       recursive = TRUE, mustExist = FALSE)
  stopifnot(all(names(globals) %in% c("{", "foo", "bar", "-", "a")))
  globals <- cleanup(globals)
  str(globals)
  stopifnot(all(names(globals) %in% c("foo", "bar", "a")))
}
## Recursive functions must not send the recursive search into a loop.
message(" ** globalsOf() w/ recursive functions:")
## "Easy"
f <- function() Recall()
globals <- globalsOf(f)
str(globals)
## Direct recursive call
f <- function() f()
globals <- globalsOf(f)
str(globals)
## Indirect recursive call
f <- function() g()
g <- function() f()
globals_f <- globalsOf(f)
str(globals_f)
globals_g <- globalsOf(g)
str(globals_g)
## Sort by name before comparing: both directions must yield the same set.
globals_f <- globals_f[order(names(globals_f))]
globals_g <- globals_g[order(names(globals_g))]
stopifnot(identical(globals_g, globals_f))
message("*** globalsOf() ... DONE")
## Subsetting a Globals object must preserve its class and keep the "where"
## attribute in sync with the kept elements.
message("*** Subsetting of Globals:")
globals_l <- globalsOf(expr, method = "liberal")
globals_s <- globals_l[-1]
stopifnot(length(globals_s) == length(globals_l) - 1L)
stopifnot(identical(class(globals_s), class(globals_l)))
where_l <- attr(globals_l, "where")
where_s <- attr(globals_s, "where")
stopifnot(length(where_s) == length(where_l) - 1L)
stopifnot(identical(where_s, where_l[-1]))
message("*** cleanup() & packagesOf():")
globals <- globalsOf(expr, method = "conservative")
str(globals)
stopifnot(all(names(globals) %in% c("{", "<-", "c", "d", "+")))
## as.Globals() must be a no-op on a Globals object and must restore the
## class on a plain (unclassed) named list.
globals <- as.Globals(globals)
str(globals)
stopifnot(all(names(globals) %in% c("{", "<-", "c", "d", "+")))
globals <- as.Globals(unclass(globals))
str(globals)
stopifnot(all(names(globals) %in% c("{", "<-", "c", "d", "+")))
## All globals of 'expr' live in the global environment => no packages.
pkgs <- packagesOf(globals)
print(pkgs)
stopifnot(length(pkgs) == 0L)
globals <- cleanup(globals)
str(globals)
stopifnot(all(names(globals) %in% c("c", "d")))
pkgs <- packagesOf(globals)
print(pkgs)
stopifnot(length(pkgs) == 0L)
## A global bound to a package function should map to that package.
message("*** globalsOf() and package functions:")
foo <- globals::Globals
expr <- substitute({ foo(list(a = 1)) })
globals <- globalsOf(expr, recursive = FALSE)
str(globals)
stopifnot(all(names(globals) %in% c("{", "foo", "list")))
where <- attr(globals, "where")
stopifnot(length(where) == length(globals))
if (!covr) stopifnot(
  identical(where$`{`, baseenv()),
  identical(where$foo, globalenv()),
  identical(where$list, baseenv())
)
globals <- cleanup(globals)
str(globals)
stopifnot(all(names(globals) %in% c("foo")))
pkgs <- packagesOf(globals)
stopifnot(pkgs == "globals")
## Local copies of base functions (sample2/sum2) must resolve to the global
## environment, while the originals resolve to base.
message("*** globalsOf() and core-package functions:")
sample2 <- base::sample
sum2 <- base::sum
expr <- substitute({
  x <- sample(10)
  y <- sum(x)
  x2 <- sample2(10)
  y2 <- sum2(x)
  s <- sessionInfo()
}, env = list())
globals <- globalsOf(expr, recursive = FALSE)
str(globals)
stopifnot(all(names(globals) %in%
          c("{", "<-", "sample", "sample2", "sessionInfo", "sum", "sum2")))
where <- attr(globals, "where")
stopifnot(length(where) == length(globals))
if (!covr) stopifnot(
  identical(where$`<-`, baseenv()),
  identical(where$sample, baseenv()),
  identical(where$sample2, globalenv())
)
globals <- cleanup(globals)
str(globals)
stopifnot(all(names(globals) %in% c("sample2", "sum2")))
where <- attr(globals, "where")
stopifnot(length(where) == length(globals))
if (!covr) stopifnot(identical(where$sample2, globalenv()))
## drop = "primitives" removes sum2 (base::sum is a primitive) but keeps
## sample2 (base::sample is a closure).
globals <- cleanup(globals, drop = "primitives")
str(globals)
stopifnot(all(names(globals) %in% c("sample2")))
## With mustExist = TRUE, an unresolvable global ('a' was just removed)
## must raise an error.
message("*** globalsOf() - exceptions ...")
rm(list = "a")
res <- try({
  globals <- globalsOf({ x <- a }, substitute = TRUE, mustExist = TRUE)
}, silent = TRUE)
stopifnot(inherits(res, "try-error"))
message("*** globalsOf() - exceptions ... DONE")
## Globals() requires a fully named list; NULL, unnamed, or partially named
## input must error.
message("*** Globals() - exceptions ...")
res <- tryCatch({ Globals(NULL) }, error = identity)
stopifnot(inherits(res, "simpleError"))
res <- tryCatch({ Globals(list(1, 2)) }, error = identity)
stopifnot(inherits(res, "simpleError"))
res <- tryCatch({ Globals(list(a = 1, 2)) }, error = identity)
stopifnot(inherits(res, "simpleError"))
message("*** Globals() - exceptions ... DONE")
|
e3ef08a240926199397091ad293442324e4231b3 | 5ac6affeacb3e5214e4eb5e1423f6ac616676a0e | /StructuralVariants/NISTv0.6/171212_SV_merging_analyses_benchmarkoutputs_addbionano_addnabsys_v0.6.R | d7f713743601974810c756742625c9372393b8fe | [] | no_license | jzook/genome-data-integration | 7314f91e0560fc2b18c2ccf4a3ce681faf63099e | cfc5e261d9fc98cccd3de4648fe3805f256f2f1e | refs/heads/master | 2022-02-16T02:02:30.093298 | 2021-09-27T21:01:39 | 2021-09-27T21:01:39 | 6,076,562 | 39 | 14 | null | 2018-09-28T09:47:13 | 2012-10-04T14:31:16 | Shell | UTF-8 | R | false | false | 57,209 | r | 171212_SV_merging_analyses_benchmarkoutputs_addbionano_addnabsys_v0.6.R | library(ggplot2)
library(reshape)
library(data.table)
# #SURVIVOR merge if start and end are within 1000bp
# survivor1k<-read.delim("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/SURVIVOR_dist1k_min20bp_suppvec_quotehead.csv", stringsAsFactors=FALSE, header = FALSE, sep = ",", na.strings = c("NA", NA, "NaN"))
# survivor1kvcf<-read.delim("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/SURVIVOR_dist1k_min20bp.tsv", stringsAsFactors=FALSE, header = TRUE, sep = "\t", na.strings = c("NA", NA, "NaN"))
# colnames(survivor1k)<-survivor1k[1,]
# survivor1k<-survivor1k[2:nrow(survivor1k),1:118]
# indiv<-sapply(strsplit(as.character(sapply(strsplit(as.character(colnames(survivor1k)),'_'), "[", 1)),'_'), "[", 1)
# tech<-sub("10X","TenX",sapply(strsplit(as.character(sapply(strsplit(as.character(colnames(survivor1k)),'_'), "[", 2)),'_'), "[", 1))
# caller<-sub(".*_.*_(.*)","\\1",colnames(survivor1k))
# techs<-unique(tech)
# indivs<-unique(indiv)
# for (i in techs) {
# survivor1k[,paste0(i,"calls")]<-rowSums(survivor1k[,tech==i]==1)
#   tech=c(tech,"") #these lines keep tech and indiv vectors the same length as the columns in survivor1k
# indiv=c(indiv,"")
# survivor1kvcf$INFO<-paste0(survivor1kvcf$INFO,";",paste0(i,"calls"),"=",survivor1k[,paste0(i,"calls")])
# }
#
# for (i in indivs) {
# survivor1k[,paste0(i,"count")]<-rowSums(survivor1k[,indiv==i]==1)
# indiv=c(indiv,"")
# survivor1kvcf$INFO<-paste0(survivor1kvcf$INFO,";",paste0(i,"count"),"=",survivor1k[,paste0(i,"count")])
# }
#
# survivor1k$NumTechs<-(survivor1k$TenXcalls>0)+(survivor1k$Bionanocalls>0)+(survivor1k$CGcalls>0)+(survivor1k$Illcalls>0)+(survivor1k$PBcalls>0)
# survivor1k$MultiTech<-(survivor1k$NumTechs>1)
#
# survivor1kvcf$INFO<-paste0(survivor1kvcf$INFO,";NumTechs=",survivor1k$NumTechs,";MultiTech=",survivor1k$MultiTech,";ClusterIDs=")
# for (i in 1:118) {
# survivor1kvcf[survivor1k[,i]==1,]$INFO<-paste0(survivor1kvcf[survivor1k[,i]==1,]$INFO,colnames(survivor1k)[i],",")
# }
# survivor1kvcf$INFO<-sub(",$","",survivor1kvcf$INFO)
# survivor1kvcf$INFO<-sub("SUPP=","NumClusterSVs=",survivor1kvcf$INFO)
# survivor1kvcf[1:5,]$INFO
#
# survivor1kvcf$FILTER<-ifelse(survivor1k$MultiTech,"PASS","NOT2TECH")
# survivor1kvcf$QUAL<-ifelse(survivor1k$MultiTech,20,10)
#
# contingencytable <- xtabs(~(TenXcalls>0)+(Bionanocalls>0)+(CGcalls>0)+(Illcalls>0)+(PBcalls>0), data=survivor1k)
# ftable(contingencytable,row.vars=c(2,5))
#
# hist(survivor1k$NumTechs)
# sum(survivor1k$NumTechs>1)
#
# survivor1kSVTYPE<-read.delim("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/SURVIVOR_dist1k_min20bp_SVTYPE.csv", stringsAsFactors=FALSE, header = TRUE, sep = ",")
# survivor1k$SVTYPE <- survivor1kSVTYPE$ALT
# sum(survivor1k$NumTechs>1 & survivor1k$SVTYPE=="DEL")
# sum(survivor1k$NumTechs>1 & survivor1k$SVTYPE=="INS")
# sum(survivor1k$NumTechs>1 & survivor1k$SVTYPE=="DUP")
# sum(survivor1k$NumTechs>1 & survivor1k$SVTYPE=="INV")
# sum(survivor1k$NumTechs>1 & survivor1k$SVTYPE=="OTHER")
#
# survivor1kvcf$FORMAT<-"."
# survivor1kvcf$AJTRIO<-"."
# #write.table(survivor1kvcf[,c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","AJTRIO")],sep="\t",row.names=FALSE,col.names=FALSE,file="/Users/jzook/Documents/AJTrio/SVs/triounion_171212/SURVIVOR_dist1k_min20bp.techcounts.vcf",quote=FALSE)
#
# # sed 's/^X/23/;s/^Y/24/' SURVIVOR_dist1k_min20bp.techcounts.vcf | sort -k1,1n -k2,2n | sed 's/^23/X/;s/^24/Y/' > SURVIVOR_dist1k_min20bp.techcounts.sort.vcf
# # /Applications/bioinfo/tabix/tabix -f SURVIVOR_dist1k_min20bp.techcounts.vcf.gz
# # rm SURVIVOR_dist1k_min20bp.techcounts.TR*
# # /Applications/bioinfo/rtg-tools-3.7.1/rtg vcfannotate --bed-info AllRepeats_gt95percidentity_slop5.annotate.bed.gz -i SURVIVOR_dist1k_min20bp.techcounts.vcf.gz -o SURVIVOR_dist1k_min20bp.techcounts.TR.vcf.gz
# # /Applications/bioinfo/rtg-tools-3.7.1/rtg vcfannotate --bed-info segdupall.annotate.bed.gz -i SURVIVOR_dist1k_min20bp.techcounts.TR.vcf.gz -o SURVIVOR_union_171212_v0.3.0a.vcf.gz
##SVcomp analysis
#vcf clustering output for all distance metrics within 0.2
# zgrep -v ^# union_171212_refalt.2.2.2.clustered.vcf.gz | awk 'BEGIN {FS=OFS="\t"} {if(!($1 ~ /^#/)) $7="PASS"; print}' | sed 's/PASS.*ClusterIDs=\(.*\);NumClusterSVs=\(.*\);ExactMatchIDs=\(.*\);NumExactMatchSVs=\(.*\);ClusterMaxShiftDist=\(.*\);ClusterMaxSizeDiff=\(.*\);ClusterMaxEditDist=\(.*\) .* .*/PASS \1 \2 \3 \4 \5 \6 \7/;' > union_171212_refalt.2.2.2.clustered.simpleINFO.tsv
# cut -f8 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F '_PB_' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.PBcount.tsv
# cut -f8 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F '_Ill' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.Illcount.tsv
# cut -f8 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F '_10X_' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.10Xcount.tsv
# cut -f8 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F '_CG_' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.CGcount.tsv
# cut -f10 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F '_PB_' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.PBexactcount.tsv
# cut -f10 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F '_Ill' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.Illexactcount.tsv
# cut -f10 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F '_10X_' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.10Xexactcount.tsv
# cut -f10 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F '_CG_' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.CGexactcount.tsv
# cut -f8 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F 'HG2_' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.HG2count.tsv
# cut -f8 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F 'HG3_' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.HG3count.tsv
# cut -f8 union_171212_refalt.2.2.2.clustered.simpleINFO.tsv | awk -F 'HG4_' '{print NF-1}' > union_171212_refalt.2.2.2.clustered.simpleINFO.HG4count.tsv
# paste union_171212_refalt.2.2.2.clustered.simpleINFO.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.PBcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.Illcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.10Xcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.CGcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.PBexactcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.Illexactcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.10Xexactcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.CGexactcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.HG2count.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.HG3count.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.HG4count.tsv | awk '$9>1' > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.tsv
#note that awk removes sites from one callset that are not within 20% of any other callset
# paste union_171212_refalt.2.2.2.clustered.simpleINFO.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.PBcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.Illcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.10Xcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.CGcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.PBexactcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.Illexactcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.10Xexactcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.CGexactcount.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.HG2count.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.HG3count.tsv union_171212_refalt.2.2.2.clustered.simpleINFO.HG4count.tsv > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.tsv
#note that this command does not remove sites from one callset that are not within 20% of any other callset
# Load the SVanalyzer-clustered union callset; svcompthresh2 drives all
# downstream summaries and the final VCF export below.
svcompthresh2 <- fread("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.tsv", header = FALSE, sep = "\t")
colnames(svcompthresh2) <- c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","ClusterIDs","NumClusterSVs","ExactMatchIDs","NumExactMatchSVs","ClusterMaxShiftDist","ClusterMaxSizeDiff","ClusterMaxEditDist","PBcalls","Illcalls","TenXcalls","CGcalls","PBexactcalls","Illexactcalls","TenXexactcalls","CGexactcalls","HG2count","HG3count","HG4count")
# Number of technologies supporting each cluster (any support / exact match).
svcompthresh2$NumTechs <- (svcompthresh2$PBcalls > 0) + (svcompthresh2$Illcalls > 0) + (svcompthresh2$TenXcalls > 0) + (svcompthresh2$CGcalls > 0)
svcompthresh2$NumTechsExact <- (svcompthresh2$PBexactcalls > 0) + (svcompthresh2$Illexactcalls > 0) + (svcompthresh2$TenXexactcalls > 0) + (svcompthresh2$CGexactcalls > 0)
# Signed SV length: positive = insertion, negative = deletion.
svcompthresh2$SVLEN <- (nchar(svcompthresh2$ALT) - nchar(svcompthresh2$REF))
# Distance in bp to the previous/next variant, with a 99999 sentinel where
# there is no neighbor.
# BUG FIX: the original computed adjacent-row differences across the whole
# table, so the first/last variant of each chromosome received a meaningless
# (possibly negative) distance to a row on a different chromosome. Gaps that
# span a chromosome boundary now get the same 99999 sentinel used for the
# first and last rows of the table.
n <- nrow(svcompthresh2)
gap <- svcompthresh2$POS[2:n] - svcompthresh2$POS[1:(n - 1)] - nchar(svcompthresh2$REF[1:(n - 1)])
gap[svcompthresh2$CHROM[2:n] != svcompthresh2$CHROM[1:(n - 1)]] <- 99999
svcompthresh2$DistBack <- c(99999, gap)
svcompthresh2$DistForward <- c(gap, 99999)
svcompthresh2$DistMin <- pmin(svcompthresh2$DistBack, svcompthresh2$DistForward)
svcompthresh2$DistMinlt1000 <- (svcompthresh2$DistMin < 1000)
svcompthresh2$MultiTech <- (svcompthresh2$NumTechs > 1)
svcompthresh2$MultiTechExact <- (svcompthresh2$NumTechsExact > 1)
# Crude type call from allele lengths: both alleles >= 20 bp -> COMPLEX,
# long ALT -> INS, otherwise DEL. The ">19" cutoff matches the 20 bp minimum
# event size used throughout this analysis ("min20bp" inputs).
svcompthresh2$SVTYPE <- ifelse(nchar(svcompthresh2$ALT) > 19 & nchar(svcompthresh2$REF) > 19, "COMPLEX", ifelse(nchar(svcompthresh2$ALT) > 19, "INS", "DEL"))
svcompthresh2$END <- svcompthresh2$POS + nchar(svcompthresh2$REF) - 1
# Calls supported by 2+ technologies PASS; all others are flagged.
svcompthresh2$FILTER <- ifelse(svcompthresh2$MultiTech, "PASS", "NOT2TECH")
svcompthresh2$QUAL <- ifelse(svcompthresh2$MultiTech, 20, 10)
# Size bins (absolute SV length) used for the summaries below.
svcompthresh2$sizecat <- ifelse(abs(svcompthresh2$SVLEN) >= 1000, "gt1000", ifelse(abs(svcompthresh2$SVLEN) >= 300, "300to999", ifelse(abs(svcompthresh2$SVLEN) >= 100, "100to299", ifelse(abs(svcompthresh2$SVLEN) >= 50, "50to99", "20to49"))))
#svcompthresh2[svcompthresh2$ID=="HG4_Ill_FB_8673","ALT"]  # spot-check of one call
# Distribution of per-cluster technology support (any match / exact match).
hist(svcompthresh2$NumTechs)
hist(svcompthresh2$NumTechsExact)
# Clusters supported by 2+ technologies, overall ...
sum(svcompthresh2$NumTechs>1)
sum(svcompthresh2$NumTechsExact>1)
# ... and split into insertions (SVLEN>0) vs deletions (SVLEN<0).
sum(svcompthresh2[svcompthresh2$SVLEN>0,]$NumTechs>1)
sum(svcompthresh2[svcompthresh2$SVLEN<0,]$NumTechs>1)
sum(svcompthresh2[svcompthresh2$SVLEN>0,]$NumTechsExact>1)
sum(svcompthresh2[svcompthresh2$SVLEN<0,]$NumTechsExact>1)
# How many individual callset SVs make up each multi-tech cluster?
hist(svcompthresh2[svcompthresh2$NumTechs>1,]$NumClusterSVs,breaks=(-1:400),xlim=c(0,60))
sum(svcompthresh2$SVLEN<(-299))
# Effect of alternative support criteria, by SV sign/size:
# multi-tech only, multi-tech OR strong within-sample support (>=5 calls in
# any of HG2/HG3/HG4), and multi-tech OR >=4 SVs in the cluster.
sum(svcompthresh2$SVLEN>0 & (svcompthresh2$NumTechs>1 ))
sum(svcompthresh2$SVLEN<0 & (svcompthresh2$NumTechs>1 ))
sum(svcompthresh2$SVLEN>0 & (svcompthresh2$NumTechs>1 | svcompthresh2$HG2count>=5 | svcompthresh2$HG3count>=5 | svcompthresh2$HG4count>=5))
sum(svcompthresh2$SVLEN<0 & (svcompthresh2$NumTechs>1 | svcompthresh2$HG2count>=5 | svcompthresh2$HG3count>=5 | svcompthresh2$HG4count>=5))
# Calls rescued ONLY by the within-sample support criterion.
sum(svcompthresh2$SVLEN>0 & !(svcompthresh2$NumTechs>1) & (svcompthresh2$HG2count>=5 | svcompthresh2$HG3count>=5 | svcompthresh2$HG4count>=5))
sum(svcompthresh2$SVLEN<0 & !(svcompthresh2$NumTechs>1) & (svcompthresh2$HG2count>=5 | svcompthresh2$HG3count>=5 | svcompthresh2$HG4count>=5))
sum(svcompthresh2$SVLEN>300 & !(svcompthresh2$NumTechs>1) & (svcompthresh2$HG2count>=5 | svcompthresh2$HG3count>=5 | svcompthresh2$HG4count>=5))
sum(svcompthresh2$SVLEN< -300 & !(svcompthresh2$NumTechs>1) & (svcompthresh2$HG2count>=5 | svcompthresh2$HG3count>=5 | svcompthresh2$HG4count>=5))
# Calls rescued ONLY by the cluster-size criterion.
sum(svcompthresh2$SVLEN>0 & !(svcompthresh2$NumTechs>1) & (svcompthresh2$NumClusterSVs>=4))
sum(svcompthresh2$SVLEN<0 & !(svcompthresh2$NumTechs>1) & (svcompthresh2$NumClusterSVs>=4))
sum(svcompthresh2$SVLEN>0 & (svcompthresh2$NumTechs>1 | svcompthresh2$NumClusterSVs>=4))
sum(svcompthresh2$SVLEN<0 & (svcompthresh2$NumTechs>1 | svcompthresh2$NumClusterSVs>=4))
# Very large (>5 kb) events under the combined criterion.
sum(svcompthresh2$SVLEN>5000 & (svcompthresh2$NumTechs>1 | svcompthresh2$NumClusterSVs>=4))
sum(svcompthresh2$SVLEN< -5000 & (svcompthresh2$NumTechs>1 | svcompthresh2$NumClusterSVs>=4))
#how many bps? Total inserted/deleted bases under each support criterion.
sum(svcompthresh2[svcompthresh2$SVLEN>0 & svcompthresh2$NumTechs>1,]$SVLEN)
sum(svcompthresh2[svcompthresh2$SVLEN<0 & svcompthresh2$NumTechs>1,]$SVLEN)
sum(svcompthresh2[svcompthresh2$SVLEN>50 & svcompthresh2$NumTechs>1,]$SVLEN)
sum(svcompthresh2[svcompthresh2$SVLEN< -50 & svcompthresh2$NumTechs>1,]$SVLEN)
sum(svcompthresh2[svcompthresh2$SVLEN>0 & (svcompthresh2$NumTechs>1 | svcompthresh2$NumClusterSVs>=4),]$SVLEN)
sum(svcompthresh2[svcompthresh2$SVLEN<0 & (svcompthresh2$NumTechs>1 | svcompthresh2$NumClusterSVs>=4),]$SVLEN)
sum(svcompthresh2[svcompthresh2$SVLEN>50 & (svcompthresh2$NumTechs>1 | svcompthresh2$NumClusterSVs>=4),]$SVLEN)
sum(svcompthresh2[svcompthresh2$SVLEN< -50 & (svcompthresh2$NumTechs>1 | svcompthresh2$NumClusterSVs>=4),]$SVLEN)
# Distance to the nearest neighboring variant (log10 scale).
# NOTE(review): log10() emits NaN/-Inf warnings for DistMin <= 0 (possible
# for overlapping adjacent variants) -- confirm whether that is expected.
hist(log10(svcompthresh2$DistMin),breaks=(-1:200)/20,xlim=c(0,6))
hist(log10(svcompthresh2[svcompthresh2$SVLEN>0,]$DistMin),breaks=(-1:200)/20,xlim=c(0,6))
# Isolated variants (>1 kb from any neighbor), by support and sign.
sum(svcompthresh2$DistMin>1000)
sum(svcompthresh2$DistMin>1000 & svcompthresh2$NumTechs>1)
sum(svcompthresh2$DistMin>1000 & svcompthresh2$NumTechsExact>1)
sum(svcompthresh2$DistMin>1000 & svcompthresh2$NumTechsExact>1 & svcompthresh2$SVLEN>0)
sum(svcompthresh2$DistMin>1000 & svcompthresh2$NumTechsExact>1 & svcompthresh2$SVLEN<0)
# How much do breakpoints shift within a cluster (fractional shift)?
hist(svcompthresh2[svcompthresh2$NumTechs>1,]$ClusterMaxShiftDist,breaks=(-1:400)/200,xlim=c(0,.5))
hist(svcompthresh2[svcompthresh2$NumTechsExact>1,]$ClusterMaxShiftDist,breaks=(-1:400)/200,xlim=c(0,.5))
# Size distributions (log10 bp) for all candidates vs 2+-tech-supported.
hist(log10(svcompthresh2[svcompthresh2$SVLEN>0,]$SVLEN),breaks=(-1:200)/20,xlim=c(1,5),main="All candidate insertions",xlab="log10(size)")
hist(log10(-svcompthresh2[svcompthresh2$SVLEN<0,]$SVLEN),breaks=(-1:200)/20,xlim=c(1,5),main="All candidate deletions",xlab="log10(size)")
hist(log10(svcompthresh2[svcompthresh2$SVLEN>0 & svcompthresh2$NumTechs>1,]$SVLEN),breaks=(-1:200)/20,xlim=c(1,5),main="Insertions supported by 2+ techs",xlab="log10(size)")
hist(log10(-svcompthresh2[svcompthresh2$SVLEN<0 & svcompthresh2$NumTechs>1,]$SVLEN),breaks=(-1:200)/20,xlim=c(1,5),main="Deletions supported by 2+ techs",xlab="log10(size)")
hist(log10(svcompthresh2[svcompthresh2$SVLEN>0 & svcompthresh2$NumTechsExact>1,]$SVLEN),breaks=(-1:200)/20,xlim=c(1,5),main="Insertions supported by 2+ techs",xlab="log10(size)")
hist(log10(-svcompthresh2[svcompthresh2$SVLEN<0 & svcompthresh2$NumTechsExact>1,]$SVLEN),breaks=(-1:200)/20,xlim=c(1,5),main="Deletions supported by 2+ techs",xlab="log10(size)")
hist(log10(svcompthresh2[svcompthresh2$SVLEN>0 & svcompthresh2$DistMin>1000 & svcompthresh2$NumTechsExact>1,]$SVLEN),breaks=(-1:200)/20,xlim=c(1,5),main="Insertions supported by 2+ techs",xlab="log10(size)")
hist(log10(-svcompthresh2[svcompthresh2$SVLEN<0 & svcompthresh2$DistMin>1000 & svcompthresh2$NumTechsExact>1,]$SVLEN),breaks=(-1:200)/20,xlim=c(1,5),main="Deletions supported by 2+ techs",xlab="log10(size)")
#display contingency table for support from each tech, with short reads as rows and long reads/linked reads as columns, only for high-confidence calls
contingencytable <- xtabs(~(svcompthresh2$PBcalls>0)+(svcompthresh2$Illcalls>0)+(svcompthresh2$TenXcalls>0)+(svcompthresh2$CGcalls>0), data=svcompthresh2)
ftable(contingencytable,row.vars=c(1,3))
# Build a VCF INFO string from every annotation column (columns 8 through
# the last column before INFO itself; columns 1-7 are the fixed VCF fields).
# Convert to data.frame first so numeric column indexing with [, i] works.
svcompthresh2 <- data.frame(svcompthresh2)
svcompthresh2$INFO <- ""
colno <- ncol(svcompthresh2)
for (i in 8:(colno - 1)) {
  svcompthresh2$INFO <- paste0(svcompthresh2$INFO, colnames(svcompthresh2)[i], "=", svcompthresh2[, i], ";")
}
# BUG FIX: the loop leaves a single trailing ";" on every INFO string, but
# the original stripped ";;$" (two semicolons), which never matched, so all
# INFO fields ended with a stray separator. Strip the single trailing ";".
svcompthresh2$INFO <- sub(";$", "", svcompthresh2$INFO)
# Placeholder FORMAT/sample columns to produce a 10-column VCF body.
svcompthresh2$FORMAT <- "."
svcompthresh2$AJTRIO <- "."
svcompthresh2 <- data.table(svcompthresh2)
#output new vcf (body only; the header is prepended by the shell commands below)
write.table(svcompthresh2[,c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","AJTRIO")],sep="\t",row.names=FALSE,col.names=FALSE,file="/Users/jzook/Documents/AJTrio/SVs/triounion_171212/union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.vcf",quote=FALSE)
# cat header.txt union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.vcf | /Applications/bioinfo/htslib-1.3/bgzip -c > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.vcf.gz
# /Applications/bioinfo/htslib-1.3/tabix -f union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.vcf.gz
# /Applications/bioinfo/bcftools/bcftools norm -D -c x -f /Users/jzook/Documents/references/human_g1k_v37.fasta union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.vcf.gz | /Applications/bioinfo/htslib-1.3/bgzip -c > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.norm.vcf.gz
# /Applications/bioinfo/htslib-1.3/tabix -f union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.norm.vcf.gz
# rm union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.norm.TR*
# /Applications/bioinfo/rtg-tools-3.7.1/rtg vcfannotate --bed-info AllRepeats_gt95percidentity_slop5.annotate.bed.gz -i union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.norm.vcf.gz -o union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.norm.TR.vcf.gz
# /Applications/bioinfo/rtg-tools-3.7.1/rtg vcfannotate --bed-info segdupall.annotate.bed.gz -i union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.norm.TR.vcf.gz -o svanalyzer_union_171212_v0.5.0.vcf.gz
# awk '$3-$2>10000' /Users/jzook/Downloads/segdups_selfchain_merged50.bed | awk '{FS=OFS="\t"} { print $1, $2-50, $3+50}' | awk '{FS=OFS="\t"} { if($2<0) $2=0; print}' > /Users/jzook/Downloads/segdups_selfchain_merged50_slop50_gt10k.bed
# awk '$3-$2>10000' /Users/jzook/Downloads/repeats_trfSimplerepLowcomplex_merged50.bed | awk '{FS=OFS="\t"} { print $1, $2-50, $3+50}' | awk '{FS=OFS="\t"} { if($2<0) $2=0; print}' > /Users/jzook/Downloads/repeats_trfSimplerepLowcomplex_merged50_slop50_gt10k.bed
# awk '$3-$2>100' /Users/jzook/Downloads/repeats_trfSimplerepLowcomplex_merged50.bed | awk '{FS=OFS="\t"} { print $1, $2-50, $3+50}' | awk '{FS=OFS="\t"} { if($2<0) $2=0; print}' > /Users/jzook/Downloads/repeats_trfSimplerepLowcomplex_merged50_slop50_gt100.bed
# awk '{FS=OFS="\t"} { print $1, $2-5, $3+5}' /Users/jzook/Downloads/repeats_trfSimplerepLowcomplex_merged50.bed | awk '{FS=OFS="\t"} { if($2<0) $2=0; print}' > /Users/jzook/Downloads/repeats_trfSimplerepLowcomplex_merged50_slop5.bed
# awk '{FS="\t";OFS="\t"} {print $1,$2,$2+length($4),$3}' union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.vcf > svanalyzer_union_171212_v0.5.0.bed
# /Applications/bioinfo/bedtools2.26.0/bin/annotateBed -i svanalyzer_union_171212_v0.5.0.bed -both -files /Users/jzook/Downloads/repeats_trfSimplerepLowcomplex_merged50_slop5.bed /Users/jzook/Downloads/repeats_trfSimplerepLowcomplex_merged50_slop50_gt100.bed /Users/jzook/Downloads/repeats_trfSimplerepLowcomplex_merged50_slop50_gt10k.bed /Users/jzook/Downloads/segdups_selfchain_merged50_slop50_gt10k.bed /Applications/bioinfo/nist-integration-v3.2.2/resources/example_of_no_ref_regions_input_file_b37.bed -names TRall TRgt100 TRgt10k segdup refNs > svanalyzer_union_171212_v0.5.0_TRall_TRgt100_TRgt10k_segdup_refN.bed
# Load the bedtools annotateBed output (see commented command above) giving,
# for each SV ID, the count and fraction of its span covered by tandem-repeat
# intervals (TRall / TRgt100 / TRgt10k), segmental duplications, and
# reference-N regions.
annbeds <- fread("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/svanalyzer_union_171212_v0.5.0_TRall_TRgt100_TRgt10k_segdup_refN.bed", stringsAsFactors=FALSE, header = FALSE, sep = "\t")
# First three columns are the SV's chrom/start/end; named "1","2","3" so they
# do not collide with the CHROM/POS columns already present on the merge.
colnames(annbeds) <- c("1","2","3","ID","TRall_cnt","TRall_pct","TRgt100_cnt","TRgt100_pct","TRgt10k_cnt","TRgt10k_pct","segdup_cnt", "segdup_pct", "refNs_cnt", "refNs_pct")
setkey(annbeds,"ID")
setkey(svcompthresh2,"ID")
# Left-join the repeat/segdup annotations onto the call set; calls absent
# from the annotation bed get NA in the new columns.
svcompthresh2annbeds <- merge(svcompthresh2,annbeds,by="ID",all.x=TRUE,sort=FALSE)
# Flag an SV as repeat/segdup-associated when more than 20% of its span
# overlaps the corresponding interval class.
svcompthresh2annbeds$TRall <- (svcompthresh2annbeds$TRall_pct>0.2)
svcompthresh2annbeds$TRgt100 <- (svcompthresh2annbeds$TRgt100_pct>0.2)
svcompthresh2annbeds$TRgt10k <- (svcompthresh2annbeds$TRgt10k_pct>0.2)
svcompthresh2annbeds$segdup <- (svcompthresh2annbeds$segdup_pct>0.2)
# Counts of flagged calls (NAs from the left join are excluded).
sum(svcompthresh2annbeds$TRall,na.rm=TRUE)
sum(svcompthresh2annbeds$TRgt100,na.rm=TRUE)
sum(svcompthresh2annbeds$TRgt10k,na.rm=TRUE)
sum(svcompthresh2annbeds$segdup,na.rm=TRUE)
#output vcf with only 2+ techs or 4+ callsets for svviz eval
# Sites supported by more than one technology OR by at least four callsets
# are written, sorted by CHROM/POS via data.table's chained [order(...)]
# form; the sum() below reports how many sites qualify.
write.table(svcompthresh2[(svcompthresh2$NumTechs>1 | svcompthresh2$NumClusterSVs>=4),c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","AJTRIO")][order(CHROM,POS)],sep="\t",row.names=FALSE,col.names=FALSE,file="/Users/jzook/Documents/AJTrio/SVs/triounion_171212/union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.2techor4callset.vcf",quote=FALSE)
sum((svcompthresh2$NumTechs>1 | svcompthresh2$NumClusterSVs>=4))
# cat header.txt union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.2techor5caller.vcf | /Applications/bioinfo/htslib-1.3/bgzip -c > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.2techor5caller.vcf.gz
# /Applications/bioinfo/htslib-1.3/tabix -f union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.2techor5caller.vcf.gz
#add bionano comparison performed by Alex Hastie and Joyce Lee
#/Applications/bioinfo/bedtools2.26.0/bin/intersectBed -wa -wb -a BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/Seq_based/GM24385_HG2_seq_overlap_deletion.bed -b BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_overlap_deletion.bed > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/GM24385_HG2_seq_BNG_overlap_deletion.bed
#/Applications/bioinfo/bedtools2.26.0/bin/intersectBed -wa -wb -a BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/Seq_based/GM24385_HG2_seq_overlap_insertion.bed -b BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_overlap_insertion.bed > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/GM24385_HG2_seq_BNG_overlap_insertion.bed
# cat BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_overlap_deletion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_uniq_deletion.bed | sed 's/^X/23/;s/^Y/24/' | sort -k1,1n -k2,2n -k3,3n | sed 's/^23/X/;s/^24/Y/' > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_deletion.bed
#sed 's/_/***tab****/g' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_deletion.bed > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_deletion_sep.bed
#awk '$4>0.9' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_deletion_sep.bed | /Applications/bioinfo/bedtools2.26.0/bin/mergeBed -c 4,5,6,7,8 -o max,collapse,collapse,distinct,max -i stdin > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_deletion_sep_mergedmaxsize.bed
#/Applications/bioinfo/bedtools2.26.0/bin/intersectBed -wa -wb -a svanalyzer_union_171212_v0.5.0.bed -b ../triounion_170313/BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_deletion_sep_mergedmaxsize.bed > svanalyzer_union_171212_v0.5.0_HG2_BNG_overlap_deletion.bed
# cat BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_overlap_insertion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_uniq_insertion.bed | sed 's/^X/23/;s/^Y/24/' | sort -k1,1n -k2,2n -k3,3n | sed 's/^23/X/;s/^24/Y/' > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_insertion.bed
#sed 's/_/***tab****/g' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_insertion.bed > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_insertion_sep.bed
#awk '$4>0.9' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_insertion_sep.bed | /Applications/bioinfo/bedtools2.26.0/bin/mergeBed -c 4,5,6,7,8 -o max,collapse,collapse,distinct,max -i stdin > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_insertion_sep_mergedmaxsize.bed
#/Applications/bioinfo/bedtools2.26.0/bin/intersectBed -wa -wb -a svanalyzer_union_171212_v0.5.0.bed -b ../triounion_170313/BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_insertion_sep_mergedmaxsize.bed > svanalyzer_union_171212_v0.5.0_HG2_BNG_overlap_insertion.bed
# cat BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_overlap_deletion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_uniq_deletion.bed | sed 's/^X/23/;s/^Y/24/' | sort -k1,1n -k2,2n -k3,3n | sed 's/^23/X/;s/^24/Y/' > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_deletion.bed
#sed 's/_/***tab****/g' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_deletion.bed > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_deletion_sep.bed
#awk '$4>0.9' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_deletion_sep.bed | /Applications/bioinfo/bedtools2.26.0/bin/mergeBed -c 4,5,6,7,8 -o max,collapse,collapse,distinct,max -i stdin > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_deletion_sep_mergedmaxsize.bed
#/Applications/bioinfo/bedtools2.26.0/bin/intersectBed -wa -wb -a svanalyzer_union_171212_v0.5.0.bed -b ../triounion_170313/BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_deletion_sep_mergedmaxsize.bed > svanalyzer_union_171212_v0.5.0_HG3_BNG_overlap_deletion.bed
# cat BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_overlap_insertion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_uniq_insertion.bed | sed 's/^X/23/;s/^Y/24/' | sort -k1,1n -k2,2n -k3,3n | sed 's/^23/X/;s/^24/Y/' > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_insertion.bed
#sed 's/_/***tab****/g' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_insertion.bed > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_insertion_sep.bed
# awk '$4>0.9' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_insertion_sep.bed | /Applications/bioinfo/bedtools2.26.0/bin/mergeBed -c 4,5,6,7,8 -o max,collapse,collapse,distinct,max -i stdin > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_insertion_sep_mergedmaxsize.bed
# /Applications/bioinfo/bedtools2.26.0/bin/intersectBed -wa -wb -a svanalyzer_union_171212_v0.5.0.bed -b ../triounion_170313/BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_insertion_sep_mergedmaxsize.bed > svanalyzer_union_171212_v0.5.0_HG3_BNG_overlap_insertion.bed
# cat BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_overlap_deletion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_uniq_deletion.bed | sed 's/^X/23/;s/^Y/24/' | sort -k1,1n -k2,2n -k3,3n | sed 's/^23/X/;s/^24/Y/' > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_deletion.bed
# sed 's/_/***tab****/g' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_deletion.bed > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_deletion_sep.bed
# awk '$4>0.9' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_deletion_sep.bed | /Applications/bioinfo/bedtools2.26.0/bin/mergeBed -c 4,5,6,7,8 -o max,collapse,collapse,distinct,max -i stdin > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_deletion_sep_mergedmaxsize.bed
# /Applications/bioinfo/bedtools2.26.0/bin/intersectBed -wa -wb -a svanalyzer_union_171212_v0.5.0.bed -b ../triounion_170313/BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_deletion_sep_mergedmaxsize.bed > svanalyzer_union_171212_v0.5.0_HG4_BNG_overlap_deletion.bed
# cat BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_overlap_insertion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_uniq_insertion.bed | sed 's/^X/23/;s/^Y/24/' | sort -k1,1n -k2,2n -k3,3n | sed 's/^23/X/;s/^24/Y/' > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_insertion.bed
# sed 's/_/***tab****/g' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_insertion.bed > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_insertion_sep.bed
# awk '$4>0.9' BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_insertion_sep.bed | /Applications/bioinfo/bedtools2.26.0/bin/mergeBed -c 4,5,6,7,8 -o max,collapse,collapse,distinct,max -i stdin > BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_insertion_sep_mergedmaxsize.bed
# /Applications/bioinfo/bedtools2.26.0/bin/intersectBed -wa -wb -a svanalyzer_union_171212_v0.5.0.bed -b ../triounion_170313/BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_insertion_sep_mergedmaxsize.bed > svanalyzer_union_171212_v0.5.0_HG4_BNG_overlap_insertion.bed
#make bed for svrefine
# cat BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_uniq_deletion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_overlap_deletion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_uniq_insertion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24385_HG2/BNG_based/GM24385_HG2_BNG_overlap_insertion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_uniq_deletion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_overlap_deletion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_uniq_insertion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24149_HG3/BNG_based/GM24149_HG3_BNG_overlap_insertion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_uniq_deletion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_overlap_deletion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_uniq_insertion.bed BNG_overlap_NIST_UnionsSV_v0.3.0/GM24143_HG4/BNG_based/GM24143_HG4_BNG_overlap_insertion.bed | sed 's/^X/23/;s/^Y/24/' | sort -k1,1n -k2,2n -k3,3n | sed 's/^23/X/;s/^24/Y/' > BNG_overlap_NIST_UnionsSV_v0.3.0/AJTrio_BNGbased_union.bed
#bionanoHG2del<-fread("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/BNG_overlap_NIST_UnionsSV_v0.3.0/HG2BNG_overlap_all_seq/GM24385_overlap_all_seq_del.bed", stringsAsFactors=FALSE, header = FALSE, sep = "\t")
#bionanoHG2ins<-fread("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/BNG_overlap_NIST_UnionsSV_v0.3.0/HG2BNG_overlap_all_seq/GM24385_overlap_all_seq_ins.bed", stringsAsFactors=FALSE, header = FALSE, sep = "\t")
# Load per-sample BioNano overlap tables (HG2/HG3/HG4 = son/father/mother)
# for deletions and insertions, produced by the commented intersectBed
# commands above. Each row pairs a sequence-based SV (by ID and coordinates)
# with an overlapping BioNano call: its coordinates, max quality, supporting
# BspQI/BssSI molecule IDs, SV type, and size.
bionanoHG2del<-fread("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/svanalyzer_union_171212_v0.5.0_HG2_BNG_overlap_deletion.bed", stringsAsFactors=FALSE, header = FALSE, sep = "\t")
bionanoHG2ins<-fread("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/svanalyzer_union_171212_v0.5.0_HG2_BNG_overlap_insertion.bed", stringsAsFactors=FALSE, header = FALSE, sep = "\t")
# Column names carry a per-sample/per-type suffix so the six tables can be
# outer-merged into one wide row per SV without name collisions.
colnames(bionanoHG2del)<-c("SEQ_CHROM","SEQ_START","SEQ_END","ID","BNG_CHROM_HG2DEL","BNG_START_HG2DEL","BNG_END_HG2DEL","BNG_QUAL_MAX_HG2DEL","BspQIIDs_HG2DEL","BssSIIDs_HG2DEL","BNG_SVTYPE_HG2DEL","BNG_LEN_HG2DEL")
colnames(bionanoHG2ins)<-c("SEQ_CHROM","SEQ_START","SEQ_END","ID","BNG_CHROM_HG2INS","BNG_START_HG2INS","BNG_END_HG2INS","BNG_QUAL_MAX_HG2INS","BspQIIDs_HG2INS","BssSIIDs_HG2INS","BNG_SVTYPE_HG2INS","BNG_LEN_HG2INS")
bionanoHG3del<-fread("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/svanalyzer_union_171212_v0.5.0_HG3_BNG_overlap_deletion.bed", stringsAsFactors=FALSE, header = FALSE, sep = "\t")
bionanoHG3ins<-fread("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/svanalyzer_union_171212_v0.5.0_HG3_BNG_overlap_insertion.bed", stringsAsFactors=FALSE, header = FALSE, sep = "\t")
colnames(bionanoHG3del)<-c("SEQ_CHROM","SEQ_START","SEQ_END","ID","BNG_CHROM_HG3DEL","BNG_START_HG3DEL","BNG_END_HG3DEL","BNG_QUAL_MAX_HG3DEL","BspQIIDs_HG3DEL","BssSIIDs_HG3DEL","BNG_SVTYPE_HG3DEL","BNG_LEN_HG3DEL")
colnames(bionanoHG3ins)<-c("SEQ_CHROM","SEQ_START","SEQ_END","ID","BNG_CHROM_HG3INS","BNG_START_HG3INS","BNG_END_HG3INS","BNG_QUAL_MAX_HG3INS","BspQIIDs_HG3INS","BssSIIDs_HG3INS","BNG_SVTYPE_HG3INS","BNG_LEN_HG3INS")
bionanoHG4del<-fread("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/svanalyzer_union_171212_v0.5.0_HG4_BNG_overlap_deletion.bed", stringsAsFactors=FALSE, header = FALSE, sep = "\t")
bionanoHG4ins<-fread("/Users/jzook/Documents/AJTrio/SVs/triounion_171212/svanalyzer_union_171212_v0.5.0_HG4_BNG_overlap_insertion.bed", stringsAsFactors=FALSE, header = FALSE, sep = "\t")
colnames(bionanoHG4del)<-c("SEQ_CHROM","SEQ_START","SEQ_END","ID","BNG_CHROM_HG4DEL","BNG_START_HG4DEL","BNG_END_HG4DEL","BNG_QUAL_MAX_HG4DEL","BspQIIDs_HG4DEL","BssSIIDs_HG4DEL","BNG_SVTYPE_HG4DEL","BNG_LEN_HG4DEL")
colnames(bionanoHG4ins)<-c("SEQ_CHROM","SEQ_START","SEQ_END","ID","BNG_CHROM_HG4INS","BNG_START_HG4INS","BNG_END_HG4INS","BNG_QUAL_MAX_HG4INS","BspQIIDs_HG4INS","BssSIIDs_HG4INS","BNG_SVTYPE_HG4INS","BNG_LEN_HG4INS")
# Full outer merge of all six tables on the shared SV coordinate/ID columns:
# one row per SV carrying whatever BioNano evidence exists for each sample.
bionanomatch<-merge(merge(merge(merge(merge(bionanoHG2del,bionanoHG2ins,by=c("SEQ_CHROM","SEQ_START","SEQ_END","ID"),all=TRUE,sort=FALSE),bionanoHG3del,by=c("SEQ_CHROM","SEQ_START","SEQ_END","ID"),all=TRUE,sort=FALSE),bionanoHG3ins,by=c("SEQ_CHROM","SEQ_START","SEQ_END","ID"),all=TRUE,sort=FALSE),bionanoHG4del,by=c("SEQ_CHROM","SEQ_START","SEQ_END","ID"),all=TRUE,sort=FALSE),bionanoHG4ins,by=c("SEQ_CHROM","SEQ_START","SEQ_END","ID"),all=TRUE,sort=FALSE)
setkey(bionanomatch, "ID")
# Left-join the BioNano evidence onto the repeat/segdup-annotated call set.
svcompthresh2bionano <- merge(svcompthresh2annbeds,bionanomatch,by="ID",all.x=TRUE,sort=FALSE)
# Size concordance between BioNano and sequence-based calls, per sample:
#  *vsSEQLEN   = relative size difference, 1 - BNG/SEQ (0 means identical);
#                deletions negate SVLEN since sequence deletion SVLEN < 0.
#  *diffSEQLEN = log10 of the absolute size difference in bp; the DEL
#                variants use "+" because SVLEN is negative for deletions.
# Each pair of plot() calls shows BNG size vs sequence size (log10/log10).
# HG2 (son)
svcompthresh2bionano$BNGLENHG2DELvsSEQLEN <- 1-(svcompthresh2bionano$BNG_LEN_HG2DEL/-svcompthresh2bionano$SVLEN)
svcompthresh2bionano$BNGLENHG2INSvsSEQLEN <- 1-(svcompthresh2bionano$BNG_LEN_HG2INS/svcompthresh2bionano$SVLEN)
svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN <- log10(abs((svcompthresh2bionano$BNG_LEN_HG2DEL + svcompthresh2bionano$SVLEN)))
svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN <- log10(abs((svcompthresh2bionano$BNG_LEN_HG2INS - svcompthresh2bionano$SVLEN)))
plot(log10(svcompthresh2bionano$BNG_LEN_HG2DEL),log10(-svcompthresh2bionano$SVLEN))
plot(log10(svcompthresh2bionano$BNG_LEN_HG2INS),log10(svcompthresh2bionano$SVLEN))
# HG3 (father)
svcompthresh2bionano$BNGLENHG3DELvsSEQLEN <- 1-(svcompthresh2bionano$BNG_LEN_HG3DEL/-svcompthresh2bionano$SVLEN)
svcompthresh2bionano$BNGLENHG3INSvsSEQLEN <- 1-(svcompthresh2bionano$BNG_LEN_HG3INS/svcompthresh2bionano$SVLEN)
svcompthresh2bionano$BNGLENHG3DELdiffSEQLEN <- log10(abs((svcompthresh2bionano$BNG_LEN_HG3DEL + svcompthresh2bionano$SVLEN)))
svcompthresh2bionano$BNGLENHG3INSdiffSEQLEN <- log10(abs((svcompthresh2bionano$BNG_LEN_HG3INS - svcompthresh2bionano$SVLEN)))
plot(log10(svcompthresh2bionano$BNG_LEN_HG3DEL),log10(-svcompthresh2bionano$SVLEN))
plot(log10(svcompthresh2bionano$BNG_LEN_HG3INS),log10(svcompthresh2bionano$SVLEN))
# HG4 (mother)
svcompthresh2bionano$BNGLENHG4DELvsSEQLEN <- 1-(svcompthresh2bionano$BNG_LEN_HG4DEL/-svcompthresh2bionano$SVLEN)
svcompthresh2bionano$BNGLENHG4INSvsSEQLEN <- 1-(svcompthresh2bionano$BNG_LEN_HG4INS/svcompthresh2bionano$SVLEN)
svcompthresh2bionano$BNGLENHG4DELdiffSEQLEN <- log10(abs((svcompthresh2bionano$BNG_LEN_HG4DEL + svcompthresh2bionano$SVLEN)))
svcompthresh2bionano$BNGLENHG4INSdiffSEQLEN <- log10(abs((svcompthresh2bionano$BNG_LEN_HG4INS - svcompthresh2bionano$SVLEN)))
plot(log10(svcompthresh2bionano$BNG_LEN_HG4DEL),log10(-svcompthresh2bionano$SVLEN))
plot(log10(svcompthresh2bionano$BNG_LEN_HG4INS),log10(svcompthresh2bionano$SVLEN))
# Technology-support contingency table restricted to SVs >= 1 kb.
contingencytable <- xtabs(~(svcompthresh2[abs(svcompthresh2$SVLEN)>999,]$PBcalls>0)+(svcompthresh2[abs(svcompthresh2$SVLEN)>999,]$Illcalls>0)+(svcompthresh2[abs(svcompthresh2$SVLEN)>999,]$TenXcalls>0)+(svcompthresh2[abs(svcompthresh2$SVLEN)>999,]$CGcalls>0), data=svcompthresh2)
ftable(contingencytable,row.vars=c(1,3))
# Same table further stratified by whether the BioNano HG2 deletion (then
# insertion) size agrees with the sequence size to within 300 bp.
contingencytable <- xtabs(~(svcompthresh2bionano$PBcalls>0)+(svcompthresh2bionano$Illcalls>0)+(svcompthresh2bionano$TenXcalls>0)+(svcompthresh2bionano$CGcalls>0)+(svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN<log10(300)), data=svcompthresh2bionano)
ftable(contingencytable,row.vars=c(1,3,5))
contingencytable <- xtabs(~(svcompthresh2bionano$PBcalls>0)+(svcompthresh2bionano$Illcalls>0)+(svcompthresh2bionano$TenXcalls>0)+(svcompthresh2bionano$CGcalls>0)+(svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN<log10(300)), data=svcompthresh2bionano)
ftable(contingencytable,row.vars=c(1,3,5))
# Counts of >= 1 kb insertions/deletions supported by 2+ technologies,
# then counts of single-technology calls with 3+ supporting callsets.
sum(svcompthresh2$NumTechs>1 & svcompthresh2$SVLEN>999)
sum(svcompthresh2$NumTechs>1 & svcompthresh2$SVLEN < -999)
sum(svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN>999)
sum(svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN < -999)
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN>999,]$ID))
# NOTE(review): BNG_QUAL_BspQIID_BssSIID_SVTYPE is not created anywhere in
# this section (the merges above produce per-sample BNG_* columns instead);
# unless it is defined earlier in the file, this `$` lookup returns NULL,
# length(unique(NULL)) is 0, and the dedup arithmetic below degenerates —
# confirm against the upstream code.
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN>999,]$BNG_QUAL_BspQIID_BssSIID_SVTYPE))
#dedup BNG INS confirm
# Dedup arithmetic: unique BNG records - total rows + unique sequence IDs,
# intended to correct for one BNG call matching several sequence SVs.
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN>999,]$BNG_QUAL_BspQIID_BssSIID_SVTYPE))-sum(svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN>999)+length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN>999,]$ID))
#dedup BNG DEL confirm
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN< -999,]$BNG_QUAL_BspQIID_BssSIID_SVTYPE))-sum(svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN< -999)+length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN< -999,]$ID))
# Size histograms and counts for single-technology calls confirmed by
# BioNano size agreement (<300 bp) with 3+ supporting callsets.
# NOTE(review): BNGLENdiffSEQLEN (no HG2/HG3/HG4 DEL/INS suffix) is never
# created in this section — only the per-sample variants such as
# BNGLENHG2DELdiffSEQLEN exist. Unless it is defined earlier in the file,
# this `$` lookup returns NULL and every condition below involving it
# collapses, so these hists/sums/lengths do not reflect BioNano agreement.
# Confirm against the upstream code.
hist(log10(-svcompthresh2bionano[svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionano$NumClusterSVs > 2,]$SVLEN))
hist(log10(svcompthresh2bionano[svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionano$NumClusterSVs > 2,]$SVLEN))
sum(svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN>999,na.rm =TRUE)
sum(svcompthresh2bionano$NumTechs==1 & svcompthresh2bionano$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionano$NumClusterSVs > 2 & svcompthresh2bionano$SVLEN < -999,na.rm =TRUE)
# Multi-technology HG2 calls >= 1 kb partitioned by BioNano agreement:
# agreeing (<300 bp), disagreeing (>=300 bp), and with no BioNano match (NA).
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & svcompthresh2bionano$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionano$SVLEN>999,]$ID))
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & svcompthresh2bionano$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionano$SVLEN < -999,]$ID))
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & svcompthresh2bionano$BNGLENdiffSEQLEN>=log10(300) & svcompthresh2bionano$SVLEN>999,]$ID))
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & svcompthresh2bionano$BNGLENdiffSEQLEN>=log10(300) & svcompthresh2bionano$SVLEN < -999,]$ID))
sum(svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & is.na(svcompthresh2bionano$BNGLENdiffSEQLEN) & svcompthresh2bionano$SVLEN>999,na.rm =TRUE)
sum(svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & is.na(svcompthresh2bionano$BNGLENdiffSEQLEN) & svcompthresh2bionano$SVLEN < -999,na.rm =TRUE)
# Inspect multi-tech HG2 calls with NO BioNano size match (per-sample
# columns used here, unlike the lines above); `test` is overwritten by the
# second assignment, so only the insertion subset survives for inspection.
test<- svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & is.na(svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN) & svcompthresh2bionano$SVLEN< -999,]
test<- svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & is.na(svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN) & svcompthresh2bionano$SVLEN> 999,]
#add Nabsys HG2 deletion evaluations
# Load Nabsys deletion sizing/scoring results, keyed by the SV ID they
# evaluated ("CustomerSVID"), and left-join onto the BioNano-annotated set.
# unique(..., by="ID") collapses any many-to-one matches to one row per SV.
nabsysHG2del<-fread("/Users/jzook/Documents/AJTrio/SVs/triounion_170313/nabsys/ALLTN_Results_sizing.bed", stringsAsFactors=FALSE, header = TRUE, sep = "\t")
setkey(nabsysHG2del, "CustomerSVID")
svcompthresh2bionanonabsys <- unique(merge(svcompthresh2bionano,nabsysHG2del,all.x=TRUE,sort=FALSE,by.x="ID",by.y="CustomerSVID"),by="ID")
# Coerce score/size to numeric (they arrive as character; non-numeric
# entries become NA with a coercion warning).
svcompthresh2bionanonabsys$svm_score<- (as.numeric(as.character(svcompthresh2bionanonabsys$svm_score)))
svcompthresh2bionanonabsys$MeasDelSize<- (as.numeric(as.character(svcompthresh2bionanonabsys$MeasDelSize)))
# log10 absolute difference between Nabsys-measured deletion size and the
# sequence size ("+" because SVLEN < 0 for deletions).
svcompthresh2bionanonabsys$NABSYSLENdiffSEQLEN <- log10(abs((svcompthresh2bionanonabsys$MeasDelSize + svcompthresh2bionanonabsys$SVLEN)))
# TRUE when the SV has no HG2 (son) support and is not near another call.
svcompthresh2bionanonabsys$noHG2 <- (svcompthresh2bionanonabsys$HG2count==0 & svcompthresh2bionanonabsys$DistMinlt1000==FALSE)
plot(log10(svcompthresh2bionanonabsys$MeasDelSize),log10(-(svcompthresh2bionanonabsys$SVLEN)))
# Faceted diagnostics: size-difference histograms and BNG-vs-sequence size
# scatters, split by multi-technology support, HG2 support, SV type, and
# repeat context.
ggplot(svcompthresh2bionanonabsys, aes(x=NABSYSLENdiffSEQLEN)) + geom_histogram(binwidth=0.1,aes(colour = factor(MultiTech))) + facet_grid( noHG2 ~ MultiTech)
#nrow(duplicated(svcompthresh2bionanonabsys, by="ID"))
#nrow(unique(svcompthresh2bionanonabsys, by=c("ID")))
ggplot(svcompthresh2bionanonabsys, aes(x=BNGLENHG2DELdiffSEQLEN)) + geom_histogram(binwidth=0.1,aes(colour = factor(MultiTech))) + facet_grid( noHG2 ~ MultiTech)
ggplot(svcompthresh2bionanonabsys[!is.na(svcompthresh2bionanonabsys$BNG_LEN_HG2DEL),], aes(x=log10(BNG_LEN_HG2DEL),y=log10(-SVLEN))) + geom_point(aes(colour = factor(MultiTech)),alpha = 1/10) + facet_grid( HG2count ~ MultiTech)
ggplot(svcompthresh2bionanonabsys[!is.na(svcompthresh2bionanonabsys$BNG_LEN_HG2DEL) & svcompthresh2bionanonabsys$HG2count<10,], aes(x=log10(BNG_LEN_HG2DEL),y=log10(-SVLEN))) + geom_point(aes(colour = factor(MultiTech)),alpha = 1/10) + facet_grid( HG2count ~ MultiTech)
ggplot(svcompthresh2bionanonabsys, aes(x=BNGLENHG2INSdiffSEQLEN)) + geom_histogram(binwidth=0.1,aes(colour = factor(MultiTech))) + facet_grid( noHG2 ~ MultiTech)
ggplot(svcompthresh2bionanonabsys[!is.na(svcompthresh2bionanonabsys$BNG_LEN_HG2INS),], aes(x=log10(BNG_LEN_HG2INS),y=log10(SVLEN))) + geom_point(aes(colour = factor(MultiTech)),alpha = 1/10) + facet_grid( HG2count ~ MultiTech)
ggplot(svcompthresh2bionanonabsys[!is.na(svcompthresh2bionanonabsys$BNG_LEN_HG2INS) & svcompthresh2bionanonabsys$HG2count<10,], aes(x=log10(BNG_LEN_HG2INS),y=log10(SVLEN))) + geom_point(aes(colour = factor(MultiTech)),alpha = 1/20) + facet_grid( HG2count ~ MultiTech + TR)
ggplot(svcompthresh2bionanonabsys[!is.na(svcompthresh2bionanonabsys$BNG_LEN_HG2DEL),], aes(x=log10(BNG_LEN_HG2DEL),y=log10(abs(SVLEN)))) + geom_point(aes(colour = factor(MultiTech)),alpha = 1/10) + facet_grid( SVTYPE ~ MultiTech)
ggplot(svcompthresh2bionanonabsys[!is.na(svcompthresh2bionanonabsys$BNG_LEN_HG2INS),], aes(x=log10(BNG_LEN_HG2INS),y=log10(abs(SVLEN)))) + geom_point(aes(colour = factor(MultiTech)),alpha = 1/10) + facet_grid( SVTYPE ~ MultiTech)
ggplot(svcompthresh2bionanonabsys[(svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN<log10(300) | svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN<log10(300)) & abs(svcompthresh2bionanonabsys$SVLEN)>100,], aes(x=log10(abs(SVLEN)))) + geom_histogram(binwidth=0.1,aes(colour = factor(MultiTech))) + facet_grid( SVTYPE ~ MultiTech + TR)
# Candidate high-confidence counts under alternative inclusion rules
# (2+ techs / 4+ callsets; or >=5 calls in any sample; or BioNano/Nabsys
# orthogonal confirmation).
sum(svcompthresh2bionanonabsys$NumTechs>1 | svcompthresh2bionanonabsys$NumClusterSVs>=4)
sum(svcompthresh2bionanonabsys$NumTechs>1 | svcompthresh2bionanonabsys$HG2count>=5 | svcompthresh2bionanonabsys$HG3count>=5 | svcompthresh2bionanonabsys$HG4count>=5 | svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN<log10(300) | svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN<log10(300),na.rm=TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 | svcompthresh2bionanonabsys$HG2count>=5 | svcompthresh2bionanonabsys$HG3count>=5 | svcompthresh2bionanonabsys$HG4count>=5 | svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN<log10(300) | svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN<log10(300) | svcompthresh2bionanonabsys$svm_score> 0.9,na.rm=TRUE)
# Sites rescued ONLY by BioNano (<500 bp agreement) or Nabsys (score>0.9),
# sorted for inspection.
test<-svcompthresh2bionanonabsys[!(svcompthresh2bionanonabsys$NumTechs>1 | svcompthresh2bionanonabsys$NumClusterSVs>=4) & (svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN<log10(500) | svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN<log10(500) | svcompthresh2bionanonabsys$svm_score> 0.9),]
setorder(test,CHROM,POS)
# Nabsys-vs-other-evidence cross-counts for deletions.
# NOTE(review): several lines below test svcompthresh2bionano$BNGLENdiffSEQLEN,
# a column never created in this section (only per-sample variants like
# BNGLENHG2DELdiffSEQLEN exist); unless defined earlier in the file, those
# conditions compare against NULL and the affected sums/plots degenerate.
sum(svcompthresh2bionanonabsys$NumTechs==1 & svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN < -299,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs==1 & svcompthresh2bionano$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN < -999,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$SVLEN < -299,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN < -299,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$svm_score<= 0.9 & svcompthresh2bionanonabsys$SVLEN < -299,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$svm_score< 0.2 & svcompthresh2bionanonabsys$SVLEN < -299,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$HG2count>0 & svcompthresh2bionanonabsys$svm_score< 0.2 & svcompthresh2bionanonabsys$SVLEN < -299,na.rm =TRUE)
# Dedup arithmetic for Nabsys-confirmed deletions (see earlier NOTE about
# BNG_QUAL_BspQIID_BssSIID_SVTYPE not being created in this section).
length(unique(svcompthresh2bionanonabsys[svcompthresh2bionanonabsys$NumTechs==1 & svcompthresh2bionano$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN < -999,]$BNG_QUAL_BspQIID_BssSIID_SVTYPE))-sum(svcompthresh2bionanonabsys$NumTechs==1 & svcompthresh2bionano$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN < -999,na.rm =TRUE)+length(unique(svcompthresh2bionanonabsys[svcompthresh2bionanonabsys$NumTechs==1 & svcompthresh2bionano$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN < -999,]$ID))
length(unique(svcompthresh2bionanonabsys[svcompthresh2bionanonabsys$NumTechs==1 & svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN < -999,]$BNG_QUAL_BspQIID_BssSIID_SVTYPE))-sum(svcompthresh2bionanonabsys$NumTechs==1 & svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN < -999,na.rm =TRUE)+length(unique(svcompthresh2bionanonabsys[svcompthresh2bionanonabsys$NumTechs==1 & svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN < -999,]$ID))
length(unique(svcompthresh2bionanonabsys[svcompthresh2bionanonabsys$NumTechs==1 & svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN >= -999,]$ID))
length(unique(svcompthresh2bionanonabsys[svcompthresh2bionanonabsys$NumTechs==1 & svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9 & svcompthresh2bionanonabsys$SVLEN < -999,]$ID))
# Summary histograms of confirmed-call sizes and score/size-difference
# distributions, faceted by support category.
ggplot(svcompthresh2bionanonabsys[svcompthresh2bionanonabsys$NumClusterSVs > 2 & svcompthresh2bionanonabsys$svm_score> 0.9,], aes(x=log10(-SVLEN))) + geom_histogram(binwidth=0.1,colour="white") + facet_grid( ~ MultiTech)
ggplot(svcompthresh2bionanonabsys[svcompthresh2bionanonabsys$BNGLENdiffSEQLEN<log10(300) & svcompthresh2bionanonabsys$NumClusterSVs > 2,], aes(x=log10(abs(SVLEN)))) + geom_histogram(binwidth=0.1,colour="white") + facet_grid( SVTYPE ~ MultiTech)
ggplot(svcompthresh2bionanonabsys, aes(x=svm_score)) + geom_histogram(binwidth=0.05,colour="white") + facet_grid( noHG2 ~ MultiTech)
ggplot(svcompthresh2bionanonabsys, aes(x=BNGLENdiffSEQLEN)) + geom_histogram(binwidth=0.2,colour="white") + facet_grid( SVTYPE ~ MultiTech )
ggplot(svcompthresh2bionanonabsys, aes(x=svm_score, ..density..)) + geom_histogram(binwidth=0.05,colour="white") + facet_grid( noHG2 ~ MultiTech)
ggplot(svcompthresh2bionanonabsys, aes(x=BNGLENdiffSEQLEN, ..density..)) + geom_histogram(binwidth=0.2,colour="white") + facet_grid( SVTYPE ~ MultiTech )
#output new vcfs
#non-PASS sites supported by BioNano or Nabsys but not called by 5 callers
# Selection: calls that are NOT multi-technology / multi-callset, but that have
# BioNano length agreement (< log10(500)) or a Nabsys SVM score > 0.9.
# NOTE(review): the BNGLEN columns come from 'svcompthresh2bionano' while the
# remaining columns come from 'svcompthresh2bionanonabsys'; this assumes the two
# data frames are row-aligned -- confirm upstream.
novel_support <- !(svcompthresh2bionanonabsys$NumTechs>1 | svcompthresh2bionanonabsys$NumClusterSVs>=4) & (svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN<log10(500) | svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN<log10(500) | svcompthresh2bionanonabsys$svm_score> 0.9)
sum(novel_support,na.rm=TRUE)
# Fix: the original '[order(CHROM,POS)]' indexed the data frame with a single
# row-length vector (which selects *columns*) and referenced CHROM/POS that are
# not in scope; sort the subset by its own columns and index rows explicitly.
novel_vcf <- svcompthresh2bionanonabsys[novel_support,c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","AJTRIO")]
write.table(novel_vcf[order(novel_vcf$CHROM, novel_vcf$POS), ],sep="\t",row.names=FALSE,col.names=FALSE,file="/Users/jzook/Documents/AJTrio/SVs/triounion_171212/union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.nabsysorbionanoandnot2techor4callset.vcf",quote=FALSE)
# Shell post-processing of the VCF written above (sort, bgzip, tabix):
# sed 's/^X/23/;s/^Y/24/;s/^MT/25/' union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.nabsysorbionanoandnot2techor5caller.vcf | sort -k1,1n -k2,2n -k4,4 -k5,5 | sed 's/^23/X/;s/^24/Y/;s/^25/MT/' > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.nabsysorbionanoandnot2techor5caller.sort.vcf
# cat header.txt union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.nabsysorbionanoandnot2techor5caller.sort.vcf | /Applications/bioinfo/htslib-1.3/bgzip -c > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.nabsysorbionanoandnot2techor5caller.vcf.gz
# /Applications/bioinfo/htslib-1.3/tabix -f union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.nabsysorbionanoandnot2techor5caller.vcf.gz
#test<-svcompthresh2bionanonabsys[!(svcompthresh2bionanonabsys$NumTechs>1 | svcompthresh2bionanonabsys$HG2count>=5 | svcompthresh2bionanonabsys$HG3count>=5 | svcompthresh2bionanonabsys$HG4count>=5) & (svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN<log10(500) | svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN<log10(500) | svcompthresh2bionanonabsys$svm_score> 0.9),]
# Cluster-size and SV-length distributions by SV type and size category
ggplot(svcompthresh2bionanonabsys, aes(x=NumClusterSVs, ..density..)) + geom_histogram(binwidth=1,colour="black") + facet_grid( SVTYPE ~ sizecat) + xlim(c(0,50))
ggplot(svcompthresh2bionanonabsys, aes(x=log10(abs(SVLEN)), ..density..)) + geom_histogram(binwidth=0.03,colour="black") + facet_grid( SVTYPE ~ sizecat, scales = "free")
# Fix: the original subset was missing the row/column comma, so the logical
# vector was used as a *column* index ("undefined columns selected" error);
# add ', ' to select rows with |SVLEN| > 999 bp.
ggplot(svcompthresh2bionanonabsys[abs(svcompthresh2bionanonabsys$SVLEN)>999, ], aes(x=(abs(SVLEN)))) + geom_histogram(binwidth=100,colour="black") + facet_grid( SVTYPE ~ sizecat, scales = "free") + xlim(c(0,20000))
# Same length histograms restricted to supported calls (multi-tech or >=4 callsets,
# optionally also BioNano/Nabsys support in the second plot)
ggplot(svcompthresh2bionanonabsys[(svcompthresh2bionanonabsys$NumTechs>1 | svcompthresh2bionanonabsys$NumClusterSVs>=4) & abs(svcompthresh2bionanonabsys$SVLEN)>999,], aes(x=(abs(SVLEN)))) + geom_histogram(binwidth=100,colour="black") + facet_grid( SVTYPE ~ sizecat, scales = "free") + xlim(c(0,20000))
ggplot(svcompthresh2bionanonabsys[(svcompthresh2bionanonabsys$NumTechs>1 | svcompthresh2bionanonabsys$NumClusterSVs>=4 | svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN<log10(500) | svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN<log10(500) | svcompthresh2bionanonabsys$svm_score> 0.9) & abs(svcompthresh2bionanonabsys$SVLEN)>999,], aes(x=(abs(SVLEN)))) + geom_histogram(binwidth=100,colour="black") + facet_grid( SVTYPE ~ sizecat) + xlim(c(0,20000))
#PASS dels called as incorrect by nabsys (svmscore<0.2)
write.table(svcompthresh2bionanonabsys[svcompthresh2bionanonabsys$NumTechs>1 & !is.na(svcompthresh2bionanonabsys$svm_score) & svcompthresh2bionanonabsys$svm_score< 0.2 & svcompthresh2bionanonabsys$SVLEN < -299,c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","AJTRIO")],sep="\t",row.names=FALSE,col.names=FALSE,file="/Users/jzook/Documents/AJTrio/SVs/triounion_171212/union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts_HG2DELgt299.nabsyssvmlt0.2.vcf",quote=FALSE)
# sed 's/^X/23/;s/^Y/24/;s/^MT/25/' union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts_HG2DELgt299.nabsyssvmlt0.2.vcf | sort -k1,1n -k2,2n -k4,4 -k5,5 | sed 's/^23/X/;s/^24/Y/;s/^25/MT/' > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts_HG2DELgt299.nabsyssvmlt0.2.sort.vcf
# cat header.txt union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts_HG2DELgt299.nabsyssvmlt0.2.sort.vcf | /Applications/bioinfo/htslib-1.3/bgzip -c > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts_HG2DELgt299.nabsyssvmlt0.2.vcf.gz
# /Applications/bioinfo/htslib-1.3/tabix -f union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts_HG2DELgt299.nabsyssvmlt0.2.vcf.gz
# Interactive inspection of PASS deletions > 599 bp with low SVM scores
View(svcompthresh2bionanonabsys[svcompthresh2bionanonabsys$NumTechs>1 & !is.na(svcompthresh2bionanonabsys$svm_score) & svcompthresh2bionanonabsys$svm_score< 0.2 & svcompthresh2bionanonabsys$SVLEN < -599,c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","PBcalls","ClusterIDs","SVLEN")])
#PASS ins missing from bionano
write.table(svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & !is.na(svcompthresh2bionano$BNGLENdiffSEQLEN) & svcompthresh2bionano$BNGLENdiffSEQLEN>=log10(300),c("CHROM","POS","ID","REF","ALT","QUAL","FILTER","INFO","FORMAT","AJTRIO")],sep="\t",row.names=FALSE,col.names=FALSE,file="/Users/jzook/Documents/AJTrio/SVs/triounion_171212/union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.bionanodiffgt300.vcf",quote=FALSE)
# sed 's/^X/23/;s/^Y/24/;s/^MT/25/' union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.bionanodiffgt300.vcf | sort -k1,1n -k2,2n -k4,4 -k5,5 | sed 's/^23/X/;s/^24/Y/;s/^25/MT/' > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.bionanodiffgt300.sort.vcf
# cat header.txt union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.bionanodiffgt300.sort.vcf | /Applications/bioinfo/htslib-1.3/bgzip -c > union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.bionanodiffgt300.vcf.gz
# /Applications/bioinfo/htslib-1.3/tabix -f union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.bionanodiffgt300.vcf.gz
# Counts of PASS calls with/without a BioNano length difference, and the
# fraction of large deletions absent from BioNano
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & svcompthresh2bionano$BNGLENdiffSEQLEN>=log10(300),]$ID))
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & svcompthresh2bionano$BNGLENdiffSEQLEN>=log10(300) & svcompthresh2bionano$SVLEN < -999,]$ID))
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & is.na(svcompthresh2bionano$BNGLENdiffSEQLEN) & svcompthresh2bionano$SVLEN < -999,]$ID))
length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & is.na(svcompthresh2bionano$BNGLENdiffSEQLEN) & svcompthresh2bionano$SVLEN < -999,]$ID))/length(unique(svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & svcompthresh2bionano$SVLEN < -999,]$ID))
# Size distributions of large deletions missing vs present in BioNano
hist(log10(-svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & is.na(svcompthresh2bionano$BNGLENdiffSEQLEN) & svcompthresh2bionano$SVLEN < -999,]$SVLEN))
hist(log10(-svcompthresh2bionano[svcompthresh2bionano$NumTechs>1 & svcompthresh2bionano$HG2count>0 & !is.na(svcompthresh2bionano$BNGLENdiffSEQLEN) & svcompthresh2bionano$SVLEN < -999,]$SVLEN))
# Counts of multi-technology calls with no PacBio support, by size threshold
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$PBcalls==0 & svcompthresh2bionanonabsys$SVLEN < -299,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$PBcalls==0 & svcompthresh2bionanonabsys$SVLEN > 299,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$PBcalls==0 & svcompthresh2bionanonabsys$SVLEN < -49,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$PBcalls==0 & svcompthresh2bionanonabsys$SVLEN > 49,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$PBcalls==0 & svcompthresh2bionanonabsys$SVLEN < 0,na.rm =TRUE)
sum(svcompthresh2bionanonabsys$NumTechs>1 & svcompthresh2bionanonabsys$PBcalls==0 & svcompthresh2bionanonabsys$SVLEN > 0,na.rm =TRUE)
#Output bed for merging calls not tested by genosv
# Selection: calls with no multi-tech/multi-callset support and no BioNano or
# Nabsys evidence (the is.na() guards keep rows whose evidence is missing).
# Fix: the original appended '[order(CHROM,POS,END)]', which (a) indexes the
# data frame with a single row-length vector, selecting *columns*, and (b)
# refers to CHROM/POS/END outside any data mask; sort the subset explicitly.
untested_sel <- !((svcompthresh2bionanonabsys$NumTechs>1 | svcompthresh2bionanonabsys$NumClusterSVs>=4) | ((!is.na(svcompthresh2bionanonabsys$BNGLENHG2INSdiffSEQLEN) & svcompthresh2bionano$BNGLENHG2INSdiffSEQLEN<log10(500)) | (!is.na(svcompthresh2bionanonabsys$BNGLENHG2DELdiffSEQLEN) & svcompthresh2bionano$BNGLENHG2DELdiffSEQLEN<log10(500)) | (!is.na(svcompthresh2bionanonabsys$svm_score) & svcompthresh2bionanonabsys$svm_score> 0.9)))
untested_bed <- svcompthresh2bionanonabsys[untested_sel,c("CHROM","POS","END","SVLEN","HG2count","PBcalls","Illcalls","TenXcalls","CGcalls")]
write.table(untested_bed[order(untested_bed$CHROM, untested_bed$POS, untested_bed$END), ],sep="\t",row.names=FALSE,col.names=FALSE,file="/Users/jzook/Documents/AJTrio/SVs/triounion_171212/union_171212_refalt.2.2.2.clustered.simpleINFO.techcounts.notnabsysorbionanoor2techor4callset.bed",quote=FALSE)
|
bfac550a5281bb09cc2e2433a5791c4400a42574 | f63a9c1887ec71cae6d65f88c33ddc99f3fded4a | /man/mongo.index.TTLcreate.Rd | ef761d26c9f2c2505b65794c1d9e39e73ca90fef | [] | no_license | agnaldodasilva/rmongodb | 41b337c42b4b6e1fb41b9ad2949fab1e6a850fb0 | 8eb2bca2d9c88f542832d1bcb6ccd209fdfc676c | refs/heads/master | 2020-08-07T15:39:41.703738 | 2016-03-21T10:36:28 | 2016-03-21T10:36:28 | 213,510,405 | 1 | 0 | null | 2019-10-08T00:10:59 | 2019-10-08T00:10:59 | null | UTF-8 | R | false | false | 1,757 | rd | mongo.index.TTLcreate.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\name{mongo.index.TTLcreate}
\alias{mongo.index.TTLcreate}
\title{Add a time to live (TTL) index to a collection}
\usage{
mongo.index.TTLcreate(mongo, ns, key, expireAfterSeconds, index_name = NULL)
}
\arguments{
\item{mongo}{(\link{mongo}) A mongo connection object.}
\item{ns}{(string) The namespace of the collection to add a TTL index to.}
\item{key}{(\link{mongo.bson}) The desired field(s) to use as the basis for expiration time. The field should be of type 'Date'.
Alternately, \code{key} may be a list which will be converted to a
mongo.bson object by \code{\link{mongo.bson.from.list}()}.
Alternately, \code{key} may be a valid JSON character string which will be converted to a
mongo.bson object by \code{\link{mongo.bson.from.JSON}()}.}
\item{expireAfterSeconds}{(Numeric or Integer) The time in seconds after which records should be removed.}
\item{index_name}{(string) The name of the index to be created.}
}
\value{
NULL if the command failed. \code{\link{mongo.get.err}()} may be
MONGO_COMMAND_FAILED.
(\link{mongo.bson}) The server's response if successful.
}
\description{
Add a time to live (TTL) index to a collection
}
\details{
See \url{http://docs.mongodb.org/manual/tutorial/expire-data}.
}
\examples{
mongo <- mongo.create()
if (mongo.is.connected(mongo)) {
for (i in 1:10) mongo.insert(mongo, ns = 'test.testTTL', b = list(a = i, last_updated = i))
res_bson <- mongo.index.TTLcreate (mongo, ns = 'test.testTTL', key = list(last_updated = 1),
expireAfterSeconds = 3600, index_name = 'last_updated_1')
print(res_bson);
mongo.drop(mongo, ns = 'test.testTTL')
}
mongo.destroy(mongo);
}
\seealso{
\code{\link{mongo.index.create}}
}
|
459b450daf8f05e7714b190166764e0acf4dfd76 | 0d60e7809acc3ca8c4461469802f3d9bb50d3352 | /climatology_compile.R | c924c375071c8d5161e345746752d1e4caca1bf3 | [] | no_license | lark-gorilla/paper2 | 191a9e60bd07926ca7236c2d4402f9cee6d84ad0 | fd9d3fc39499067b2d13036c9afff8d32f262140 | refs/heads/master | 2021-01-22T07:03:33.841439 | 2020-03-03T12:09:16 | 2020-03-03T12:09:16 | 68,729,147 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,788 | r | climatology_compile.R | # 11/09/16 Lancaster, UK
#read in, select and compile hdf data for kernal-based extraction
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged -- they
# clobber the caller's workspace and working directory.
rm(list=ls())
setwd("~/grive/phd/sourced_data/env_data/climatologies")
library("curl")
#Download data from bloomwatch 360 using get query
# REMEMBER you can grab the get query link by right clicking
# once parameters have been set the .nc download option
#http://coastwatch.pfeg.noaa.gov/coastwatch/CWBrowserWW360.jsp?get=gridData&dataSet=TNCwwww&timePeriod=1month&centeredTime=1995-12-30T00:00:00&minLon=0&maxLon=360&minLat=-90&maxLat=90&fileType=.nc
#http://coastwatch.pfeg.noaa.gov/coastwatch/CWBrowserWW360.jsp?get=gridData&dataSet=CPCsstn&timePeriod=monthly&centeredTime=0001-12-16T12:00:00&minLon=0&maxLon=360&minLat=-90&maxLat=90&fileType=.nc
#http://coastwatch.pfeg.noaa.gov/coastwatch/CWBrowserWW360.jsp?get=gridData&dataSet=CTCsshd&timePeriod=monthly&centeredTime=0001-12-16T12:00:00&minLon=0&maxLon=360&minLat=-90&maxLat=90&fileType=.nc
#http://coastwatch.pfeg.noaa.gov/coastwatch/CWBrowserWW360.jsp?get=gridData&dataSet=CQCwekm&timePeriod=monthly&centeredTime=0001-12-16T12:00:00&minLon=0&maxLon=360&minLat=-90&maxLat=90&fileType=.nc
#http://coastwatch.pfeg.noaa.gov/coastwatch/CWBrowserWW360.jsp?get=gridData&dataSet=CPMbfp1&timePeriod=monthly&centeredTime=0001-12-16T12:00:00&minLon=0&maxLon=360&minLat=-90&maxLat=90&fileType=.nc
# Datasets to fetch (SST, SSH, Ekman upwelling, frontal probability, wind)
dset=c("CPCsstn","CTCsshd","CQCwekm", "CPMbfp1", "TNCwwww")
# Download one .nc per month per dataset into the working directory.
# The TNCwwww (wind) product uses explicit 1995 month-end dates; the monthly
# climatologies use the ~0001-mm-16 centered-time convention.
for(i in dset)
{
if(i=="TNCwwww")
{
for(j in c("01-29","03-01","03-31", "04-30", "05-30","06-30", "07-30",
"08-30", "09-30", "10-30", "11-30", "12-30")){
curl_download(url=paste("http://coastwatch.pfeg.noaa.gov/coastwatch/CWBrowserWW360.jsp?get=gridData&dataSet=TNCwwww&timePeriod=1month&centeredTime=1995-",
j, "T00:00:00&minLon=100&maxLon=180&minLat=-45&maxLat=45&fileType=.nc", sep=""), destfile=paste(i, "1995-", j, ".nc", sep=""))
print(paste(i, j))}
}else{
for(j in c("01","02","03", "04", "05","06", "07",
"08", "09", "10", "11", "12")){
curl_download(url=paste("http://coastwatch.pfeg.noaa.gov/coastwatch/CWBrowserWW360.jsp?get=gridData&dataSet=",
i,"&timePeriod=monthly&centeredTime=~0001-", j, "-16T12:00:00&minLon=100&maxLon=180&minLat=-45&maxLat=45&fileType=.nc",sep=""), destfile=paste(i, "2016-", j, "-01", ".nc", sep=""))
print(paste(i, j))} # the curl above has a ~ before centered time to approx next best time, need to check it got it right
}
}
library(ncdf4)
library(raster)
# Quick sanity check of one downloaded file (nc_open() result is only printed)
ncz<-list.files()
nc_open(ncz[1])
ras<-raster(ncz[1])
# now get thermocline data
nc1<-nc_open("~/grive/phd/sourced_data/env_data/thermocline/ttd_DTm02_c1m_reg2.0.nc")
print(nc1)
ttd=ncvar_get(nc1, varid="ttd") # extracts top of thermocline depth (estimated by kriging of ttd_smth) data
## from .nc file note gives XYZ matrix of values for long, lats and months [1:180, 1:90, 12]
# Write one 2-degree global GeoTIFF per month: build the raster from the i-th
# monthly slice, orient it (flip + rotate 0..360 -> -180..180), and mask the
# 1e9 fill value as NA.
for(i in 1:12){
r1<-raster(nrows=90, ncols=180, resolution=2,
vals=as.vector(ttd[1:180, 1:90, i])) # i-th monthly layer
r1@extent<-extent(0,360,-90,90) # global coords
r1<-flip(r1,2)
r1<-rotate(r1)
#plot(r1)
#plot(log(r1))
r1[r1==1.000000e+09]<-NA
#plot(r1)
writeRaster(r1, paste("~/grive/phd/sourced_data/env_data/climatologies/thermocline_",
i, ".tif", sep=""))
}
### finally we better make a distance to seamount layer
bathy<-raster("~/grive/phd/sourced_data/env_data/phd_bathy/GRIDONE_2D_100.0_-45.0_180.0_40.0.nc")
smounts<-read.table("~/grive/phd/sourced_data/env_data/seamounts/seamounts_KWSMTSv01.txt",
skip=16, header=T, sep = "\t", comment.char=">")
# Regional subset (south of the equator, east of 150E); the longitude column is
# read in as 'X..Longitude'
qplot(data=smounts[smounts$Latitude< 0 & smounts$X..Longitude>150,],
y=Latitude, x=X..Longitude, colour=Height)
write.csv(smounts[smounts$Latitude< 0 & smounts$X..Longitude>150,],
"~/grive/phd/sourced_data/env_data/seamounts/smounts_regional.csv",
quote=F, row.names=F)
# NOTE(review): 'maps' is a package name, not a function, and base-graphics
# map overlays cannot be added to a ggplot device -- this line looks like a
# broken leftover; confirm and remove or replace with maps::map().
maps(world, add=T)
# NOTE(review): the rest of the file uses 'X..Longitude'; confirm that a
# 'Longitude' column actually exists here.
sm_ras_wgs<-rasterize(data.frame(smounts$Longitude,
smounts$Latitude),bathy, field=1, background=NA)
sm_ras_merc<-projectRaster(sm_ras_wgs, crs="+proj=merc +lon_0=140 +k=1 +x_0=0 +y_0=0 +ellps=WGS84 +datum=WGS84 +units=m +no_defs")
#project to mercator with centre at 140
writeRaster(sm_ras_merc, "~/grive/phd/sourced_data/env_data/seamounts/seamounts_ras_merc.tif", overwrite=T)
## distance (even when projected) in R is pretty slow, if you write to QGIS to use GDAL = v. fast :)
#Now read in QGIS product and reproject
dsmount_merc<-raster("~/grive/phd/sourced_data/env_data/seamounts/d_seamounts_merc.tif")
dsmount_wgs<-projectRaster(dsmount_merc, crs="+proj=longlat +ellps=WGS84")
#convert to km
dsmount_wgs<-dsmount_wgs/1000
writeRaster(dsmount_wgs, "~/grive/phd/sourced_data/env_data/seamounts/d_seamounts_wgs.tif", overwrite=T)
|
871c47fff2c4c0a73ce3beeaa2fec5dc75955aea | 81071399eea63bdcce4f43c1bd6186ac9e43a384 | /inst/shiny/gpApp/server.R | cbe931369d1928d7d8e46fc9d2bae6666fb1f0b1 | [] | no_license | computingway/gazepath | 8b5b7ff391f74e07a65f1740664f469e992c7b31 | 69383e53dd708de4f9436a6197fc98397ad4585f | refs/heads/master | 2021-03-31T17:44:12.190829 | 2020-02-09T14:20:02 | 2020-02-09T14:20:02 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 9,051 | r | server.R | library(shiny)
library(gazepath)
library(jpeg)
library(scales)
# App-wide options: allow file uploads up to 50 MB; disable call tracing.
options(shiny.maxRequestSize = 50 * 1024^2)
options(trace = FALSE)
shinyServer(function(input, output) {
dataInput <- reactive({
inFile <- input$file1
if (is.null(inFile))
return(NULL)
data <- list()
for(i in 1:nrow(inFile)){
data[[i]] <- read.csv(inFile[i,4], header=input$header, sep=input$sep, quote=input$quote, na.strings = input$na)
row.names(data[[i]]) <- 1:dim(data[[i]])[1]
}
names(data) <- as.vector(as.character(inFile[,1]))
return(data)
})
  # Column names of the first uploaded file; used to populate every
  # column-selection dropdown below.
  # NOTE(review): errors if called before a file is uploaded (dataInput() is
  # NULL then) -- confirm that is acceptable for this UI.
  rowNames <- reactive({
    names(dataInput()[[1]])
  })
  # Dropdowns mapping the raw-data columns to gazepath's expected inputs
  # (left/right eye x/y, eye-screen distance, trial index).
  output$nameXVariables <- renderUI({
    selectInput('nameX', 'Name of the column with the left eye x-coordinates', rowNames())
  })
  output$nameYVariables <- renderUI({
    selectInput('nameY', 'Name of the column with the left eye y-coordinates', rowNames())
  })
  output$nameDVariables <- renderUI({
    selectInput('nameD', 'Name of the column with the distance', rowNames())
  })
  output$nameX2Variables <- renderUI({
    selectInput('nameX2', 'Name of the column with the right eye x-coordinates', rowNames())
  })
  output$nameY2Variables <- renderUI({
    selectInput('nameY2', 'Name of the column with the right eye y-coordinates', rowNames())
  })
  output$nameD2Variables <- renderUI({
    selectInput('nameD2', 'Name of the column with the distance', rowNames())
  })
  output$nameTVariables <- renderUI({
    selectInput('nameT', 'Name of the column with the trial index', rowNames())
  })
plot_sizew <- function(){
input$p_w
}
plot_sizeh <- function(){
input$p_h
}
  # Participant selector for the stimulus-image overlay plot
  output$ppiVariables <- renderUI({
    selectInput('ppi', 'Participant', nameVar())
  })
  # Draw the uploaded stimulus image centered on the screen resolution, then
  # overlay the raw gaze path and the classified fixations for the chosen
  # trial.  NOTE(review): elements [[2]]/[[3]]/[[16]] of the gazepath result
  # are presumably per-trial x, y and classification tables -- confirm against
  # the gazepath object structure.
  output$image <- renderPlot({
    infile <- input$image
    x <- readJPEG(infile$datapath, native = TRUE)
    res <- dim(x)[1:2]
    plot(1, 1, xlim = c((input$res_x - res[2]) / 2, input$res_x - ((input$res_x - res[2]) / 2)), ylim = c(input$res_y - ((input$res_y - res[1]) / 2), (input$res_y - res[1]) / 2), col = 'white', xlab = '', ylab = '', xaxt = 'n', yaxt = 'n', bty = 'n')
    rasterImage(x, (input$res_x - res[2]) / 2, input$res_y - ((input$res_y - res[1]) / 2), input$res_x - ((input$res_x - res[2]) / 2), (input$res_y - res[1]) / 2)
    sim <- gazepathInput()[[which(nameVar() == input$ppi)]][[16]][[as.numeric(input$ip)]]
    lines(gazepathInput()[[which(nameVar() == input$ppi)]][[2]][[as.numeric(input$ip)]], gazepathInput()[[which(nameVar() == input$ppi)]][[3]][[as.numeric(input$ip)]])
    # Fixations ('f' rows): translucent discs sized by duration, plus letter labels
    points(sim[sim[,1] == 'f', 9:10], cex = sim[sim[,1] == 'f', 2] / 50, col = alpha(2, .5), pch = 16)
    points(sim[sim[,1] == 'f', 9:10], col = 3, pch = letters, cex = 2)
  }, width = plot_sizew, height = plot_sizeh)
outVar <- reactive({
if(length(which(nameVar() == input$ppi)) != 0){
unique(summary(gazepathInput()[[which(nameVar() == input$ppi)]])$Trial)
}
})
outVarppp <- reactive({
if(length(which(nameVar() == input$ppp)) != 0){
unique(summary(gazepathInput()[[which(nameVar() == input$ppp)]])$Trial)
}
})
outVarpti <- reactive({
if(length(which(nameVar() == input$pti)) != 0){
unique(summary(gazepathInput()[[which(nameVar() == input$pti)]])$Trial)
}
})
  # Trial selectors, each fed by the matching participant's trial list
  output$trialVariables <- renderUI({
    selectInput('i', 'Trial Number', outVarppp())
  })
  output$treshVariables <- renderUI({
    selectInput('ii', 'Trial Number', outVarpti())
  })
  output$plotVariables <- renderUI({
    selectInput('ip', 'Select the corresponding trial', outVar())
  })
  # Static instruction texts for the raw-data and analysis tabs
  output$textR <- renderText({
    'Click browse to select data from one or multiple participants, once the data is loaded you can see the first 10 and last 10 rows of the raw data of that participant. Use the options on the left to make sure the data is loaded correctly. If your data is loaded correctly you can proceed to the analyze data tab.'
  })
  output$textF <- renderText({
    'Select to correct variables and press the go button to perform the analyses, the data of all participants that were loaded in the previous step is analyzed, therefore it may take a while. \n\n After the analyses are done you can select the participants to see his or her first 20 fixations and saccades. \n\n Move on to the next tab to visualize the data.'
  })
  # Participant names = names of the uploaded-file list
  nameVar <- reactive({
    names(dataInput())
  })
  output$ppVariables <- renderUI({
    selectInput('pp', 'Participant', nameVar())
  })
  output$ptVariables <- renderUI({
    selectInput('pt', 'Participant', nameVar())
  })
  # First and last 10 raw rows of the participant chosen via 'pp', shown with
  # a progress message while the upload is being read.
  output$contenth <- renderTable({
    withProgress(
      if(length(which(nameVar() == input$pp)) != 0) head(dataInput()[[which(nameVar() == input$pp)]], 10),
      min = .3, max = 1,
      message = 'Loading data, this may take some time, please wait'
    )
  })
  output$contentt <- renderTable({
    withProgress(
      if(length(which(nameVar() == input$pp)) != 0) tail(dataInput()[[which(nameVar() == input$pp)]], 10),
      min = .3, max = 1,
      message = 'Loading data, this may take some time, please wait'
    )
  })
gazepathInput <- eventReactive(input$go, {
out <- list()
withProgress(
for(i in 1:length(nameVar())){
incProgress(1/length(nameVar()), message = paste('Running the analysis of participant', nameVar()[i]), 'Please wait')
out[[i]] <- gazepath(data = dataInput()[[i]],
x1 = input$nameX,
y1 = input$nameY,
x2 = input$nameX2,
y2 = input$nameY2,
d1 = input$nameD,
d2 = input$nameD2,
trial = input$nameT,
height_px = input$height_px,
height_mm = input$height_mm,
width_px = input$width_px,
width_mm = input$width_mm,
res_x = input$res_x,
res_y = input$res_y,
samplerate = input$samplerate,
method = input$method,
extra_var = ifelse(is.null(input$extra_var), NULL, strsplit(input$extra_var, ',')[[1]]))
})
return(out)
})
  # First 20 fixations/saccades of the participant chosen via 'pt'
  output$data <- renderTable({
    if(length(which(nameVar() == input$pt)) != 0) head(summary(gazepathInput()[[which(nameVar() == input$pt)]]), 20)
  })
outData <- reactive({
if(input$out == 'All fixations and saccades'){
df <- numeric()
for(i in 1:length(nameVar())){
df <- rbind(df, cbind(nameVar()[i], summary(gazepathInput()[[i]])))
}
} else {
if(input$out == 'Only complete fixations and saccades'){
df <- numeric()
for(i in 1:length(nameVar())){
df <- rbind(df, cbind(nameVar()[i], summary(gazepathInput()[[i]], complete_only = TRUE)))
}
} else {
if(input$out == 'Fixations only'){
df <- numeric()
for(i in 1:length(nameVar())){
df <- rbind(df, cbind(nameVar()[i], summary(gazepathInput()[[i]], fixations_only = TRUE)))
}
} else {
df <- numeric()
for(i in 1:length(nameVar())){
df <- rbind(df, cbind(nameVar()[i], summary(gazepathInput()[[i]], complete_only = TRUE, fixations_only = TRUE)))
}
}
}
}
names(df)[1] <- 'Participant'
return(df)
})
  # Preview of the first 20 rows of the export table
  output$datasum <- renderTable({
    head(outData(), 20)
  })
  # Reactive size accessors for the gaze-path plot
  plot_size_w <- function(){
    input$plot_w
  }
  plot_size_h <- function(){
    input$plot_h
  }
  output$pppVariables <- renderUI({
    selectInput('ppp', 'Participant', nameVar())
  })
  # Gaze-path plot for the selected participant and trial
  output$plot <- renderPlot({
    if(length(which(nameVar() == input$ppp)) != 0) plot(gazepathInput()[[which(nameVar() == input$ppp)]], trial_index = as.numeric(input$i))
  }, width = plot_size_w, height = plot_size_h
  )
  output$ptiVariables <- renderUI({
    selectInput('pti', 'Participant', nameVar())
  })
  # Velocity-threshold diagnostic via gazepath's internal Mould_vel().
  # NOTE(review): elements [[9]]/[[10]] are presumably per-trial velocities and
  # the fitted threshold -- confirm against the gazepath object structure.
  output$plotMould <- renderPlot({
    if(length(which(nameVar() == input$pti)) != 0) gazepath:::Mould_vel(gazepathInput()[[which(nameVar() == input$pti)]][[9]][[as.numeric(input$ii)]], gazepathInput()[[which(nameVar() == input$pti)]][[10]], TRUE)
  })
output$rob <- renderPlot({
ROB <- sapply(1:length(nameVar()), function(i) mean(gazepathInput()[[i]][[5]], na.rm = T))
MFD <- sapply(1:length(nameVar()), function(i) median(summary(gazepathInput()[[i]], fixations_only = T)[,2]))
plot(ROB, MFD)
})
output$pre <- renderPlot({
PRE <- sapply(1:length(nameVar()), function(i) mean(gazepathInput()[[i]][[6]], na.rm = T))
MFD <- sapply(1:length(nameVar()), function(i) median(summary(gazepathInput()[[i]], fixations_only = T)[,2]))
plot(PRE, MFD)
})
output$downloadData <- downloadHandler(
filename = function() {
paste('data', Sys.Date(), '.csv', sep = '')
},
content = function(file) {
write.csv(outData(), file)
}
)
})
|
89c9a7846ee72f8eb0b9c3f1b20c8b7b096fc75e | ffcac99b2e266eadf9b2f284f002baa46e7f7303 | /01.Clinical Trial Study/Q1 Solution.r | 8f2fef079d7fb5c93560dc455420cda6331bfc37 | [] | no_license | haithamzd/Clinical-Trials-Using-R | b970c3aab719612f6f7ef331555e0b12aca0c0e1 | 1a53ed49328e4eb0cc8a535e14a79f9352e78dfb | refs/heads/master | 2020-08-11T08:04:44.605029 | 2019-05-30T13:37:51 | 2019-05-30T13:37:51 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,579 | r | Q1 Solution.r | # Q1:
# Hypothesis: H0: µ1 = µ2 Ha: µ1 ≠ µ2
# R code and results
# Input data
> data = matrix(0,24,3)
> data[,1]=c(2188,1719,1875,1797,1563,2344,1719,1875,2031,1719,2016,2188,1875,1797,2500,2266,2422,2031,2500,1875,2188,1797,1875,1875)
> data[,2]=c(2461,1641,1641,2215,1559,3117,2051,2215,1805,1969,2264,3117,2051,1969,3035,2953,2625,2379,3035,1723,2297,2215,2264,2215)
> data[,3]=c('B','B','B','A','B','A','A','B','B','A','A','A','B','B','B','A','B','A','A','B','B','A','A','A')
> write.table(data,"data.P1.txt",quote = F)
> data=read.table('data.p1.txt')
> data
V1 V2 V3
1 2188 2461 B
2 1719 1641 B
3 1875 1641 B
4 1797 2215 A
5 1563 1559 B
6 2344 3117 A
7 1719 2051 A
8 1875 2215 B
9 2031 1805 B
10 1719 1969 A
11 2016 2264 A
12 2188 3117 A
13 1875 2051 B
14 1797 1969 B
15 2500 3035 B
16 2266 2953 A
17 2422 2625 B
18 2031 2379 A
19 2500 3035 A
20 1875 1723 B
21 2188 2297 B
22 1797 2215 A
23 1875 2264 A
24 1875 2215 A
### generate subsets of treatment A and B respectively
> id.B=which(data[,3]=='B')
> id.A=which(data[,3]=='A')
> id.A;id.B
[1] 4 6 7 10 11 12 16 18 19 22 23 24
[1] 1 2 3 5 8 9 13 14 15 17 20 21
> data.A=data[id.A,]
> data.B=data[id.B,]
> data.A;data.B
V1 V2 V3
4 1797 2215 A
6 2344 3117 A
7 1719 2051 A
10 1719 1969 A
11 2016 2264 A
12 2188 3117 A
16 2266 2953 A
18 2031 2379 A
19 2500 3035 A
22 1797 2215 A
23 1875 2264 A
24 1875 2215 A
V1 V2 V3
1 2188 2461 B
2 1719 1641 B
3 1875 1641 B
5 1563 1559 B
8 1875 2215 B
9 2031 1805 B
13 1875 2051 B
14 1797 1969 B
15 2500 3035 B
17 2422 2625 B
20 1875 1723 B
21 2188 2297 B
### calculate mean value
> Mean.A=mean(data.A[,2])
> Mean.B=mean(data.B[,2])
> Mean.A;Mean.B
[1] 2482.833
[1] 2085.167
### calculate var value
> Var.A=var(data.A[,2])
> Var.B=var(data.B[,2])
> Var.A;Var.B
[1] 191140.5
[1] 208393.1
### get the number of each subset
> N.A=nrow(data.A)
> N.B=nrow(data.B)
> N.A;N.B
[1] 12
[1] 12
### calculate the pooled standard error of the difference in means
> pool.sem=sqrt(((N.A-1)*Var.A+(N.B-1)*Var.B)/(N.A+N.B-2)*(1/N.A+1/N.B))
> pool.sem
[1] 182.4677
### calculate the test statistic
> T=(Mean.A-Mean.B)/(pool.sem)
> T
[1] 2.179381
### conclusion
### one sided test
> alpha=0.05
> t.critical.1=qt(1-alpha,df=N.A+N.B-2)
> t.critical.1
[1] 1.717144
### since T = 2.18 is greater than t.critical.1 = 1.72, we reject the null hypothesis at the 0.05 level (one-sided test).
### two sided test
> t.critical.2=qt(1-alpha/2,df=N.A+N.B-2)
> t.critical.2
[1] 2.073873
>
### since T = 2.18 is also greater than t.critical.2 = 2.08, we reject the null hypothesis in the two-sided test as well.
|
3147991df5b7dc0af9dadd80c54b36b3d200fd8d | 6f74c7cff5963365ab5c37b283a9979edaaf75db | /WQ_get_ereefs.R | acf98534b858b24b062e114c76530af0473b783e | [] | no_license | eatlas/NESP-TWQ-3.2.5_WQ-Metric | e39bbaa9d0a0fe4eaeed5212cdf4b9bfd98ad4dc | 39e82ce002cf7c7f9c1b6ea79b265c9ad901ed72 | refs/heads/master | 2022-05-28T14:15:32.556896 | 2022-05-26T00:09:08 | 2022-05-26T00:09:08 | 131,679,869 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 24,655 | r | WQ_get_ereefs.R | library(ncdf4)
library(sp)
library(raster)
library(dplyr)
library(tidyr)
library(ggplot2)
library(broom)
require(doParallel)
library(gridExtra)
# Project helpers and GIS layers (Polys: extraction polygons; qld coastline)
source('WQ_functions.R')
load('data/GIS/Polys.RData')
load('data/GIS/layers.RData')
plot(Polys)
# Fortify the spatial objects to plain data frames for ggplot
Polys.df = tidy(Polys)
qld.df = tidy(qld)
## Get the 2014-2017 reanalysis data=====================================================================
# NOTE(review): the case-variant pattern repeats 'Nc' twice; one of them was
# presumably meant to be 'nC' -- confirm.
flist <- list.files(path = "/export/databases2/ereefs/", pattern = "^.*\\.(nc|NC|Nc|Nc)$")
flist
##Chl: Chl_a_sum, Nap: EFI, SD: 1.7/KD490, NO3
# eReefs variable names and the short measure codes used in the output files
meas.ereef = c('Chl_a_sum','EFI','Kd_490','NO3')
meas = c('chl','nap','sd','NOx')
registerDoParallel(cores=20)
# For every monthly eReefs netCDF file (in parallel) and every measure:
# build a long data frame of (date, lat, lon, value), then split the points
# by extraction polygon and save one .RData per measure/year/month/polygon.
foreach(i=1:length(flist)) %dopar% {
  # Year and month parsed from file names of the form *_all_YYYY-MM*
  year = gsub('.*_all_([0-9]{4}).*','\\1',flist[i])
  month = gsub('.*_all_[0-9]{4}-([0-9]{2}).*','\\1',flist[i])
  nc <- nc_open(paste0("/export/databases2/ereefs/", flist[i]))
  #print(nc)
  #attributes(nc)$names
  #nc$nvars
  foreach(m=meas) %do% {
    m.e = meas.ereef[which(meas==m)]
    ## get lat and long
    dat = data.frame(Latitude=as.vector(ncvar_get(nc,'y_centre', start=c(1,1),count=c(-1,-1))),
                     Longitude=as.vector(ncvar_get(nc,'x_centre', start=c(1,1),count=c(-1,-1))))
    tm=ncvar_get(nc,'t', start=c(1),count=c(-1))
    tm = as.Date(tm, origin='1990-01-01')
    # Cross every time step with every grid cell.
    # NOTE(review): the row order produced by expand() must match the
    # vectorisation order of the ncvar_get array below -- confirm.
    dat = dat %>% expand(tm,dat)
    # Single depth layer (index 44; presumably the surface layer) for all times
    dat = dat %>% mutate(Measure=m,Value=as.vector(ncvar_get(nc,c(m.e), start=c(1,1,44,1),count=c(-1,-1,1,-1))))
    # Secchi depth from Kd_490; the commented alternative used 1.7/Kd
    if (m=='sd') dat = dat %>% mutate(Value=1.0/Value) #1.7/Value
    ## if (m=='sd') {
    ##   z_grid=ncvar_get(nc,'z_grid')
    ##   z_thk=diff(z_grid)
    ##   Kd_490=ncvar_get(nc,'Kd_490', start=c(1,1,1,1),count=c(-1,-1,-1,-1))
    ##   PAR=ncvar_get(nc,'PAR', start=c(1,1,1,1),count=c(-1,-1,-1,-1))
    ##   ff = function(p,k,z_thk) {
    ##     if (all(is.na(p)) | sum(p,na.rm=TRUE)==0) return(NA)
    ##     wet=max(which(diff(p)>0))+1
    ##     zz=wet
    ##     if (all(is.infinite(zz))) return(NA)
    ##     while(!is.na(p[zz]) & p[zz]>0.1827*p[wet]) zz=zz-1
    ##     sum(z_thk[(zz-1):wet]*k[(zz-1):wet], na.rm=TRUE)/sum(z_thk[(zz-1):wet],na.rm=TRUE)
    ##   }
    ##   sd = array(0, dim=dim(PAR)[c(1,2,4)])
    ##   pb <- txtProgressBar(max=length(sd), style=3)
    ##   ii = 0
    ##   foreach(y=1:dim(PAR)[1],.combine=c) %do% { #dim(PAR)[1]) %do% {
    ##     foreach(x=1:dim(PAR)[2],.combine=c) %do% { #dim(PAR)[2]) %do% {
    ##       foreach(t=1:dim(PAR)[4], .combine=c) %do% { #dim(PAR)[4]) %do% {
    ##         a=ff(PAR[y,x,,t],Kd_490[y,x,,t],z_thk)
    ##         sd[y,x,t] = 1.7/a
    ##         ii = ii+1
    ##         setTxtProgressBar(pb,ii)
    ##       }
    ##     }
    ##   }
    ##   Value=as.vector(sd)
    ##   close(pb)
    ##   ## dat = data.frame(Latitude=as.vector(ncvar_get(nc,'y_centre', start=c(1,1),count=c(-1,-1))),
    ##   ##                  Longitude=as.vector(ncvar_get(nc,'x_centre', start=c(1,1),count=c(-1,-1))))
    ##   ## tm=ncvar_get(nc,'t', start=c(1),count=c(-1))
    ##   ## tm = as.Date(tm, origin='1990-01-01')
    ##   ## z=as.vector(ncvar_get(nc,'z_centre', start=c(1),count=c(-1)))
    ##   ## dat = dat %>% expand(tm,z,dat)
    ##   ## z_grid=ncvar_get(nc,'z_grid')
    ##   ## z_thk=diff(z_grid)
    ##   ## Kd_490=ncvar_get(nc,'Kd_490', start=c(1,1,1,1),count=c(-1,-1,-1,-1))
    ##   ## PAR=ncvar_get(nc,'PAR', start=c(1,1,1,1),count=c(-1,-1,-1,-1))
    ##   ## ff = function(p,k,z_thk) {
    ##   ##   if (all(is.na(p)) | sum(p,na.rm=TRUE)==0) return(NA)
    ##   ##   wet=max(which(diff(p)>0))
    ##   ##   zz=wet
    ##   ##   if (all(is.infinite(zz))) return(NA)
    ##   ##   while(!is.na(p[zz]) & p[zz]>0.1827*p[wet]) zz=zz-1
    ##   ##   sum(z_thk[(zz-1):wet]*k[(zz-1):wet], na.rm=TRUE)/sum(z_thk[(zz-1):wet],na.rm=TRUE)
    ##   ## }
    ##   ## sd = array(0, dim=dim(PAR)[c(1,2,4)])
    ##   ## pb <- txtProgressBar(max=length(sd), style=3)
    ##   ## ii = 0
    ##   ## foreach(y=1:dim(PAR)[1],.combine=c) %do% { #dim(PAR)[1]) %do% {
    ##   ##   foreach(x=1:dim(PAR)[2],.combine=c) %do% { #dim(PAR)[2]) %do% {
    ##   ##     foreach(t=1:dim(PAR)[4], .combine=c) %do% { #dim(PAR)[4]) %do% {
    ##   ##       a=ff(PAR[y,x,,t],Kd_490[y,x,,t],z_thk)
    ##   ##       sd[y,x,t] = 1.7/a
    ##   ##       ii = ii+1
    ##   ##       setTxtProgressBar(pb,ii)
    ##   ##     }
    ##   ##   }
    ##   ## }
    ##   ## a=as.vector(sd)
    ##   ## a= apply(PAR[98:100,98:100,,],3,'[')
    ##   ## ff = function(p,k,z_thk) {
    ##   ##   if (all(is.na(p))) return(NA)
    ##   ##   wet=max(which(diff(p)>0))
    ##   ##   zz=wet
    ##   ##   while(p[zz]>0.1827*p[wet]) zz=zz-1
    ##   ##   zz
    ##   ## }
    ##   ## a = apply(list(PAR[99:100,99:100,,1:5],Kd_490[99:100,99:100,,1:5]),
    ##   ##   MARGIN=c(1,2,3,4,5),
    ##   ##   function(x) print(x))
    ##   ## a =apply(PAR[99:100,99:100,,1:5],MARGIN=c(1,2,4),FUN=ff,k=Kd_490[99:100,99:100,,],z_thk=z_thk)
    ##   ## Kd_490[99:100,99:100,,1:5]
    ##   ## a = Map(ff,p=PAR[99:100,99:100,,],k=Kd_490[99:100,99:100,,],z_thk=z_thk)
    ##   ## a=apply(PAR[,,,],c(1,2,4),ff,z_thk=z_thk,Kd_490)
    ##   ## kdmean = sum(z_thk[(zz-1):wet]*Kd_490[(zz-1):wet])/sum(z_thk[(zz-1):wet]))
    ##   dat = dat %>% mutate(Value=Value)
    ## }
    # EFI (non-algal particulates) converted from kg to mg
    if (m=='nap') dat = dat %>% mutate(Value=Value*1000)
    dat = dat %>% filter(!is.na(Latitude),!is.na(Longitude))
    coordinates(dat) = ~Longitude+Latitude
    # Point-in-polygon assignment: which extraction polygon each cell falls in
    pts=sp:::over(dat,Polys)
    #pts = pts[!is.na(pts)]
    # Save one file per polygon (points outside the polygon become FALSE)
    foreach (z=1:length(names(Polys))) %do% {
      pts.1 = if_else(is.na(pts),FALSE,if_else(pts==z,TRUE,FALSE))
      ereefs=as.data.frame(dat[pts.1,])
      save(ereefs,file=paste0('eReefs/data/raw/ereefs_',m,'__',year,'__',month,'___',names(Polys)[[z]],'.RData'))
    }
  }
}
#load(paste0('eReefs/data/raw/ereefs_',sd,'__',2014,'__',month,'___',names(Polys)[[z]],'.RData'))
## ## Get the 2010-2014 926 control run data=====================================================================
## #flist <- list.files(path = "/export/databases2/ereefs/gbr4_bgc_926/", pattern = "^.*\\.(nc|NC|Nc|Nc)$")
## flist <- list.files(path = "data/eReefs926", pattern = "^.*\\.(nc|NC|Nc|Nc)$")
## flist
## ##Chl: Chl_a_sum, Nap: EFI, SD: 1.7/KD490
## meas.ereef = c('Chl_a_sum','EFI','Kd_490','NO3')
## meas = c('chl','nap','sd','NOx')
## registerDoParallel(cores=10)
## foreach(i=1:length(flist)) %dopar% {
## year = gsub('.*_all_([0-9]{4}).*','\\1',flist[i])
## month = gsub('.*_all_[0-9]{4}-([0-9]{2}).*','\\1',flist[i])
## #nc <- nc_open('data/eReefs926/gbr4_bgc_simple_2012-06.nc')
## #nc <- nc_open(paste0("/export/databases2/ereefs/gbr4_bgc_926/", flist[i]))
## nc <- nc_open(paste0("data/eReefs926/", flist[i]))
## foreach(m=meas[-3]) %do% {
## m.e = meas.ereef[which(meas==m)]
## ## get lat and long
## dat = data.frame(Latitude=as.vector(ncvar_get(nc,'y_centre', start=c(1,1),count=c(-1,-1))),
## Longitude=as.vector(ncvar_get(nc,'x_centre', start=c(1,1),count=c(-1,-1))))
## tm=ncvar_get(nc,'t', start=c(1),count=c(-1))
## tm = as.Date(tm, origin='1990-01-01')
## dat = dat %>% expand(tm,dat)
## dat = dat %>% mutate(Measure=m,Value=as.vector(ncvar_get(nc,c(m.e), start=c(1,1,44,1),count=c(-1,-1,1,-1))))
## if (m=='sd') {
## z_grid=ncvar_get(nc,'z_grid')
## z_thk=diff(z_grid)
## Kd_490=ncvar_get(nc,'Kd_490', start=c(1,1,1,1),count=c(-1,-1,-1,-1))
## PAR=ncvar_get(nc,'PAR', start=c(1,1,1,1),count=c(-1,-1,-1,-1))
## ff = function(p,k,z_thk) {
## if (all(is.na(p)) | sum(p,na.rm=TRUE)==0) return(NA)
## wet=max(which(diff(p)>0))+1
## zz=wet
## if (all(is.infinite(zz))) return(NA)
## while(!is.na(p[zz]) & p[zz]>0.1827*p[wet]) zz=zz-1
## sum(z_thk[(zz-1):wet]*k[(zz-1):wet], na.rm=TRUE)/sum(z_thk[(zz-1):wet],na.rm=TRUE)
## }
## sd = array(0, dim=dim(PAR)[c(1,2,4)])
## pb <- txtProgressBar(max=length(sd), style=3)
## ii = 0
## foreach(y=1:dim(PAR)[1],.combine=c) %do% { #dim(PAR)[1]) %do% {
## foreach(x=1:dim(PAR)[2],.combine=c) %do% { #dim(PAR)[2]) %do% {
## foreach(t=1:dim(PAR)[4], .combine=c) %do% { #dim(PAR)[4]) %do% {
## a=ff(PAR[y,x,,t],Kd_490[y,x,,t],z_thk)
## sd[y,x,t] = 1.7/a
## ii = ii+1
## setTxtProgressBar(pb,ii)
## }
## }
## }
## Value=as.vector(sd)
## close(pb)
## dat = dat %>% mutate(Value=Value)
## }
## if (m=='nap') dat = dat %>% mutate(Value=Value*1000)
## dat = dat %>% filter(!is.na(Latitude),!is.na(Longitude))
## coordinates(dat) = ~Longitude+Latitude
## pts=sp:::over(dat,Polys)
## #pts = pts[!is.na(pts)]
## foreach (z=1:length(names(Polys))) %do% {
## pts.1 = if_else(is.na(pts),FALSE,if_else(pts==z,TRUE,FALSE))
## ereefs=as.data.frame(dat[pts.1,])
## save(ereefs,file=paste0('eReefs/data/raw/ereefs_',m,'__',year,'__',month,'___',names(Polys)[[z]],'.RData'))
## }
## }
## }
## ff = function(x) {
## wet=max(which(diff(x)>0))+1
## if (is.infinite(wet)) return(NA)
## zz=wet
## while(x[zz]>0.1827*x[wet]) zz = zz-1
## return(zz)
## }
## nc <- nc_open(paste0("/export/databases2/ereefs/", flist[i]))
## dat1 = data.frame(Latitude=as.vector(ncvar_get(nc,'y_centre', start=c(50,50),count=c(1,1))),
## Longitude=as.vector(ncvar_get(nc,'x_centre', start=c(50,50),count=c(1,1))))
## (Kd_490=as.vector(ncvar_get(nc,'Kd_490', start=c(50,50,1,1),count=c(1,1,-1,1))))
## (PAR=as.vector(ncvar_get(nc,'PAR', start=c(50,50,1,1),count=c(1,1,-1,1))))
## z_grid=ncvar_get(nc,'z_grid')
## z_thk=diff(z_grid)
## (wet=max(which(diff(PAR)>0))+1)
## zz=wet
## while(PAR[zz]>0.1827*PAR[wet]) zz = zz-1
## (kdmean = sum(z_thk[(zz-1):wet]*Kd_490[(zz-1):wet])/sum(z_thk[(zz-1):wet]))
## NOTE(review): everything inside this 'if (1==2)' guard is dead scratch code -- it never runs.
## It depends on objects ('nc', 'flist', 'Polys', 'year', 'month', ...) defined elsewhere;
## kept for reference only.
if (1==2) {
## This first bit is a visual of the first day of the month
lat=as.vector(ncvar_get(nc,'y_centre', start=c(1,1),count=c(-1,-1)))
#lat=as.vector(ncvar_get(nc,'y_grid', start=c(1,1),count=c(-1,-1)))
lon=as.vector(ncvar_get(nc,'x_centre', start=c(1,1),count=c(-1,-1)))
tm=ncvar_get(nc,'t', start=c(1),count=c(1))
tm = as.Date(tm, origin='1990-01-01')
chl=as.vector(ncvar_get(nc,c('Chl_a_sum'), start=c(1,1,44,1),count=c(-1,-1,1,1)))
dat = data.frame(Date=tm, Latitude=lat, Longitude=lon, Chl=chl)
#dat %>% filter(Date==Date[1]) %>%
#ggplot(aes(y=Latitude,x=Longitude)) + geom_tile(aes(fill=Chl))
ggplot(dat, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + scale_color_gradientn(colors=(heat.colors(10))) + coord_equal()
#ggplot(dat, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=0.001) + coord_equal()
## Unfortunately, it is an irregular grid.. This means that we cannot simply put it into a raster...
dat = dat %>% filter(!is.na(Latitude),!is.na(Longitude))
coordinates(dat) = ~Longitude+Latitude
pts=sp:::over(dat,Polys)
gg=list()
## One map per polygon: clip points, square the bounding box, overlay coastline + polygons.
for (i in 1:length(names(Polys))) {
print(i)
pts.1 = if_else(is.na(pts),FALSE,if_else(pts==i,TRUE,FALSE))
a=dat[pts.1,]
a=as.data.frame(dat[pts.1,]) %>% filter(!is.na(Chl))
e = with(a, c(min(Longitude), max(Longitude), min(Latitude), max(Latitude)))
ed1 = e[2]-e[1]
ed2= e[4]-e[3]
if (ed2>ed1) {
e[1:2] = mean(e[1:2]) + c(-1,1)*(e[4]-e[3])/2
}else {
e[3:4] = mean(e[3:4]) + c(-1,1)*(e[2]-e[1])/2
}
gg[[i]]=ggplot(a, aes(y=Latitude,x=Longitude)) +
geom_polygon(data=Polys.df, aes(y=lat, x=long, group=group), fill=NA, color='grey') +
geom_polygon(data=qld.df, aes(y=lat, x=long, group=group)) +
geom_point(aes(color=Chl)) +
# geom_raster(aes(fill=Chl), interpolate = TRUE) +
#stat_summary_2d(geom='tile', aes(z=Chl), binwidth=0.03)+
theme_classic() + theme(panel.background = element_rect(color='black',fill='lightblue')) +
coord_equal(, xlim=e[1:2], ylim=e[3:4]) +
scale_color_gradientn(colors=heat.colors(10))+
ggtitle(paste0(names(Polys)[[i]],', ',year,'-',month,'-',01))
ggsave(gg[[i]], file=paste0('eReefs/data/raw/locations_',year,'__',month,'___',names(Polys)[[i]],'.pdf'))
}
## Interpolate the irregular grid onto a regular one with akima, then tile-plot it.
library(akima)
library(ggplot2)
my.df.interp <- with(a %>% filter(!is.na(Latitude),!is.na(Longitude), !is.na(Chl)), interp(x = Longitude, y = Latitude, z = Chl, nx = 30, ny = 30) )
my.df.interp.xyz <- as.data.frame(interp2xyz(my.df.interp))
names(my.df.interp.xyz) <- c("Longitude", "Latitude", "Chl")
ggplot(my.df.interp.xyz, aes(y=Latitude,x=Longitude)) + geom_tile(aes(fill=Chl))
## Per-polygon scatter plots (one hard-coded block per polygon index).
pts.1 = if_else(is.na(pts),FALSE,if_else(pts==1,TRUE,FALSE))
a=dat[pts.1,]
a=as.data.frame(dat[pts.1,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.2 = if_else(is.na(pts),FALSE,if_else(pts==2,TRUE,FALSE))
a=as.data.frame(dat[pts.2,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.3 = if_else(is.na(pts),FALSE,if_else(pts==3,TRUE,FALSE))
a=as.data.frame(dat[pts.3,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.4 = if_else(is.na(pts),FALSE,if_else(pts==4,TRUE,FALSE))
a=as.data.frame(dat[pts.4,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.5 = if_else(is.na(pts),FALSE,if_else(pts==5,TRUE,FALSE))
a=as.data.frame(dat[pts.5,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.6 = if_else(is.na(pts),FALSE,if_else(pts==6,TRUE,FALSE))
a=as.data.frame(dat[pts.6,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.7 = if_else(is.na(pts),FALSE,if_else(pts==7,TRUE,FALSE))
a=as.data.frame(dat[pts.7,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.8 = if_else(is.na(pts),FALSE,if_else(pts==8,TRUE,FALSE))
a=as.data.frame(dat[pts.8,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.9 = if_else(is.na(pts),FALSE,if_else(pts==9,TRUE,FALSE))
a=as.data.frame(dat[pts.9,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.10 = if_else(is.na(pts),FALSE,if_else(pts==10,TRUE,FALSE))
a=as.data.frame(dat[pts.10,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.11 = if_else(is.na(pts),FALSE,if_else(pts==11,TRUE,FALSE))
a=as.data.frame(dat[pts.11,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.12 = if_else(is.na(pts),FALSE,if_else(pts==12,TRUE,FALSE))
a=as.data.frame(dat[pts.12,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
pts.13 = if_else(is.na(pts),FALSE,if_else(pts==13,TRUE,FALSE))
a=as.data.frame(dat[pts.13,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=5,shape=16) + coord_equal()
pts.24 = if_else(is.na(pts),FALSE,if_else(pts==24,TRUE,FALSE))
a=as.data.frame(dat[pts.24,])
ggplot(a, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=3) + coord_equal()
## Rasterize the point data (sp + raster) and plot the resulting grid.
spdf <- with(dat %>% filter(!is.na(Latitude),!is.na(Longitude)), SpatialPointsDataFrame( data.frame( x = Longitude , y = Latitude ) , data = data.frame( z = Chl ) ))
r <- raster( nrows = 600 , ncols = 180 , ext = extent(spdf) )
rf <- rasterize( spdf , r , field = "z" , fun = mean )
plot(rf)
d = data.frame(rasterToPoints(rf))
ggplot(d, aes(y=y, x=x)) + geom_tile(aes(fill=layer))
r = raster(ncols=600,nrows=180)
d = dat %>% filter(Date==Date[1],!is.na(Latitude),!is.na(Longitude))
coordinates(d) <- ~Longitude+Latitude
r = rasterize(d,r, fun='mean')
r = dat %>% rename(x=Longitude,y=Latitude,z=Chl) %>%
rasterFromXYZ()
library(sp)
spdf <- with(dat %>% filter(!is.na(Latitude),!is.na(Longitude)), SpatialPointsDataFrame( data.frame( x = Longitude , y = Latitude ) , data = data.frame( z = Chl ) ))
plot(spdf, cex=0.001)
library(raster)
e <- extent(spdf)
b <- brick(paste0("/media/murray/Seagate Expansion Drive/", flist[2]),varname='Chl_a_sum', level=47)
b = t(b)
b=flip(b,'x')
extent(b) <- e
nlayers(b)
names(b)
plot(b)
plot(b[[1]])
d = rasterToPoints(b[[1]])
ggplot(data.frame(d), aes(y=y, x=x)) + geom_tile(aes(fill=X1))
## Re-read the NetCDF directly and inspect its variables/dimensions.
chl=ncvar_get(nc,c('Chl_a_sum'), start=c(1,1,47,1),count=c(-1,-1,1,1))
nc <- nc_open(paste0("/media/murray/Seagate Expansion Drive/", flist[2]))
nc <- nc_open(paste0("/export/databases2/ereefs/", flist[2]))
print(nc)
attributes(nc)$names
nc$nvars
v1 <- nc$var[['y_grid']]
lat=ncvar_get(nc,v1, start=c(1,1),count=c(-1,-1))
lat=as.vector(ncvar_get(nc,'y_centre', start=c(1,1),count=c(-1,-1)))
lon=as.vector(ncvar_get(nc,'x_centre', start=c(1,1),count=c(-1,-1)))
tm=ncvar_get(nc,'t', start=c(1),count=c(1))
tm = as.Date(tm, origin='1990-01-01')
chl=as.vector(ncvar_get(nc,c('Chl_a_sum'), start=c(1,1,47,1),count=c(-1,-1,1,1)))
dat = data.frame(Date=tm, Latitude=lat, Longitude=lon, Chl=chl)
dat %>% filter(Date==Date[1]) %>%
ggplot(aes(y=Latitude,x=Longitude)) + geom_tile(aes(fill=Chl))
ggplot(dat, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal()
ggplot(dat, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=0.001) + coord_equal()
ggplot(dat, aes(y=Latitude,x=Longitude)) + geom_point(aes(color=Chl), size=1) + coord_equal(ratio=1/cos(mean(dat$Latitude,na.rm=TRUE)*pi/180))
r<-rasterFromXYZ(data.frame(spdf$x,spdf$y,spdf$z))
## Alternative rasterization: build a template raster scaled to the data extent.
##=======
library(sp)
spdf <- with(dat %>% filter(!is.na(Latitude),!is.na(Longitude)), SpatialPointsDataFrame( data.frame( x = Longitude , y = Latitude ) , data = data.frame( z = Chl ) ))
plot(spdf, cex=0.001)
library(raster)
e <- extent(spdf)
# Determine ratio between x and y dimensions
ratio <- ( e@xmax - e@xmin ) / ( e@ymax - e@ymin )
# Create template raster to sample to
r<-rasterFromXYZ(data.frame(spdf$x,spdf$y,spdf$z))
r <- raster( nrows = 100 , ncols = floor( 100 * ratio ) , ext = extent(spdf) )
rf <- rasterize( spdf , r , field = "z" , fun = mean )
# Attributes of our new raster (# cells quite close to original data)
rf
# We can then plot this using `geom_tile()` or `geom_raster()`
rdf <- data.frame( rasterToPoints( rf ) )
ggplot( NULL ) + geom_raster( data = rdf , aes( x , y , fill = layer ) )
ggplot( NULL ) + geom_tile( data = rdf , aes( x , y , fill = layer ) )
b <- brick(paste0("/media/murray/Seagate Expansion Drive/", flist[2]),varname='Chl_a_sum', level=47)
b
nlayers(b)
names(b)
plot(b)
## Remap curvilinear x/y coordinates to lon/lat via akima interpolation.
lat=raster(paste0("/media/murray/Seagate Expansion Drive/", flist[2]),varname='y_centre')
lon=raster(paste0("/media/murray/Seagate Expansion Drive/", flist[2]),varname='x_centre')
remap.tbl <- data.frame(coordinates(lon),
lon=as.vector(lon),lat=as.vector(lat))
ggplot(remap.tbl, aes(y=y, x=x)) + geom_point(size=0.0001)
ggplot(remap.tbl, aes(y=lat, x=lon)) + geom_point(size=0.0001)
library(akima)
remap.tbl = remap.tbl %>% filter(!is.na(lat), !is.na(lon))
lon.pts <- with(remap.tbl, seq(min(lon,na.rm=TRUE), max(lon,na.rm=TRUE), l=1000))
lat.pts <- with(remap.tbl, seq(min(lat,na.rm=TRUE), max(lat,na.rm=TRUE), l=1000))
x.pts<-interpp(remap.tbl$lon,remap.tbl$lat,remap.tbl$x,
xo=lon.pts,yo=lat.pts)
y.pts<-interpp(remap.tbl$lon,remap.tbl$lat,remap.tbl$y,
xo=lon.pts,yo=lat.pts)
temp <- extract(b[[1]],data.frame(x.pts$z,y.pts$z),method="bilinear")
d <- data.frame(lon=x.pts$z, lat=y.pts$z, temp)
head(d)
ggplot(d, aes(y=lat,x=lon)) + geom_tile(aes(fill=temp)) + coord_equal()
x.pts<-interp(remap.tbl$lon,remap.tbl$lat,remap.tbl$x,
xo=lon.pts,yo=lat.pts)
y.pts<-interpp(remap.tbl$lon,remap.tbl$lat,remap.tbl$y,
xo=lon.pts,yo=lat.pts)
#pts <- data.frame(lon=lon.pts,lat=lat.pts,x=x.pts$z,y=y.pts$z)
#ggplot(dat, aes(y=lat,x=long)) + geom_tile(aes(fill=Value)) + coord_equal()
temp <- extract(b[[1]],data.frame(as.vector(x.pts$z),as.vector(y.pts$z)),method="bilinear")
d <- data.frame(lon=as.vector(x.pts$z), lat=as.vector(y.pts$z), temp)
head(d)
ggplot(d, aes(y=lat,x=lon)) + geom_tile(aes(fill=temp)) + coord_equal()
geo.r <- raster(e)
res(geo.r) <- c(0.25,0.25)
r.coords.x <- interp(remap.tbl$lon,remap.tbl$lat,remap.tbl$x,
xo=xFromCol(geo.r),yo=yFromRow(geo.r))
r.coords.y <- interp(remap.tbl$lon,remap.tbl$lat,remap.tbl$y,
xo=xFromCol(geo.r),yo=yFromRow(geo.r))
r.coords <- expand.grid(lon=xFromCol(geo.r),
lat=yFromRow(geo.r))
r.coords$x <- as.vector(r.coords.x$z)
r.coords$y <- as.vector(r.coords.y$z)
r.coords.sp <- r.coords
coordinates(r.coords.sp) <- ~lat +lon
r.coords$temp <- extract(b[[1]],r.coords.sp,method="bilinear")
coordinates(remap.tbl) <- ~x +y
temp <- extract(b[[1]],data.frame(x.pts,y.pts),method="bilinear")
#temp <- extract(b[[1]],data.frame(r.coords$lon,r.coords$lat),method="simple")
d <- data.frame(lon=r.coords$lon, lat=r.coords$lat, temp)
ggplot(d, aes(y=lat,x=lon)) + geom_tile(aes(fill=temp)) + coord_equal()
geo.r <- setValues(geo.r,r.coords$temp)
plot(geo.r)
ggplot(r.coords, aes(y=lat,x=long)) + geom_tile(aes(fill=temp)) + coord_equal()
## Manual reshape of the chlorophyll matrix into long format for plotting.
##=========
chl=ncvar_get(nc,c('Chl_a_sum'), start=c(1,1,47,1),count=c(-1,-1,1,1))
dat = data.frame(lat=nrow(chl):1, chl) %>% gather(long,Value,-lat) %>% mutate(long=as.numeric(str_sub(long,2)))
dat$lat = lat
dat$long=lon
ggplot(dat, aes(y=lat,x=long)) + geom_tile(aes(fill=Value)) + coord_equal()
ggplot(dat, aes(y=lat,x=long)) + geom_point(aes(color=Value), size=0.0001) + coord_equal()
dat = data.frame(Date=tm, Latitude=lat, Longitude=lon, Chl=chl)
dat %>% filter(Date==Date[1]) %>%
ggplot(aes(y=Latitude,x=Longitude)) + geom_tile(aes(fill=Chl))
a=ncvar_get(nc,c('Chl_a_sum'), start=c(1,1,47,1),count=c(-1,-1,1,-1))
image(a[,,1])
ncatt_get(nc, attributes(nc$var)$names[1])
chla_mean <- ncvar_get(nc, attributes(nc$var)$names[1])
attributes(nc$dim)$names
nc_lat <- ncvar_get( nc, attributes(nc$dim)$names[1])
nc_lon <- ncvar_get( nc, attributes(nc$dim)$names[2])
library(raster)
b <- brick(paste0("/media/murray/Seagate Expansion Drive/", flist[2]),varname='latitude')
}
|
cde5a0b448ef2f15505c5f2eaddb55b512fb4560 | c8d5e5e8eae8057746f3d72be582c30c1007f285 | /R/06_plot_hydroMet.R | 01474336b4041c1956d63769c0dd30c603ab2c05 | [] | no_license | cran/hydroToolkit | 2c3e3a42fe1cc1f745d5760642d02bf87bf6ea54 | 260c3b1b33cf195317f23d7c5635e6348bbb2472 | refs/heads/master | 2022-07-07T09:56:47.244814 | 2020-05-16T09:00:02 | 2020-05-16T09:00:02 | 264,451,856 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 121,744 | r | 06_plot_hydroMet.R | # **********************************************************
# Author : Ezequiel Toum
# Licence : GPL V3
# Institution : IANIGLA-CONICET
# e-mail : etoum@mendoza-conicet.gob.ar
# **********************************************************
#
#' Methods to easily use \code{ggplot2} or \code{plotly} (interactive)
#'
#' @description This method allows you to make plots (using simple and expressive arguments) of the variables contained inside an \code{hydroMet_XXX} object. The plot outputs can be static (\code{ggplot2}) or interactive (\code{plotly}).
#'
#' @param obj a valid \code{hydroMet_XXX} object.
#' @param slot_name string(s) with the name of the slot(s) to use in plotting.
#' @param col_number numeric (vector) with the column's variable to plot. In case you decide to merge slots you must provide a list in which each element contains the column numbers of the variable to plot.
#' @param interactive logical. Default value, \code{FALSE}, will return a \code{ggplot2} class object. Otherwise you will get a \code{plotly} one.
#' @param line_type string with line dash type (\code{ggplot2}) or mode in \code{plotly} case. \code{ggplot2}: \code{'solid'} (default value), \code{'twodash'}, \code{'longdash'}, \code{'dotted'}, \code{'dotdash'}, \code{'dashed'} or \code{'blank'}. \code{plotly}: \code{'lines'} (default value), \code{'lines+markers'} or \code{'markers'}.
#' @param line_color string with a valid \code{color}. See 'colors()' or \href{http://www.stat.columbia.edu/~tzheng/files/Rcolor.pdf}{Rcolor document}.
#' @param x_lab string with \code{x axis} label.
#' @param y_lab string with \code{y axis} label. In case you use \code{double_yaxis} argument you must supply both \code{c('ylab', 'y2lab')}.
#' @param title_lab string with the title of the plot. Default is a plot without title.
#' @param legend_lab string with plot label(s) name(s). \bold{NOTE}: \code{ggplot2} double_yaxis does not support \code{legend_lab} in this package version, so giving values to this argument will be harmless (it is simply ignored).
#' @param double_yaxis numeric vector with either \code{1} (= main axis - left) or \code{2} (= secondary axis - right) indicating whether the variable should be plotted in either left or right axis. \bold{NOTE}: in this package version \code{ggplot2} supports just one line plot for each 'y' axis.
#' @param list_extra list with the \code{ggplot2} arguments to pass. This argument was designed to allow the user to modify \code{ggplot2} arguments (you can find nice examples in \href{http://www.sthda.com/english/wiki/ggplot2-title-main-axis-and-legend-titles}{ggplot2 - Essentials}) \bold{NOTE}: in this package version this argument doesn't make sense for \code{plotly} (except for \code{scatter} plot in \code{hydroMet_compact} class).
#' @param from string (or \code{POSIXct} - valid only in 'BDHI' and 'IANIGLA') with the starting \code{Date}. You can use \code{'from'} without \code{'to'}. In this case you will subset your data 'from' till the end.
#' @param to string (or \code{POSIXct} - valid only in 'BDHI' and 'IANIGLA') with the ending \code{Date}. You can use \code{'to'} without \code{'from'}. In this case you will subset your data from the beginning till 'to'.
#' @param scatter numeric vector of length two with the column number to plot as scatter. The first variable (column number) will be the \code{'x'} variable and the second one the \code{'y'} variable. This argument will work just for class \code{hydroMet_compact}.
#'
#' @return A \code{ggplot2} or \code{plotly} objects to analyze your data.
#'
#' @import ggplot2
#' @importFrom plotly plot_ly add_trace layout %>% ggplotly
#' @importFrom reshape2 melt
#' @importFrom grDevices colors
#' @importFrom lubridate is.POSIXct
#'
#' @export
#'
#' @examples
#' # Path to file
#' dgi_path <- system.file('extdata', package = "hydroToolkit")
#' file_name <- list.files(path = dgi_path, pattern = 'Toscas')
#'
#' # Read Toscas
#' var_nom <- list(slotNames(x = 'hydroMet_DGI')[2:7])
#' names(var_nom) <- file_name
#'
#' # Load Toscas meteo station data
#' toscas_dgi <- create_hydroMet(class_name = 'DGI')
#' toscas_dgi <- build_hydroMet(obj = toscas_dgi, slot_list = var_nom, path = dgi_path)
#'
#' # Plot mean air temperature
#' plot_hydroMet(obj = toscas_dgi, col_number = 2, slot_name = 'tmean',
#' legend_lab = 'Tmean(ºC)' )
#'
#' # Now let's plot an interactive graph
#' plot_hydroMet(obj = toscas_dgi, col_number = 2, slot_name = 'tmean',
#' interactive = TRUE, y_lab = 'Tmean(ºC)' )
#'
## Generic: S4 dispatch on the class of 'obj' (one plot_hydroMet method per hydroMet subclass).
setGeneric(name = 'plot_hydroMet',
           def = function(obj, slot_name, col_number, interactive = FALSE,
                          line_type = NULL, line_color = 'dodgerblue',
                          x_lab = 'Date', y_lab = 'y', title_lab = NULL,
                          legend_lab = NULL, double_yaxis = NULL,
                          list_extra = NULL, from = NULL, to = NULL, scatter = NULL)
           {
             # Trigger S4 method dispatch; the roxygen block above documents all arguments.
             standardGeneric('plot_hydroMet')
           })
#' @describeIn plot_hydroMet plot method for BDHI class
## BDHI
setMethod(f = 'plot_hydroMet',
signature = 'hydroMet_BDHI',
definition = function(obj, slot_name, col_number, interactive = FALSE,
line_type = NULL, line_color = 'dodgerblue',
x_lab = 'Date', y_lab = 'y', title_lab = NULL,
legend_lab = NULL, double_yaxis = NULL,
list_extra = NULL, from = NULL, to = NULL)
{
###
# Datos de prueba
# setwd('/home/ezequiel/Documentos/CONICET/08-R_Packages/hydroToolsKit/Datos_prueba/BDHI')
# list.files()
# qmd <- read_BDHI(file = 'Qmd_Atuel_La_Angostura', colName = 'Qmd', timeStep = 'day' )
# hr <- read_BDHI(file = 'HR_Grande_Los_Mayines', colName = 'HR', timeStep = 'day/3' )
# patm <- read_BDHI(file = 'Patm_Laguna_Fea_Laguna_Fea', colName = 'Patm', timeStep = '4h')
# swe <- read_BDHI(file = 'EAN_Atuel_Laguna_Atuel', colName = 'swe', timeStep = 'day')
#
# prueba <- create_hydroMet(class_name = 'BDHI')
# obj <- set_hydroMet(obj = prueba, Qmd = qmd, hr = hr, patm = patm, swe = swe)
#
# slot_name <- c('Qmd')
# col_number <- 2
#
# double_yaxis <- NULL
# line_type <- 'lines'
# line_color <- 'dodgerblue'
# title_lab <- NULL
# legend_lab <- 'Qmd(m3/s)' # no tiene sentido en double axis
# x_lab <- 'Date'
# y_lab <- 'Q(m3/s)'
# from <- '1990-01-01'
# to <- '1995-12-31'
# list_extra <- list(
# theme(
# axis.title.x = element_text(color="blue", size=14, face="bold"),
# axis.title.y = element_text(color="#993333", size=14, face="bold") )
# )
###
# Condicionales
## slot_name
n_slot_name <- length(slot_name) # lo genero para comparar con long de otros argumentos
# slot_name: caracter
if( is.character(slot_name) == FALSE ){
return('slot_name argument must be of class character')
}
# slot_name: se corresponden con los slots
aux <- match(x = slot_name, table = slotNames('hydroMet_BDHI')[1:13])
if( is.na( sum(aux) ) == TRUE ){
return('Unless one of the slot_name arguments is incorrect')
}
rm(aux)
## col_number
col_position <- Reduce(f = c, x = col_number) # posicion de las columnas a plotear
n_col_number <- length( col_position ) # cantidad
# col_number: numerico o lista
if(n_slot_name == 1){
# col_number como vector numerico
if( is.numeric(col_number) == FALSE ){
return('col_number argument must be of class numeric')
}
} else {
# col_number como lista que contiene numeros
if( is.list(col_number) == FALSE ){
return('col_number must be of list class')
}
# contiene numeros?
if( is.numeric(Reduce(f = c, x = col_number) ) == FALSE ){
return('Each list element should contain numeric vectors')
}
}
# col_number: mayor o igual a uno
col_position <- as.integer(col_position) # coerciono a entero para evitar columnas decimales
if(length( which(col_position <= 1) ) >= 1){
return('col_number arguments to plot must be >= 1')
}
## interactive
# interactive: logico
if( is.logical(interactive) == FALSE){
return('interactive must be either TRUE or FALSE')
}
# interactive: uno solo
if( length(interactive) > 1 ){
return('interactive accepts a single value')
}
## line_type
n_line_type <- length(line_type)
# line_type: asigno valores / compruebo sus valores
if(n_line_type == 0) {
# asigno valores por defecto
if(interactive == FALSE){
# ggplot2
line_type <- rep('solid', n_col_number)
} else{
# plotly
line_type <- rep('lines', n_col_number)
}
} else {
# misma longitud que col_number
if( n_line_type != n_col_number ){
return('line_type must have the same length as col_number')
}
# long_type valido
if(interactive == FALSE){
# ggplot2
valid_line_type <- c('solid', 'twodash', 'longdash', 'dotted', 'dotdash', 'dashed', 'blank')
correspondencia <- match(x = line_type, table = valid_line_type)
if( is.na( sum(correspondencia) ) == TRUE ){
aux_var <- line_type[ which(is.na(correspondencia) ) ]
return( paste0(aux_var, ' ', 'is not a valid line_type for ggplot2 graph') )
}
} else {
# plotly
valid_line_type <- c('lines', 'lines+markers', 'markers')
correspondencia <- match(x = line_type, table = valid_line_type)
if( is.na( sum(correspondencia) ) == TRUE ){
aux_var <- line_type[ which(is.na(correspondencia) ) ]
return( paste0(aux_var, ' ', 'is not a valid line_type for plotly graph') )
}
}
}
## line_color
n_line_color <- length(line_color)
# line_color: misma longitud que col_number
if( n_line_color != n_col_number ){
return('line_color must be of the same length as col_number')
}
# line_color: caracter
if( is.character(line_color) == FALSE ){
return('line_color must be of character class')
}
# line_color: color valido
# paleta_match <- match(x = line_color, table = colors())
#
# if( is.na(sum(paleta_match) ) == T ) {
# aux_var <- line_color[ which(is.na(paleta_match) ) ]
# return( paste0(aux_var, ' ', 'is not a valid line_color. Find valid color names with colors()') )
#
# }
## x_lab
# x_lab: caracter
if( is.character(x_lab) == FALSE ){
return('x_lab must be of class character')
}
# x_lab: uno solo
if( length(x_lab) != 1){
return('x_lab must be of length one')
}
## y_lab
# y_lab: caracter
if( is.character(y_lab) == FALSE ){
return('y_lab must be of class character')
}
# y_lab: uno o dos
if( is.null(double_yaxis) == TRUE){
# eje simple
if( length(y_lab) != 1){
return('y_lab must be of length one')
}
} else {
# eje doble
if( length(y_lab) != 2){
return('y_lab must be of length two')
}
}
## title_lab
# title_lab: caracter unico
if( is.null(title_lab) == FALSE){
# caracter
if( is.character(title_lab) == FALSE ){
return('title_lab argument must be of character class')
}
# unico
if( length(title_lab) != 1 ){
return('title_lab length must be one')
}
}
## legend_lab
if( is.null(legend_lab) == FALSE ){
n_legend_lab <- length(legend_lab)
# legend_lab: caracter
if( is.character(legend_lab) == FALSE ){
return('legend_lab must be of class character')
}
# legend_lab: cantidad
if( n_col_number != n_legend_lab){
return('You must provide as many legend_lab strings as line plots')
}
}
## double_yaxis
if( is.null(double_yaxis) == FALSE){
n_double_yaxis <- length(double_yaxis)
# double_yaxis: numerico
if( is.numeric(double_yaxis) == FALSE){
return('double_axis argument must be of numeric class')
}
# double_yaxis: cantidad
if( interactive == FALSE){
# ggplot2
if( n_double_yaxis != 2 ){
return('In interactive = FALSE double_yaxis arguments only allows a numeric vector of length two')
}
} else {
# plotly
if(n_double_yaxis != n_col_number){
return('double_yaxis numeric vector argument must be of the same length as col_number')
}
}
# double_yaxis: 1 y 2
target_nums <- c(1, 2)
match_nums <- match(x = double_yaxis, table = target_nums)
if( is.na( sum(match_nums) ) == TRUE ){
return('Only 1 and 2 are allow as arguments in double_yaxis')
}
}
## list_extra
if( is.null(list_extra) == FALSE ){
if( interactive == FALSE){
# ggplot2
# list_extra: lista
if( is.list(list_extra) == FALSE){
return('list_extra argument must be of list class')
}
# list_extra: longitud
# if(length(list_extra) != 1){
# return('list_extra should contain a single element list')
# }
} else {
# plotly
print('list_extra argument does not make sense if interactive = TRUE')
}
}# fin list_extra
## from
if( is.null(from) == FALSE){
# from: caracter
if( is.character(from) == FALSE & is.POSIXct(from) == FALSE){
return('from must be of class character or POSIXct')
}
# from: uno solo
if( length(from) != 1){
return('from must be of length one')
}
}
## to
if( is.null(to) == FALSE){
# to: caracter
if( is.character(to) == FALSE & is.POSIXct(from) == FALSE){
return('to must be of class character or POSIXct')
}
# to: uno solo
if( length(to) != 1){
return('to must be of length one')
}
}
# fin condicionales
###
#********************
# Binding variables
#********************
Date <- value <- NULL
#********************
## Obtener los slots de interes
all_slots <- get_hydroMet(obj = obj, name = slot_name)
# condicionar que el numero(s) de columna exista dentro de cada slot
target_max_col <- sapply(X = all_slots, FUN = ncol)
if(n_slot_name == 1){
# un slot
if(max(col_number) > target_max_col){
return('Unless one of the col_number does not exist in the slot')
}
} else {
# varios slots
for(i in 1:n_slot_name){
aux_col_num <- col_number[[i]]
if(max(aux_col_num) > target_max_col[i]){
return( paste0('Unless one of the col_number (', slot_name[i], ') does not exist in the slot') )
}
}# fin for
}
## Armar el un data frame para graficar (df_plot) de acuerdo a las columnas seleccionadas
# Verifico resolucion temporal
N_all_slots <- length(all_slots)
if(N_all_slots > 1){
unidades <- rep(NA_character_, N_all_slots) # que las unidades temporales sean las mismas
paso_tpo <- rep(NA_character_, N_all_slots) # que el paso de tiempo sea el mismo
for(i in 1:N_all_slots){
unidades[i] <- units( diff.Date( all_slots[[i]][ , 1] ) )
paso_tpo[i] <- length(unique( diff.Date( all_slots[[i]][ , 1] ) ) )
}# fin for
if( length( unique(unidades)) != 1 ){
return('the variables must have the same temporal resolution')
}
if( unique(paso_tpo) != 1 ){
return('the variables must have the same temporal resolution')
}
} # fin if
# Extraigo las variables de interes de cada slot
if(N_all_slots > 1){
# en este caso col_number es una lista
df_plot <- all_slots[[1]][ , c(1, col_number[[1]] )]
for(i in 2:N_all_slots){
df_aux <- all_slots[[i]][ , c(1, col_number[[i]] )]
df_plot <- merge(df_plot, df_aux, all = TRUE)
}
} else {
# solo un slot
df_plot <- all_slots[[1]][ , c(1, col_number)]
}
# En caso de ser necesario aplico subset al data frame
if( is.null(from) == FALSE & is.null(to) == FALSE){
df_plot <- subset(df_plot, subset = Date >= from & Date <= to)
} else if( is.null(from) == FALSE ) {
df_plot <- subset(df_plot, subset = Date >= from)
} else if( is.null(to) == FALSE) {
df_plot <- subset(df_plot, subset = Date <= to)
}
###
## ggplot2 o plotly? => esto define la sintaxis a usar
if( interactive == FALSE ){
## ggplot2
# Doble eje y?
if( is.null(double_yaxis) == TRUE){
# un solo eje y
# Armo df_plot2 con las columnas
N_plot <- nrow(df_plot)
N_var <- ncol(df_plot) - 1
if( is.null(legend_lab) == FALSE ){
tipo_linea <- list()
color_linea <- list()
leyen_linea <- list()
for(i in 1:N_var){
tipo_linea[[i]] <- rep(line_type[i], N_plot)
color_linea[[i]] <- rep(line_color[i], N_plot)
leyen_linea[[i]] <- rep(legend_lab[i], N_plot)
}
linea <- c(sapply(X = tipo_linea, '['))
color <- c(sapply(X = color_linea, '['))
leyen <- c(sapply(X = leyen_linea, '['))
df_plot2 <- melt(data = df_plot, id.vars = 'Date')
df_plot2 <- cbind(df_plot2, linea, color, leyen)
} else {
tipo_linea <- list()
color_linea <- list()
for(i in 1:N_var){
tipo_linea[[i]] <- rep(line_type[i], N_plot)
color_linea[[i]] <- rep(line_color[i], N_plot)
}
linea <- c(sapply(X = tipo_linea, '['))
color <- c(sapply(X = color_linea, '['))
df_plot2 <- melt(data = df_plot, id.vars = 'Date')
leyen <- df_plot2$variable
df_plot2 <- cbind(df_plot2, linea, color, leyen)
}
# Grafico
ggout <-
ggplot(data = df_plot2, aes(x = Date, y = value, color = leyen) ) +
geom_line(aes(linetype = leyen) ) +
scale_color_manual(values = line_color) +
scale_linetype_manual(values = line_type) +
theme(legend.title = element_blank()) +
ggtitle(label = title_lab) +
xlab(x_lab) + ylab(y_lab)
# agrego list_extra en caso de ser necesario
if( is.null(list_extra) == FALSE){
ggout <- ggout +
Reduce(f = c, x = list_extra)
}
# Salida
return(ggout)
} else {
# doble eje
# Fuente: http://rstudio-pubs-static.s3.amazonaws.com/329613_f53e84d1a18840d5a1df55efb90739d9.html
# obtengo posicion de las series en eje principal
main_pos <- which(double_yaxis == 1) + 1 # vector con numero de columna
seco_pos <- which(double_yaxis == 2) + 1 # idem
# especifico nombre de las variables para escalar
y1 <- colnames(df_plot)[main_pos[1]] # principal
y2 <- colnames(df_plot)[seco_pos[1]] # secundario
# extraigo nombres para plotear
y1_plot <- colnames(df_plot)[main_pos]
y2_plot <- colnames(df_plot)[seco_pos]
# genero matriz
m_plot <- as.matrix(x = df_plot[ , -1])
# reescalo el eje y secundario:
# - substraigo su valor minimo (para que empiece en cero)
# - escalo para que tenga el mismo rango del eje y principal
# - sumo el minimo valor de y1
a <- range(df_plot[[y1]], na.rm = TRUE)
b <- range(df_plot[[y2]], na.rm = TRUE)
scale_factor <- diff(a)/diff(b)
m_plot[ , (seco_pos - 1)] <- ( (m_plot[ , (seco_pos - 1)] - b[1]) * scale_factor) + a[1]
# formula para transformar el eje y secundario
trans <- ~ ((. - a[1]) / scale_factor) + b[1]
# genero un df_plot2 con los valores reescalados
df_plot2 <- data.frame(df_plot[ , 1], m_plot)
colnames(df_plot2) <- colnames(df_plot)
# grafico
ggout <-
ggplot(df_plot2) +
geom_line(aes_string('Date', y1_plot), col = line_color[ (main_pos - 1) ], lty = line_type[ (main_pos - 1) ] ) +
geom_line(aes_string('Date', y2_plot), col = line_color[ (seco_pos - 1) ], lty = line_type[ (seco_pos - 1) ] ) +
ggtitle(label = title_lab) +
xlab(x_lab) + ylab(y_lab[1]) +
scale_y_continuous(sec.axis = sec_axis(trans = trans, name = y_lab[2]))
# agrego list_extra en caso de ser necesario
if( is.null(list_extra) == FALSE){
ggout <- ggout +
Reduce(f = c, x = list_extra)
}
# Salida
return(ggout)
}
} else {
## plotly
# Doble eje y?
if( is.null(double_yaxis) == TRUE ){
# y simple
# Armo sentencia basica
ppout <- plot_ly(df_plot, x = ~Date)
N_plots <- ncol(df_plot) - 1
# genero graficos sin etiquetas en los ejes
for(i in 1:N_plots){
ppout <- ppout %>%
add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]) )
}# fin for
# agrego etiquetas
ppout <-
ppout %>%
layout(title = title_lab,
xaxis = list(title = x_lab),
yaxis = list(title = y_lab) )
# Salida
return(ppout)
} else {
# y doble
# Armo sentencia basica
ppout <- plot_ly(df_plot, x = ~Date)
N_plots <- ncol(df_plot) - 1
# genero graficos sin etiquetas en los ejes
for(i in 1:N_plots){
if(double_yaxis[i] == 1){
# a eje principal
ppout <- ppout %>%
add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]) )
} else if (double_yaxis[i] == 2){
# a eje secundario
ppout <- ppout %>%
add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]), yaxis = 'y2')
}
}# fin for
# agrego etiquetas
ppout <-
ppout %>%
layout(title = title_lab,
xaxis = list(title = x_lab),
yaxis = list(title = y_lab[1]),
yaxis2 = list(title = y_lab[2],
overlaying = 'y',
side = 'right') )
# Salida
return(ppout)
}
} # fin plotly
} # fin funcion
)
#' @describeIn plot_hydroMet plot method for CR2 class
## CR2
setMethod(f = 'plot_hydroMet',
          signature = 'hydroMet_CR2',
          definition = function(obj, slot_name, col_number, interactive = FALSE,
                                line_type = NULL, line_color = 'dodgerblue',
                                x_lab = 'Date', y_lab = 'y', title_lab = NULL,
                                legend_lab = NULL, double_yaxis = NULL,
                                list_extra = NULL, from = NULL, to = NULL)
          {
            # Plot method for hydroMet_CR2 objects.
            # Builds either a static (ggplot2) or an interactive (plotly) time
            # series plot from the selected slots/columns, optionally with a
            # rescaled secondary y axis and a date subset.
            #
            # NOTE(review): invalid arguments are reported by *returning* a
            # character string instead of calling stop() -- kept as-is for
            # backward compatibility with existing callers.
            # Argument checks
            ## slot_name
            n_slot_name <- length(slot_name) # used below to validate lengths of other arguments
            # slot_name: character
            if( is.character(slot_name) == FALSE ){
              return('slot_name argument must be of class character')
            }
            # slot_name: must match the class slot names
            aux <- match(x = slot_name, table = slotNames('hydroMet_CR2')[1:4])
            if( is.na( sum(aux) ) == TRUE ){
              return('At least one of the slot_name arguments is incorrect')
            }
            rm(aux)
            ## col_number
            col_position <- Reduce(f = c, x = col_number) # positions of the columns to plot
            n_col_number <- length( col_position )        # how many columns in total
            # col_number: numeric vector (single slot) or list of numeric vectors (multiple slots)
            if(n_slot_name == 1){
              # col_number as numeric vector
              if( is.numeric(col_number) == FALSE ){
                return('col_number argument must be of class numeric')
              }
            } else {
              # col_number as a list containing numbers
              if( is.list(col_number) == FALSE ){
                return('col_number must be of list class')
              }
              # does every element contain numbers?
              if( is.numeric(Reduce(f = c, x = col_number) ) == FALSE ){
                return('Each list element should contain numeric vectors')
              }
            }
            # col_number: column 1 is the Date column, so plotted columns must be > 1
            col_position <- as.integer(col_position) # coerce to integer to avoid fractional column indexes
            if(length( which(col_position <= 1) ) >= 1){
              return('col_number arguments to plot must be > 1')
            }
            ## interactive
            # interactive: logical
            if( is.logical(interactive) == FALSE){
              return('interactive must be either TRUE or FALSE')
            }
            # interactive: single value
            if( length(interactive) > 1 ){
              return('interactive accepts a single value')
            }
            ## line_type
            n_line_type <- length(line_type)
            # line_type: assign defaults / validate supplied values
            if(n_line_type == 0) {
              # assign default values
              if(interactive == FALSE){
                # ggplot2
                line_type <- rep('solid', n_col_number)
              } else{
                # plotly
                line_type <- rep('lines', n_col_number)
              }
            } else {
              # same length as col_number
              if( n_line_type != n_col_number ){
                return('line_type must have the same length as col_number')
              }
              # valid line_type values
              if(interactive == FALSE){
                # ggplot2
                valid_line_type <- c('solid', 'twodash', 'longdash', 'dotted', 'dotdash', 'dashed', 'blank')
                correspondencia <- match(x = line_type, table = valid_line_type)
                if( is.na( sum(correspondencia) ) == TRUE ){
                  aux_var <- line_type[ which(is.na(correspondencia) ) ]
                  return( paste0(aux_var, ' ', 'is not a valid line_type for ggplot2 graph') )
                }
              } else {
                # plotly
                valid_line_type <- c('lines', 'lines+markers', 'markers')
                correspondencia <- match(x = line_type, table = valid_line_type)
                if( is.na( sum(correspondencia) ) == TRUE ){
                  aux_var <- line_type[ which(is.na(correspondencia) ) ]
                  return( paste0(aux_var, ' ', 'is not a valid line_type for plotly graph') )
                }
              }
            }
            ## line_color
            n_line_color <- length(line_color)
            # line_color: same length as col_number
            if( n_line_color != n_col_number ){
              return('line_color must be of the same length as col_number')
            }
            # line_color: character
            if( is.character(line_color) == FALSE ){
              return('line_color must be of character class')
            }
            ## x_lab
            # x_lab: character
            if( is.character(x_lab) == FALSE ){
              return('x_lab must be of class character')
            }
            # x_lab: single value
            if( length(x_lab) != 1){
              return('x_lab must be of length one')
            }
            ## y_lab
            # y_lab: character
            if( is.character(y_lab) == FALSE ){
              return('y_lab must be of class character')
            }
            # y_lab: one label (single axis) or two (double axis)
            if( is.null(double_yaxis) == TRUE){
              # single axis
              if( length(y_lab) != 1){
                return('y_lab must be of length one')
              }
            } else {
              # double axis
              if( length(y_lab) != 2){
                return('y_lab must be of length two')
              }
            }
            ## title_lab
            # title_lab: single character string
            if( is.null(title_lab) == FALSE){
              # character
              if( is.character(title_lab) == FALSE ){
                return('title_lab argument must be of character class')
              }
              # single value
              if( length(title_lab) != 1 ){
                return('title_lab length must be one')
              }
            }
            ## legend_lab
            if( is.null(legend_lab) == FALSE ){
              n_legend_lab <- length(legend_lab)
              # legend_lab: character
              if( is.character(legend_lab) == FALSE ){
                return('legend_lab must be of class character')
              }
              # legend_lab: one label per plotted line
              if( n_col_number != n_legend_lab){
                return('You must provide as many legend_lab strings as line plots')
              }
            }
            ## double_yaxis
            if( is.null(double_yaxis) == FALSE){
              n_double_yaxis <- length(double_yaxis)
              # double_yaxis: numeric
              if( is.numeric(double_yaxis) == FALSE){
                return('double_axis argument must be of numeric class')
              }
              # double_yaxis: length
              if( interactive == FALSE){
                # ggplot2
                if( n_double_yaxis != 2 ){
                  return('In interactive = FALSE double_yaxis arguments only allows a numeric vector of length two')
                }
              } else {
                # plotly
                if(n_double_yaxis != n_col_number){
                  return('double_yaxis numeric vector argument must be of the same length as col_number')
                }
              }
              # double_yaxis: only 1 (main axis) and 2 (secondary axis)
              target_nums <- c(1, 2)
              match_nums <- match(x = double_yaxis, table = target_nums)
              if( is.na( sum(match_nums) ) == TRUE ){
                return('Only 1 and 2 are allowed as arguments in double_yaxis')
              }
            }
            ## list_extra
            if( is.null(list_extra) == FALSE ){
              if( interactive == FALSE){
                # ggplot2
                # list_extra: list
                if( is.list(list_extra) == FALSE){
                  return('list_extra argument must be of list class')
                }
              } else {
                # plotly does not use list_extra
                print('list_extra argument does not make sense if interactive = TRUE')
              }
            }# end list_extra
            ## from
            if( is.null(from) == FALSE){
              # from: character
              if( is.character(from) == FALSE ){
                return('from must be of class character')
              }
              # from: single value
              if( length(from) != 1){
                return('from must be of length one')
              }
            }
            ## to
            if( is.null(to) == FALSE){
              # to: character
              if( is.character(to) == FALSE ){
                return('to must be of class character')
              }
              # to: single value
              if( length(to) != 1){
                return('to must be of length one')
              }
            }
            # end of argument checks
            ###
            #********************
            # Binding variables (silences R CMD check NOTEs for NSE columns)
            #********************
            Date <- value <- NULL
            #********************
            ## Get the slots of interest
            all_slots <- get_hydroMet(obj = obj, name = slot_name)
            # check that the requested column number(s) exist within each slot
            target_max_col <- sapply(X = all_slots, FUN = ncol)
            if(n_slot_name == 1){
              # single slot
              if(max(col_number) > target_max_col){
                return('At least one of the col_number does not exist in the slot')
              }
            } else {
              # multiple slots
              for(i in 1:n_slot_name){
                aux_col_num <- col_number[[i]]
                if(max(aux_col_num) > target_max_col[i]){
                  return( paste0('At least one of the col_number (', slot_name[i], ') does not exist in the slot') )
                }
              }# end for
            }
            ## Build the data frame to plot (df_plot) from the selected columns
            # Check temporal resolution across slots
            N_all_slots <- length(all_slots)
            if(N_all_slots > 1){
              unidades <- rep(NA_character_, N_all_slots) # time units must be the same
              paso_tpo <- rep(NA_integer_, N_all_slots)   # number of distinct time steps per slot
              for(i in 1:N_all_slots){
                unidades[i] <- units( diff.Date( all_slots[[i]][ , 1] ) )
                paso_tpo[i] <- length(unique( diff.Date( all_slots[[i]][ , 1] ) ) )
              }# end for
              if( length( unique(unidades)) != 1 ){
                return('the variables must have the same temporal resolution')
              }
              # every slot must have a single, regular time step
              if( any(paso_tpo != 1) ){
                return('the variables must have the same temporal resolution')
              }
            } # end if
            # Extract the variables of interest from each slot
            if(N_all_slots > 1){
              # in this case col_number is a list
              df_plot <- all_slots[[1]][ , c(1, col_number[[1]] )]
              for(i in 2:N_all_slots){
                df_aux <- all_slots[[i]][ , c(1, col_number[[i]] )]
                df_plot <- merge(df_plot, df_aux, all = TRUE)
              }
            } else {
              # single slot
              df_plot <- all_slots[[1]][ , c(1, col_number)]
            }
            # Subset the data frame if required
            if( is.null(from) == FALSE & is.null(to) == FALSE){
              df_plot <- subset(df_plot, subset = Date >= from & Date <= to)
            } else if( is.null(from) == FALSE ) {
              df_plot <- subset(df_plot, subset = Date >= from)
            } else if( is.null(to) == FALSE) {
              df_plot <- subset(df_plot, subset = Date <= to)
            }
            ###
            ## ggplot2 or plotly? => this defines the syntax to use
            if( interactive == FALSE ){
              ## ggplot2
              # Double y axis?
              if( is.null(double_yaxis) == TRUE){
                # single y axis
                # Build df_plot2 in long format with per-line aesthetics
                N_plot <- nrow(df_plot)
                N_var <- ncol(df_plot) - 1
                if( is.null(legend_lab) == FALSE ){
                  tipo_linea <- list()
                  color_linea <- list()
                  leyen_linea <- list()
                  for(i in 1:N_var){
                    tipo_linea[[i]] <- rep(line_type[i], N_plot)
                    color_linea[[i]] <- rep(line_color[i], N_plot)
                    leyen_linea[[i]] <- rep(legend_lab[i], N_plot)
                  }
                  linea <- c(sapply(X = tipo_linea, '['))
                  color <- c(sapply(X = color_linea, '['))
                  leyen <- c(sapply(X = leyen_linea, '['))
                  df_plot2 <- melt(data = df_plot, id.vars = 'Date')
                  df_plot2 <- cbind(df_plot2, linea, color, leyen)
                } else {
                  tipo_linea <- list()
                  color_linea <- list()
                  for(i in 1:N_var){
                    tipo_linea[[i]] <- rep(line_type[i], N_plot)
                    color_linea[[i]] <- rep(line_color[i], N_plot)
                  }
                  linea <- c(sapply(X = tipo_linea, '['))
                  color <- c(sapply(X = color_linea, '['))
                  df_plot2 <- melt(data = df_plot, id.vars = 'Date')
                  # without legend_lab, fall back to the melted variable names
                  leyen <- df_plot2$variable
                  df_plot2 <- cbind(df_plot2, linea, color, leyen)
                }
                # Plot
                ggout <-
                  ggplot(data = df_plot2, aes(x = Date, y = value, color = leyen) ) +
                  geom_line(aes(linetype = leyen) ) +
                  scale_color_manual(values = line_color) +
                  scale_linetype_manual(values = line_type) +
                  theme(legend.title = element_blank()) +
                  ggtitle(label = title_lab) +
                  xlab(x_lab) + ylab(y_lab)
                # add list_extra if required
                if( is.null(list_extra) == FALSE){
                  ggout <- ggout +
                    Reduce(f = c, x = list_extra)
                }
                # Output
                return(ggout)
              } else {
                # double axis
                # Source: http://rstudio-pubs-static.s3.amazonaws.com/329613_f53e84d1a18840d5a1df55efb90739d9.html
                # column positions of the series on each axis
                main_pos <- which(double_yaxis == 1) + 1 # vector of column numbers (main axis)
                seco_pos <- which(double_yaxis == 2) + 1 # idem (secondary axis)
                # variable names used for scaling
                y1 <- colnames(df_plot)[main_pos[1]] # main
                y2 <- colnames(df_plot)[seco_pos[1]] # secondary
                # names used for plotting
                y1_plot <- colnames(df_plot)[main_pos]
                y2_plot <- colnames(df_plot)[seco_pos]
                # build matrix
                m_plot <- as.matrix(x = df_plot[ , -1])
                # rescale the secondary y axis:
                # - subtract its minimum (so it starts at zero)
                # - scale it to span the same range as the main axis
                # - add the minimum of y1
                a <- range(df_plot[[y1]], na.rm = TRUE)
                b <- range(df_plot[[y2]], na.rm = TRUE)
                scale_factor <- diff(a)/diff(b)
                m_plot[ , (seco_pos - 1)] <- ( (m_plot[ , (seco_pos - 1)] - b[1]) * scale_factor) + a[1]
                # formula to back-transform the secondary y axis labels
                trans <- ~ ((. - a[1]) / scale_factor) + b[1]
                # df_plot2 holds the rescaled values
                df_plot2 <- data.frame(df_plot[ , 1], m_plot)
                colnames(df_plot2) <- colnames(df_plot)
                # plot
                # NOTE(review): aes_string() is deprecated in ggplot2 >= 3.4;
                # consider migrating to aes(.data[[...]]) in a future release.
                ggout <-
                  ggplot(df_plot2) +
                  geom_line(aes_string('Date', y1_plot), col = line_color[ (main_pos - 1) ], lty = line_type[ (main_pos - 1) ] ) +
                  geom_line(aes_string('Date', y2_plot), col = line_color[ (seco_pos - 1) ], lty = line_type[ (seco_pos - 1) ] ) +
                  ggtitle(label = title_lab) +
                  xlab(x_lab) + ylab(y_lab[1]) +
                  scale_y_continuous(sec.axis = sec_axis(trans = trans, name = y_lab[2]))
                # add list_extra if required
                if( is.null(list_extra) == FALSE){
                  ggout <- ggout +
                    Reduce(f = c, x = list_extra)
                }
                # Output
                return(ggout)
              }
            } else {
              ## plotly
              # Double y axis?
              if( is.null(double_yaxis) == TRUE ){
                # single y
                # Build basic plotly object
                ppout <- plot_ly(df_plot, x = ~Date)
                N_plots <- ncol(df_plot) - 1
                # add traces (axis labels are added afterwards)
                for(i in 1:N_plots){
                  ppout <- ppout %>%
                    add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]) )
                }# end for
                # add labels
                ppout <-
                  ppout %>%
                  layout(title = title_lab,
                         xaxis = list(title = x_lab),
                         yaxis = list(title = y_lab) )
                # Output
                return(ppout)
              } else {
                # double y
                # Build basic plotly object
                ppout <- plot_ly(df_plot, x = ~Date)
                N_plots <- ncol(df_plot) - 1
                # add traces (axis labels are added afterwards)
                for(i in 1:N_plots){
                  if(double_yaxis[i] == 1){
                    # main axis
                    ppout <- ppout %>%
                      add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]) )
                  } else if (double_yaxis[i] == 2){
                    # secondary axis
                    ppout <- ppout %>%
                      add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]), yaxis = 'y2')
                  }
                }# end for
                # add labels
                ppout <-
                  ppout %>%
                  layout(title = title_lab,
                         xaxis = list(title = x_lab),
                         yaxis = list(title = y_lab[1]),
                         yaxis2 = list(title = y_lab[2],
                                       overlaying = 'y',
                                       side = 'right') )
                # Output
                return(ppout)
              }
            } # end plotly
          } # end function
)
#' @describeIn plot_hydroMet plot method for DGI class
## DGI
setMethod(f = 'plot_hydroMet',
          signature = 'hydroMet_DGI',
          definition = function(obj, slot_name, col_number, interactive = FALSE,
                                line_type = NULL, line_color = 'dodgerblue',
                                x_lab = 'Date', y_lab = 'y', title_lab = NULL,
                                legend_lab = NULL, double_yaxis = NULL,
                                list_extra = NULL, from = NULL, to = NULL)
          {
            # Plot method for hydroMet_DGI objects.
            # Builds either a static (ggplot2) or an interactive (plotly) time
            # series plot from the selected slots/columns, optionally with a
            # rescaled secondary y axis and a date subset.
            #
            # NOTE(review): invalid arguments are reported by *returning* a
            # character string instead of calling stop() -- kept as-is for
            # backward compatibility with existing callers.
            # Argument checks
            ## slot_name
            n_slot_name <- length(slot_name) # used below to validate lengths of other arguments
            # slot_name: character
            if( is.character(slot_name) == FALSE ){
              return('slot_name argument must be of class character')
            }
            # slot_name: must match the class slot names
            aux <- match(x = slot_name, table = slotNames('hydroMet_DGI')[1:7])
            if( is.na( sum(aux) ) == TRUE ){
              return('At least one of the slot_name arguments is incorrect')
            }
            rm(aux)
            ## col_number
            col_position <- Reduce(f = c, x = col_number) # positions of the columns to plot
            n_col_number <- length( col_position )        # how many columns in total
            # col_number: numeric vector (single slot) or list of numeric vectors (multiple slots)
            if(n_slot_name == 1){
              # col_number as numeric vector
              if( is.numeric(col_number) == FALSE ){
                return('col_number argument must be of class numeric')
              }
            } else {
              # col_number as a list containing numbers
              if( is.list(col_number) == FALSE ){
                return('col_number must be of list class')
              }
              # does every element contain numbers?
              if( is.numeric(Reduce(f = c, x = col_number) ) == FALSE ){
                return('Each list element should contain numeric vectors')
              }
            }
            # col_number: column 1 is the Date column, so plotted columns must be > 1
            col_position <- as.integer(col_position) # coerce to integer to avoid fractional column indexes
            if(length( which(col_position <= 1) ) >= 1){
              return('col_number arguments to plot must be > 1')
            }
            ## interactive
            # interactive: logical
            if( is.logical(interactive) == FALSE){
              return('interactive must be either TRUE or FALSE')
            }
            # interactive: single value
            if( length(interactive) > 1 ){
              return('interactive accepts a single value')
            }
            ## line_type
            n_line_type <- length(line_type)
            # line_type: assign defaults / validate supplied values
            if(n_line_type == 0) {
              # assign default values
              if(interactive == FALSE){
                # ggplot2
                line_type <- rep('solid', n_col_number)
              } else{
                # plotly
                line_type <- rep('lines', n_col_number)
              }
            } else {
              # same length as col_number
              if( n_line_type != n_col_number ){
                return('line_type must have the same length as col_number')
              }
              # valid line_type values
              if(interactive == FALSE){
                # ggplot2
                valid_line_type <- c('solid', 'twodash', 'longdash', 'dotted', 'dotdash', 'dashed', 'blank')
                correspondencia <- match(x = line_type, table = valid_line_type)
                if( is.na( sum(correspondencia) ) == TRUE ){
                  aux_var <- line_type[ which(is.na(correspondencia) ) ]
                  return( paste0(aux_var, ' ', 'is not a valid line_type for ggplot2 graph') )
                }
              } else {
                # plotly
                valid_line_type <- c('lines', 'lines+markers', 'markers')
                correspondencia <- match(x = line_type, table = valid_line_type)
                if( is.na( sum(correspondencia) ) == TRUE ){
                  aux_var <- line_type[ which(is.na(correspondencia) ) ]
                  return( paste0(aux_var, ' ', 'is not a valid line_type for plotly graph') )
                }
              }
            }
            ## line_color
            n_line_color <- length(line_color)
            # line_color: same length as col_number
            if( n_line_color != n_col_number ){
              return('line_color must be of the same length as col_number')
            }
            # line_color: character
            if( is.character(line_color) == FALSE ){
              return('line_color must be of character class')
            }
            ## x_lab
            # x_lab: character
            if( is.character(x_lab) == FALSE ){
              return('x_lab must be of class character')
            }
            # x_lab: single value
            if( length(x_lab) != 1){
              return('x_lab must be of length one')
            }
            ## y_lab
            # y_lab: character
            if( is.character(y_lab) == FALSE ){
              return('y_lab must be of class character')
            }
            # y_lab: one label (single axis) or two (double axis)
            if( is.null(double_yaxis) == TRUE){
              # single axis
              if( length(y_lab) != 1){
                return('y_lab must be of length one')
              }
            } else {
              # double axis
              if( length(y_lab) != 2){
                return('y_lab must be of length two')
              }
            }
            ## title_lab
            # title_lab: single character string
            if( is.null(title_lab) == FALSE){
              # character
              if( is.character(title_lab) == FALSE ){
                return('title_lab argument must be of character class')
              }
              # single value
              if( length(title_lab) != 1 ){
                return('title_lab length must be one')
              }
            }
            ## legend_lab
            if( is.null(legend_lab) == FALSE ){
              n_legend_lab <- length(legend_lab)
              # legend_lab: character
              if( is.character(legend_lab) == FALSE ){
                return('legend_lab must be of class character')
              }
              # legend_lab: one label per plotted line
              if( n_col_number != n_legend_lab){
                return('You must provide as many legend_lab strings as line plots')
              }
            }
            ## double_yaxis
            if( is.null(double_yaxis) == FALSE){
              n_double_yaxis <- length(double_yaxis)
              # double_yaxis: numeric
              if( is.numeric(double_yaxis) == FALSE){
                return('double_axis argument must be of numeric class')
              }
              # double_yaxis: length
              if( interactive == FALSE){
                # ggplot2
                if( n_double_yaxis != 2 ){
                  return('In interactive = FALSE double_yaxis arguments only allows a numeric vector of length two')
                }
              } else {
                # plotly
                if(n_double_yaxis != n_col_number){
                  return('double_yaxis numeric vector argument must be of the same length as col_number')
                }
              }
              # double_yaxis: only 1 (main axis) and 2 (secondary axis)
              target_nums <- c(1, 2)
              match_nums <- match(x = double_yaxis, table = target_nums)
              if( is.na( sum(match_nums) ) == TRUE ){
                return('Only 1 and 2 are allowed as arguments in double_yaxis')
              }
            }
            ## list_extra
            if( is.null(list_extra) == FALSE ){
              if( interactive == FALSE){
                # ggplot2
                # list_extra: list
                if( is.list(list_extra) == FALSE){
                  return('list_extra argument must be of list class')
                }
              } else {
                # plotly does not use list_extra
                print('list_extra argument does not make sense if interactive = TRUE')
              }
            }# end list_extra
            ## from
            if( is.null(from) == FALSE){
              # from: character
              if( is.character(from) == FALSE ){
                return('from must be of class character')
              }
              # from: single value
              if( length(from) != 1){
                return('from must be of length one')
              }
            }
            ## to
            if( is.null(to) == FALSE){
              # to: character
              if( is.character(to) == FALSE ){
                return('to must be of class character')
              }
              # to: single value
              if( length(to) != 1){
                return('to must be of length one')
              }
            }
            # end of argument checks
            ###
            #********************
            # Binding variables (silences R CMD check NOTEs for NSE columns)
            #********************
            Date <- value <- NULL
            #********************
            ## Get the slots of interest
            all_slots <- get_hydroMet(obj = obj, name = slot_name)
            # check that the requested column number(s) exist within each slot
            target_max_col <- sapply(X = all_slots, FUN = ncol)
            if(n_slot_name == 1){
              # single slot
              if(max(col_number) > target_max_col){
                return('At least one of the col_number does not exist in the slot')
              }
            } else {
              # multiple slots
              for(i in 1:n_slot_name){
                aux_col_num <- col_number[[i]]
                if(max(aux_col_num) > target_max_col[i]){
                  return( paste0('At least one of the col_number (', slot_name[i], ') does not exist in the slot') )
                }
              }# end for
            }
            ## Build the data frame to plot (df_plot) from the selected columns
            # Check temporal resolution across slots
            N_all_slots <- length(all_slots)
            if(N_all_slots > 1){
              unidades <- rep(NA_character_, N_all_slots) # time units must be the same
              paso_tpo <- rep(NA_integer_, N_all_slots)   # number of distinct time steps per slot
              for(i in 1:N_all_slots){
                unidades[i] <- units( diff.Date( all_slots[[i]][ , 1] ) )
                paso_tpo[i] <- length(unique( diff.Date( all_slots[[i]][ , 1] ) ) )
              }# end for
              if( length( unique(unidades)) != 1 ){
                return('the variables must have the same temporal resolution')
              }
              # every slot must have a single, regular time step
              if( any(paso_tpo != 1) ){
                return('the variables must have the same temporal resolution')
              }
            } # end if
            # Extract the variables of interest from each slot
            if(N_all_slots > 1){
              # in this case col_number is a list
              df_plot <- all_slots[[1]][ , c(1, col_number[[1]] )]
              for(i in 2:N_all_slots){
                df_aux <- all_slots[[i]][ , c(1, col_number[[i]] )]
                df_plot <- merge(df_plot, df_aux, all = TRUE)
              }
            } else {
              # single slot
              df_plot <- all_slots[[1]][ , c(1, col_number)]
            }
            # Subset the data frame if required
            if( is.null(from) == FALSE & is.null(to) == FALSE){
              df_plot <- subset(df_plot, subset = Date >= from & Date <= to)
            } else if( is.null(from) == FALSE ) {
              df_plot <- subset(df_plot, subset = Date >= from)
            } else if( is.null(to) == FALSE) {
              df_plot <- subset(df_plot, subset = Date <= to)
            }
            ###
            ## ggplot2 or plotly? => this defines the syntax to use
            if( interactive == FALSE ){
              ## ggplot2
              # Double y axis?
              if( is.null(double_yaxis) == TRUE){
                # single y axis
                # Build df_plot2 in long format with per-line aesthetics
                N_plot <- nrow(df_plot)
                N_var <- ncol(df_plot) - 1
                if( is.null(legend_lab) == FALSE ){
                  tipo_linea <- list()
                  color_linea <- list()
                  leyen_linea <- list()
                  for(i in 1:N_var){
                    tipo_linea[[i]] <- rep(line_type[i], N_plot)
                    color_linea[[i]] <- rep(line_color[i], N_plot)
                    leyen_linea[[i]] <- rep(legend_lab[i], N_plot)
                  }
                  linea <- c(sapply(X = tipo_linea, '['))
                  color <- c(sapply(X = color_linea, '['))
                  leyen <- c(sapply(X = leyen_linea, '['))
                  df_plot2 <- melt(data = df_plot, id.vars = 'Date')
                  df_plot2 <- cbind(df_plot2, linea, color, leyen)
                } else {
                  tipo_linea <- list()
                  color_linea <- list()
                  for(i in 1:N_var){
                    tipo_linea[[i]] <- rep(line_type[i], N_plot)
                    color_linea[[i]] <- rep(line_color[i], N_plot)
                  }
                  linea <- c(sapply(X = tipo_linea, '['))
                  color <- c(sapply(X = color_linea, '['))
                  df_plot2 <- melt(data = df_plot, id.vars = 'Date')
                  # without legend_lab, fall back to the melted variable names
                  leyen <- df_plot2$variable
                  df_plot2 <- cbind(df_plot2, linea, color, leyen)
                }
                # Plot
                ggout <-
                  ggplot(data = df_plot2, aes(x = Date, y = value, color = leyen) ) +
                  geom_line(aes(linetype = leyen) ) +
                  scale_color_manual(values = line_color) +
                  scale_linetype_manual(values = line_type) +
                  theme(legend.title = element_blank()) +
                  ggtitle(label = title_lab) +
                  xlab(x_lab) + ylab(y_lab)
                # add list_extra if required
                if( is.null(list_extra) == FALSE){
                  ggout <- ggout +
                    Reduce(f = c, x = list_extra)
                }
                # Output
                return(ggout)
              } else {
                # double axis
                # Source: http://rstudio-pubs-static.s3.amazonaws.com/329613_f53e84d1a18840d5a1df55efb90739d9.html
                # column positions of the series on each axis
                main_pos <- which(double_yaxis == 1) + 1 # vector of column numbers (main axis)
                seco_pos <- which(double_yaxis == 2) + 1 # idem (secondary axis)
                # variable names used for scaling
                y1 <- colnames(df_plot)[main_pos[1]] # main
                y2 <- colnames(df_plot)[seco_pos[1]] # secondary
                # names used for plotting
                y1_plot <- colnames(df_plot)[main_pos]
                y2_plot <- colnames(df_plot)[seco_pos]
                # build matrix
                m_plot <- as.matrix(x = df_plot[ , -1])
                # rescale the secondary y axis:
                # - subtract its minimum (so it starts at zero)
                # - scale it to span the same range as the main axis
                # - add the minimum of y1
                a <- range(df_plot[[y1]], na.rm = TRUE)
                b <- range(df_plot[[y2]], na.rm = TRUE)
                scale_factor <- diff(a)/diff(b)
                m_plot[ , (seco_pos - 1)] <- ( (m_plot[ , (seco_pos - 1)] - b[1]) * scale_factor) + a[1]
                # formula to back-transform the secondary y axis labels
                trans <- ~ ((. - a[1]) / scale_factor) + b[1]
                # df_plot2 holds the rescaled values
                df_plot2 <- data.frame(df_plot[ , 1], m_plot)
                colnames(df_plot2) <- colnames(df_plot)
                # plot
                # NOTE(review): aes_string() is deprecated in ggplot2 >= 3.4;
                # consider migrating to aes(.data[[...]]) in a future release.
                ggout <-
                  ggplot(df_plot2) +
                  geom_line(aes_string('Date', y1_plot), col = line_color[ (main_pos - 1) ], lty = line_type[ (main_pos - 1) ] ) +
                  geom_line(aes_string('Date', y2_plot), col = line_color[ (seco_pos - 1) ], lty = line_type[ (seco_pos - 1) ] ) +
                  ggtitle(label = title_lab) +
                  xlab(x_lab) + ylab(y_lab[1]) +
                  scale_y_continuous(sec.axis = sec_axis(trans = trans, name = y_lab[2]))
                # add list_extra if required
                if( is.null(list_extra) == FALSE){
                  ggout <- ggout +
                    Reduce(f = c, x = list_extra)
                }
                # Output
                return(ggout)
              }
            } else {
              ## plotly
              # Double y axis?
              if( is.null(double_yaxis) == TRUE ){
                # single y
                # Build basic plotly object
                ppout <- plot_ly(df_plot, x = ~Date)
                N_plots <- ncol(df_plot) - 1
                # add traces (axis labels are added afterwards)
                for(i in 1:N_plots){
                  ppout <- ppout %>%
                    add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]) )
                }# end for
                # add labels
                ppout <-
                  ppout %>%
                  layout(title = title_lab,
                         xaxis = list(title = x_lab),
                         yaxis = list(title = y_lab) )
                # Output
                return(ppout)
              } else {
                # double y
                # Build basic plotly object
                ppout <- plot_ly(df_plot, x = ~Date)
                N_plots <- ncol(df_plot) - 1
                # add traces (axis labels are added afterwards)
                for(i in 1:N_plots){
                  if(double_yaxis[i] == 1){
                    # main axis
                    ppout <- ppout %>%
                      add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]) )
                  } else if (double_yaxis[i] == 2){
                    # secondary axis
                    ppout <- ppout %>%
                      add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]), yaxis = 'y2')
                  }
                }# end for
                # add labels
                ppout <-
                  ppout %>%
                  layout(title = title_lab,
                         xaxis = list(title = x_lab),
                         yaxis = list(title = y_lab[1]),
                         yaxis2 = list(title = y_lab[2],
                                       overlaying = 'y',
                                       side = 'right') )
                # Output
                return(ppout)
              }
            } # end plotly
          } # end function
)
#' @describeIn plot_hydroMet plot method for IANIGLA class
## IANIGLA
setMethod(f = 'plot_hydroMet',
signature = 'hydroMet_IANIGLA',
definition = function(obj, slot_name, col_number, interactive = FALSE,
line_type = NULL, line_color = 'dodgerblue',
x_lab = 'Date', y_lab = 'y', title_lab = NULL,
legend_lab = NULL, double_yaxis = NULL,
list_extra = NULL, from = NULL, to = NULL)
{
# Condicionales
## slot_name
n_slot_name <- length(slot_name) # lo genero para comparar con long de otros argumentos
# slot_name: caracter
if( is.character(slot_name) == FALSE ){
return('slot_name argument must be of class character')
}
# slot_name: se corresponden con los slots
aux <- match(x = slot_name, table = slotNames('hydroMet_IANIGLA')[2:11])
if( is.na( sum(aux) ) == TRUE ){
return('Unless one of the slot_name arguments is incorrect')
}
rm(aux)
## col_number
col_position <- Reduce(f = c, x = col_number) # posicion de las columnas a plotear
n_col_number <- length( col_position ) # cantidad
# col_number: numerico o lista
if(n_slot_name == 1){
# col_number como vector numerico
if( is.numeric(col_number) == FALSE ){
return('col_number argument must be of class numeric')
}
} else {
# col_number como lista que contiene numeros
if( is.list(col_number) == FALSE ){
return('col_number must be of list class')
}
# contiene numeros?
if( is.numeric(Reduce(f = c, x = col_number) ) == FALSE ){
return('Each list element should contain numeric vectors')
}
}
# col_number: mayor o igual a uno
col_position <- as.integer(col_position) # coerciono a entero para evitar columnas decimales
if(length( which(col_position < 1) ) >= 1){
return('col_number arguments to plot must be > 1')
}
## interactive
# interactive: logico
if( is.logical(interactive) == FALSE){
return('interactive must be either TRUE or FALSE')
}
# interactive: uno solo
if( length(interactive) > 1 ){
return('interactive accepts a single value')
}
## line_type
n_line_type <- length(line_type)
# line_type: asigno valores / compruebo sus valores
if(n_line_type == 0) {
# asigno valores por defecto
if(interactive == FALSE){
# ggplot2
line_type <- rep('solid', n_col_number)
} else{
# plotly
line_type <- rep('lines', n_col_number)
}
} else {
# misma longitud que col_number
if( n_line_type != n_col_number ){
return('line_type must have the same length as col_number')
}
# long_type valido
if(interactive == FALSE){
# ggplot2
valid_line_type <- c('solid', 'twodash', 'longdash', 'dotted', 'dotdash', 'dashed', 'blank')
correspondencia <- match(x = line_type, table = valid_line_type)
if( is.na( sum(correspondencia) ) == TRUE ){
aux_var <- line_type[ which(is.na(correspondencia) ) ]
return( paste0(aux_var, ' ', 'is not a valid line_type for ggplot2 graph') )
}
} else {
# plotly
valid_line_type <- c('lines', 'lines+markers', 'markers')
correspondencia <- match(x = line_type, table = valid_line_type)
if( is.na( sum(correspondencia) ) == TRUE ){
aux_var <- line_type[ which(is.na(correspondencia) ) ]
return( paste0(aux_var, ' ', 'is not a valid line_type for plotly graph') )
}
}
}
## line_color
n_line_color <- length(line_color)
# line_color: misma longitud que col_number
if( n_line_color != n_col_number ){
return('line_color must be of the same length as col_number')
}
# line_color: caracter
if( is.character(line_color) == FALSE ){
return('line_color must be of character class')
}
# line_color: color valido
# paleta_match <- match(x = line_color, table = colors())
#
# if( is.na(sum(paleta_match) ) == T ) {
# aux_var <- line_color[ which(is.na(paleta_match) ) ]
# return( paste0(aux_var, ' ', 'is not a valid line_color. Find valid color names with colors()') )
#
# }
## x_lab
# x_lab: caracter
if( is.character(x_lab) == FALSE ){
return('x_lab must be of class character')
}
# x_lab: uno solo
if( length(x_lab) != 1){
return('x_lab must be of length one')
}
## y_lab
# y_lab: caracter
if( is.character(y_lab) == FALSE ){
return('y_lab must be of class character')
}
# y_lab: uno o dos
if( is.null(double_yaxis) == TRUE){
# eje simple
if( length(y_lab) != 1){
return('y_lab must be of length one')
}
} else {
# eje doble
if( length(y_lab) != 2){
return('y_lab must be of length two')
}
}
## title_lab
# title_lab: caracter unico
if( is.null(title_lab) == FALSE){
# caracter
if( is.character(title_lab) == FALSE ){
return('title_lab argument must be of character class')
}
# unico
if( length(title_lab) != 1 ){
return('title_lab length must be one')
}
}
## legend_lab
if( is.null(legend_lab) == FALSE ){
n_legend_lab <- length(legend_lab)
# legend_lab: caracter
if( is.character(legend_lab) == FALSE ){
return('legend_lab must be of class character')
}
# legend_lab: cantidad
if( n_col_number != n_legend_lab){
return('You must provide as many legend_lab strings as line plots')
}
}
## double_yaxis
if( is.null(double_yaxis) == FALSE){
n_double_yaxis <- length(double_yaxis)
# double_yaxis: numerico
if( is.numeric(double_yaxis) == FALSE){
return('double_axis argument must be of numeric class')
}
# double_yaxis: cantidad
if( interactive == FALSE){
# ggplot2
if( n_double_yaxis != 2 ){
return('In interactive = FALSE double_yaxis arguments only allows a numeric vector of length two')
}
} else {
# plotly
if(n_double_yaxis != n_col_number){
return('double_yaxis numeric vector argument must be of the same length as col_number')
}
}
# double_yaxis: 1 y 2
target_nums <- c(1, 2)
match_nums <- match(x = double_yaxis, table = target_nums)
if( is.na( sum(match_nums) ) == TRUE ){
return('Only 1 and 2 are allow as arguments in double_yaxis')
}
}
## list_extra
if( is.null(list_extra) == FALSE ){
if( interactive == FALSE){
# ggplot2
# list_extra: lista
if( is.list(list_extra) == FALSE){
return('list_extra argument must be of list class')
}
# list_extra: longitud
# if(length(list_extra) != 1){
# return('list_extra should contain a single element list')
# }
} else {
# plotly
print('list_extra argument does not make sense if interactive = TRUE')
}
}# fin list_extra
## from
if( is.null(from) == FALSE){
# from: caracter
if( is.character(from) == FALSE & is.POSIXct(from) == FALSE){
return('from must be of class character or POSIXct')
}
# from: uno solo
if( length(from) != 1){
return('from must be of length one')
}
}
## to
if( is.null(to) == FALSE){
# to: caracter
if( is.character(to) == FALSE & is.POSIXct(from) == FALSE){
return('to must be of class character or POSIXct')
}
# to: uno solo
if( length(to) != 1){
return('to must be of length one')
}
}
# fin condicionales
###
#********************
# Binding variables
#********************
Date <- value <- NULL
#********************
## Obtener los slots de interes
all_slots <- get_hydroMet(obj = obj, name = slot_name)
# condicionar que el numero(s) de columna exista dentro de cada slot
target_max_col <- sapply(X = all_slots, FUN = ncol)
if(n_slot_name == 1){
# un slot
if(max(col_number) > target_max_col){
return('Unless one of the col_number does not exist in the slot')
}
} else {
# varios slots
for(i in 1:n_slot_name){
aux_col_num <- col_number[[i]]
if(max(aux_col_num) > target_max_col[i]){
return( paste0('Unless one of the col_number (', slot_name[i], ') does not exist in the slot') )
}
}# fin for
}
## Armar el un data frame para graficar (df_plot) de acuerdo a las columnas seleccionadas
# Verifico resolucion temporal (no hace falta porque todos estan en la misma resolucion)
N_all_slots <- length(all_slots)
# if(N_all_slots > 1){
#
# unidades <- rep(NA_character_, N_all_slots) # que las unidades temporales sean las mismas
# paso_tpo <- rep(NA_character_, N_all_slots) # que el paso de tiempo sea el mismo
# for(i in 1:N_all_slots){
#
# unidades[i] <- units( diff.Date( all_slots[[i]][ , 1] ) )
# paso_tpo[i] <- length(unique( diff.Date( all_slots[[i]][ , 1] ) ) )
#
# }# fin for
#
# if( length( unique(unidades)) != 1 ){
# return('the variables must have the same temporal resolution')
# }
#
# if( unique(paso_tpo) != 1 ){
# return('the variables must have the same temporal resolution')
# }
#
#
# } # fin if
# Extraigo las variables de interes de cada slot
date_serie <- get_hydroMet(obj = obj, name = 'date')[[1]] # fechas
if(N_all_slots > 1){
# en este caso col_number es una lista
aux_nom <- colnames(all_slots[[1]])[ c( col_number[[1]] ) ]
df_plot <- data.frame(date_serie, all_slots[[1]][ , c(col_number[[1]] )] )
colnames(df_plot) <- c('Date', aux_nom)
for(i in 2:N_all_slots){
aux_nom <- c('Date', aux_nom, colnames(all_slots[[i]])[ c( col_number[[i]] ) ] )
df_aux <- data.frame(Date = date_serie, all_slots[[i]][ , c(col_number[[i]] )] )
df_plot <- merge(df_plot, df_aux, all = TRUE)
colnames(df_plot) <- aux_nom
}
} else {
# solo un slot
aux_nom <- colnames(all_slots[[1]])[ c(col_number) ]
df_plot <- data.frame(date_serie, all_slots[[1]][ , c(col_number)] )
colnames(df_plot) <- c('Date', aux_nom)
}
# En caso de ser necesario aplico subset al data frame
if( is.null(from) == FALSE & is.null(to) == FALSE){
df_plot <- subset(df_plot, subset = Date >= from & Date <= to)
} else if( is.null(from) == FALSE ) {
df_plot <- subset(df_plot, subset = Date >= from)
} else if( is.null(to) == FALSE) {
df_plot <- subset(df_plot, subset = Date <= to)
}
###
## ggplot2 o plotly? => esto define la sintaxis a usar
if( interactive == FALSE ){
## ggplot2
# Doble eje y?
if( is.null(double_yaxis) == TRUE){
# un solo eje y
# Armo df_plot2 con las columnas
N_plot <- nrow(df_plot)
N_var <- ncol(df_plot) - 1
if( is.null(legend_lab) == FALSE ){
tipo_linea <- list()
color_linea <- list()
leyen_linea <- list()
for(i in 1:N_var){
tipo_linea[[i]] <- rep(line_type[i], N_plot)
color_linea[[i]] <- rep(line_color[i], N_plot)
leyen_linea[[i]] <- rep(legend_lab[i], N_plot)
}
linea <- c(sapply(X = tipo_linea, '['))
color <- c(sapply(X = color_linea, '['))
leyen <- c(sapply(X = leyen_linea, '['))
df_plot2 <- melt(data = df_plot, id.vars = 'Date')
df_plot2 <- cbind(df_plot2, linea, color, leyen)
} else {
tipo_linea <- list()
color_linea <- list()
for(i in 1:N_var){
tipo_linea[[i]] <- rep(line_type[i], N_plot)
color_linea[[i]] <- rep(line_color[i], N_plot)
}
linea <- c(sapply(X = tipo_linea, '['))
color <- c(sapply(X = color_linea, '['))
df_plot2 <- melt(data = df_plot, id.vars = 'Date')
leyen <- df_plot2$variable
df_plot2 <- cbind(df_plot2, linea, color, leyen)
}
# Grafico
ggout <-
ggplot(data = df_plot2, aes(x = Date, y = value, color = leyen) ) +
geom_line(aes(linetype = leyen) ) +
scale_color_manual(values = line_color) +
scale_linetype_manual(values = line_type) +
theme(legend.title = element_blank()) +
ggtitle(label = title_lab) +
xlab(x_lab) + ylab(y_lab)
# agrego list_extra en caso de ser necesario
if( is.null(list_extra) == FALSE){
ggout <- ggout +
Reduce(f = c, x = list_extra)
}
# Salida
return(ggout)
} else {
# doble eje
# Fuente: http://rstudio-pubs-static.s3.amazonaws.com/329613_f53e84d1a18840d5a1df55efb90739d9.html
# obtengo posicion de las series en eje principal
main_pos <- which(double_yaxis == 1) + 1 # vector con numero de columna
seco_pos <- which(double_yaxis == 2) + 1 # idem
# especifico nombre de las variables para escalar
y1 <- colnames(df_plot)[main_pos[1]] # principal
y2 <- colnames(df_plot)[seco_pos[1]] # secundario
# extraigo nombres para plotear
y1_plot <- colnames(df_plot)[main_pos]
y2_plot <- colnames(df_plot)[seco_pos]
# genero matriz
m_plot <- as.matrix(x = df_plot[ , -1])
# reescalo el eje y secundario:
# - substraigo su valor minimo (para que empiece en cero)
# - escalo para que tenga el mismo rango del eje y principal
# - sumo el minimo valor de y1
a <- range(df_plot[[y1]], na.rm = TRUE)
b <- range(df_plot[[y2]], na.rm = TRUE)
scale_factor <- diff(a)/diff(b)
m_plot[ , (seco_pos - 1)] <- ( (m_plot[ , (seco_pos - 1)] - b[1]) * scale_factor) + a[1]
# formula para transformar el eje y secundario
trans <- ~ ((. - a[1]) / scale_factor) + b[1]
# genero un df_plot2 con los valores reescalados
df_plot2 <- data.frame(df_plot[ , 1], m_plot)
colnames(df_plot2) <- colnames(df_plot)
# grafico
ggout <-
ggplot(df_plot2) +
geom_line(aes_string('Date', y1_plot), col = line_color[ (main_pos - 1) ], lty = line_type[ (main_pos - 1) ] ) +
geom_line(aes_string('Date', y2_plot), col = line_color[ (seco_pos - 1) ], lty = line_type[ (seco_pos - 1) ] ) +
ggtitle(label = title_lab) +
xlab(x_lab) + ylab(y_lab[1]) +
scale_y_continuous(sec.axis = sec_axis(trans = trans, name = y_lab[2]))
# agrego list_extra en caso de ser necesario
if( is.null(list_extra) == FALSE){
ggout <- ggout +
Reduce(f = c, x = list_extra)
}
# Salida
return(ggout)
}
} else {
## plotly
# Doble eje y?
if( is.null(double_yaxis) == TRUE ){
# y simple
# Armo sentencia basica
ppout <- plot_ly(df_plot, x = ~Date)
N_plots <- ncol(df_plot) - 1
# genero graficos sin etiquetas en los ejes
for(i in 1:N_plots){
ppout <- ppout %>%
add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]) )
}# fin for
# agrego etiquetas
ppout <-
ppout %>%
layout(title = title_lab,
xaxis = list(title = x_lab),
yaxis = list(title = y_lab) )
# Salida
return(ppout)
} else {
# y doble
# Armo sentencia basica
ppout <- plot_ly(df_plot, x = ~Date)
N_plots <- ncol(df_plot) - 1
# genero graficos sin etiquetas en los ejes
for(i in 1:N_plots){
if(double_yaxis[i] == 1){
# a eje principal
ppout <- ppout %>%
add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]) )
} else if (double_yaxis[i] == 2){
# a eje secundario
ppout <- ppout %>%
add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]), yaxis = 'y2')
}
}# fin for
# agrego etiquetas
ppout <-
ppout %>%
layout(title = title_lab,
xaxis = list(title = x_lab),
yaxis = list(title = y_lab[1]),
yaxis2 = list(title = y_lab[2],
overlaying = 'y',
side = 'right') )
# Salida
return(ppout)
}
} # fin plotly
} # fin funcion
)
#' @describeIn plot_hydroMet plot method for \code{compact} class
## compact
## Plots one or more of the series stored in the 'compact' slot of a
## hydroMet_compact object. Depending on 'interactive' it builds either a
## static ggplot2 graph or an interactive plotly graph; a secondary y axis
## (double_yaxis), date-range subsetting (from/to) and a scatter plot between
## two selected columns (scatter) are supported. On any argument-validation
## failure the method returns a character string describing the problem
## instead of raising an error.
setMethod(f = 'plot_hydroMet',
signature = 'hydroMet_compact',
definition = function(obj, slot_name, col_number, interactive = FALSE,
line_type = NULL, line_color = 'dodgerblue',
x_lab = 'x', y_lab = 'y', title_lab = NULL,
legend_lab = NULL, double_yaxis = NULL,
list_extra = NULL, from = NULL, to = NULL, scatter = NULL)
{
#**********************************************************************
# Argument validation
#**********************************************************************
## slot_name
n_slot_name <- length(slot_name) # kept to compare against the length of other arguments
# slot_name: character
if( is.character(slot_name) == FALSE ){
return('slot_name argument must be of class character')
}
# slot_name: must correspond to the class slots
aux <- match(x = slot_name, table = slotNames('hydroMet_compact')[1])
if( is.na( sum(aux) ) == TRUE ){
return('Unless one of the slot_name arguments is incorrect')
}
rm(aux)
## col_number
col_position <- Reduce(f = c, x = col_number) # positions of the columns to plot
n_col_number <- length( col_position ) # how many of them
# col_number: numeric vector or list
if(n_slot_name == 1){
# col_number as a numeric vector
if( is.numeric(col_number) == FALSE ){
return('col_number argument must be of class numeric')
}
} else {
# col_number as a list containing numbers
if( is.list(col_number) == FALSE ){
return('col_number must be of list class')
}
# does it contain numbers?
if( is.numeric(Reduce(f = c, x = col_number) ) == FALSE ){
return('Each list element should contain numeric vectors')
}
}
# col_number: greater than one (column 1 is the Date column, added automatically below)
col_position <- as.integer(col_position) # coerce to integer to avoid fractional column indices
# NOTE(review): the condition rejects col_number == 1 while the message says
# ">= 1"; the message should probably read "> 1" (the sibling method uses
# '< 1' with "> 1") -- confirm which is intended.
if(length( which(col_position <= 1) ) >= 1){
return('col_number arguments to plot must be >= 1')
}
## interactive
# interactive: logical
if( is.logical(interactive) == FALSE){
return('interactive must be either TRUE or FALSE')
}
# interactive: a single value
if( length(interactive) > 1 ){
return('interactive accepts a single value')
}
## line_type
n_line_type <- length(line_type)
# line_type: assign defaults / check the supplied values
if(n_line_type == 0) {
# assign default values
if(interactive == FALSE){
# ggplot2
line_type <- rep('solid', n_col_number)
} else{
# plotly
line_type <- rep('lines', n_col_number)
}
} else {
# same length as col_number
if( n_line_type != n_col_number ){
return('line_type must have the same length as col_number')
}
# valid line_type
if(interactive == FALSE){
# ggplot2
valid_line_type <- c('solid', 'twodash', 'longdash', 'dotted', 'dotdash', 'dashed', 'blank')
correspondencia <- match(x = line_type, table = valid_line_type)
if( is.na( sum(correspondencia) ) == TRUE ){
aux_var <- line_type[ which(is.na(correspondencia) ) ]
return( paste0(aux_var, ' ', 'is not a valid line_type for ggplot2 graph') )
}
} else {
# plotly
valid_line_type <- c('lines', 'lines+markers', 'markers')
correspondencia <- match(x = line_type, table = valid_line_type)
if( is.na( sum(correspondencia) ) == TRUE ){
aux_var <- line_type[ which(is.na(correspondencia) ) ]
return( paste0(aux_var, ' ', 'is not a valid line_type for plotly graph') )
}
}
}
## line_color
n_line_color <- length(line_color)
if( is.null(scatter) == TRUE ){
# line_color: same length as col_number
if( n_line_color != n_col_number ){
return('line_color must be of the same length as col_number')
}
# line_color: character
if( is.character(line_color) == FALSE ){
return('line_color must be of character class')
}
# line_color: valid color (check intentionally disabled)
# paleta_match <- match(x = line_color, table = colors())
#
# if( is.na(sum(paleta_match) ) == T ) {
# aux_var <- line_color[ which(is.na(paleta_match) ) ]
# return( paste0(aux_var, ' ', 'is not a valid line_color. Find valid color names with colors()') )
#
# }
}
## x_lab
# x_lab: character
if( is.character(x_lab) == FALSE ){
return('x_lab must be of class character')
}
# x_lab: a single value
if( length(x_lab) != 1){
return('x_lab must be of length one')
}
## y_lab
# y_lab: character
if( is.character(y_lab) == FALSE ){
return('y_lab must be of class character')
}
# y_lab: one label (single axis) or two (double axis)
if( is.null(double_yaxis) == TRUE){
# single axis
if( length(y_lab) != 1){
return('y_lab must be of length one')
}
} else {
# double axis
if( length(y_lab) != 2){
return('y_lab must be of length two')
}
}
## title_lab
# title_lab: a single character string
if( is.null(title_lab) == FALSE){
# character
if( is.character(title_lab) == FALSE ){
return('title_lab argument must be of character class')
}
# single
if( length(title_lab) != 1 ){
return('title_lab length must be one')
}
}
## legend_lab
if( is.null(legend_lab) == FALSE ){
n_legend_lab <- length(legend_lab)
# legend_lab: character
if( is.character(legend_lab) == FALSE ){
return('legend_lab must be of class character')
}
# legend_lab: count
if( n_col_number != n_legend_lab){
return('You must provide as many legend_lab strings as line plots')
}
}
## double_yaxis
if( is.null(double_yaxis) == FALSE){
n_double_yaxis <- length(double_yaxis)
# double_yaxis: numeric
if( is.numeric(double_yaxis) == FALSE){
return('double_axis argument must be of numeric class')
}
# double_yaxis: count
if( interactive == FALSE){
# ggplot2
if( n_double_yaxis != 2 ){
return('In interactive = FALSE double_yaxis arguments only allows a numeric vector of length two')
}
} else {
# plotly
if(n_double_yaxis != n_col_number){
return('double_yaxis numeric vector argument must be of the same length as col_number')
}
}
# double_yaxis: only 1 (main axis) and 2 (secondary axis) are valid codes
target_nums <- c(1, 2)
match_nums <- match(x = double_yaxis, table = target_nums)
if( is.na( sum(match_nums) ) == TRUE ){
return('Only 1 and 2 are allow as arguments in double_yaxis')
}
}
## list_extra
if( is.null(list_extra) == FALSE ){
if( interactive == FALSE){
# ggplot2
# list_extra: list
if( is.list(list_extra) == FALSE){
return('list_extra argument must be of list class')
}
}
}# end list_extra
## from
if( is.null(from) == FALSE){
# from: character
if( is.character(from) == FALSE ){
return('from must be of class character')
}
# from: a single value
if( length(from) != 1){
return('from must be of length one')
}
}
## to
if( is.null(to) == FALSE){
# to: character
if( is.character(to) == FALSE ){
return('to must be of class character')
}
# to: a single value
if( length(to) != 1){
return('to must be of length one')
}
}
## scatter
if( is.null(scatter) == FALSE ){
# is it numeric?
if( is.numeric(scatter) == FALSE ){
return('scatter argument must be of class numeric')
}
# is it of length 2?
if( length(scatter) != 2){
return('scatter supports just two variables. Please provide a numeric vector of length two.')
}
# is it contained in col_number?
aux_sacatter <- match(x = scatter, table = col_number)
if( is.na( sum(aux_sacatter) ) == TRUE ){
return('scatter numbers must be included in col_number argument.')
}
}
# end of argument validation
#**********************************************************************
#**********************************************************************
#**************
# Binding (avoids R CMD check notes for non-standard evaluation names)
#**************
Date <- value <- NULL
#**************
## METHOD BODY STARTS HERE
## Get the slots of interest
all_slots <- get_hydroMet(obj = obj, name = slot_name)
# check that the requested column number(s) exist within each slot
target_max_col <- sapply(X = all_slots, FUN = ncol)
if(n_slot_name == 1){
# a single slot
if(max(col_number) > target_max_col){
return('Unless one of the col_number does not exist in the slot')
}
} else {
# several slots
for(i in 1:n_slot_name){
aux_col_num <- col_number[[i]]
if(max(aux_col_num) > target_max_col[i]){
return( paste0('Unless one of the col_number (', slot_name[i], ') does not exist in the slot') )
}
}# end for
}
## Build the data frame to plot (df_plot) from the selected columns
# Check temporal resolution
N_all_slots <- length(all_slots)
if(N_all_slots > 1){
unidades <- rep(NA_character_, N_all_slots) # the time units must be the same
paso_tpo <- rep(NA_character_, N_all_slots) # the time step must be the same
for(i in 1:N_all_slots){
unidades[i] <- units( diff.Date( all_slots[[i]][ , 1] ) )
paso_tpo[i] <- length(unique( diff.Date( all_slots[[i]][ , 1] ) ) )
}# end for
if( length( unique(unidades)) != 1 ){
return('the variables must have the same temporal resolution')
}
if( unique(paso_tpo) != 1 ){
return('the variables must have the same temporal resolution')
}
} # end if
# Extract the variables of interest from each slot
if(N_all_slots > 1){
# in this case col_number is a list
df_plot <- all_slots[[1]][ , c(1, col_number[[1]] )]
for(i in 2:N_all_slots){
df_aux <- all_slots[[i]][ , c(1, col_number[[i]] )]
df_plot <- merge(df_plot, df_aux, all = TRUE)
}
} else {
# a single slot
df_plot <- all_slots[[1]][ , c(1, col_number)]
}
# Subset the data frame by date range if required
if( is.null(from) == FALSE & is.null(to) == FALSE){
df_plot <- subset(df_plot, subset = Date >= from & Date <= to)
} else if( is.null(from) == FALSE ) {
df_plot <- subset(df_plot, subset = Date >= from)
} else if( is.null(to) == FALSE) {
df_plot <- subset(df_plot, subset = Date <= to)
}
###
# Time series or scatter plot?
if( is.null(scatter) == TRUE){
# time series
## ggplot2 or plotly? => this defines the syntax to use
if( interactive == FALSE ){
## ggplot2
# Double y axis?
if( is.null(double_yaxis) == TRUE){
# a single y axis
# Build df_plot2 (long format) with per-row line type/color/legend columns
N_plot <- nrow(df_plot)
N_var <- ncol(df_plot) - 1
if( is.null(legend_lab) == FALSE ){
tipo_linea <- list()
color_linea <- list()
leyen_linea <- list()
for(i in 1:N_var){
tipo_linea[[i]] <- rep(line_type[i], N_plot)
color_linea[[i]] <- rep(line_color[i], N_plot)
leyen_linea[[i]] <- rep(legend_lab[i], N_plot)
}
linea <- c(sapply(X = tipo_linea, '['))
color <- c(sapply(X = color_linea, '['))
leyen <- c(sapply(X = leyen_linea, '['))
df_plot2 <- melt(data = df_plot, id.vars = 'Date')
df_plot2 <- cbind(df_plot2, linea, color, leyen)
} else {
tipo_linea <- list()
color_linea <- list()
for(i in 1:N_var){
tipo_linea[[i]] <- rep(line_type[i], N_plot)
color_linea[[i]] <- rep(line_color[i], N_plot)
}
linea <- c(sapply(X = tipo_linea, '['))
color <- c(sapply(X = color_linea, '['))
df_plot2 <- melt(data = df_plot, id.vars = 'Date')
leyen <- df_plot2$variable # fall back to column names as legend labels
df_plot2 <- cbind(df_plot2, linea, color, leyen)
}
# Plot
ggout <-
ggplot(data = df_plot2, aes(x = Date, y = value, color = leyen) ) +
geom_line(aes(linetype = leyen) ) +
scale_color_manual(values = line_color) +
scale_linetype_manual(values = line_type) +
theme(legend.title = element_blank()) +
ggtitle(label = title_lab) +
xlab(x_lab) + ylab(y_lab)
# add list_extra if required
if( is.null(list_extra) == FALSE){
ggout <- ggout +
Reduce(f = c, x = list_extra)
}
# Output
return(ggout)
} else {
# double axis
# Source: http://rstudio-pubs-static.s3.amazonaws.com/329613_f53e84d1a18840d5a1df55efb90739d9.html
# positions of the series on the main axis
main_pos <- which(double_yaxis == 1) + 1 # vector with column numbers
seco_pos <- which(double_yaxis == 2) + 1 # idem
# variable names used for scaling
y1 <- colnames(df_plot)[main_pos[1]] # main
y2 <- colnames(df_plot)[seco_pos[1]] # secondary
# names to plot
y1_plot <- colnames(df_plot)[main_pos]
y2_plot <- colnames(df_plot)[seco_pos]
# build matrix
m_plot <- as.matrix(x = df_plot[ , -1])
# rescale the secondary y axis:
# - subtract its minimum (so it starts at zero)
# - scale it to the same range as the main y axis
# - add the minimum value of y1
a <- range(df_plot[[y1]], na.rm = TRUE)
b <- range(df_plot[[y2]], na.rm = TRUE)
scale_factor <- diff(a)/diff(b)
m_plot[ , (seco_pos - 1)] <- ( (m_plot[ , (seco_pos - 1)] - b[1]) * scale_factor) + a[1]
# formula to back-transform the secondary y axis labels
trans <- ~ ((. - a[1]) / scale_factor) + b[1]
# build df_plot2 with the rescaled values
df_plot2 <- data.frame(df_plot[ , 1], m_plot)
colnames(df_plot2) <- colnames(df_plot)
# plot
ggout <-
ggplot(df_plot2) +
geom_line(aes_string('Date', y1_plot), col = line_color[ (main_pos - 1) ], lty = line_type[ (main_pos - 1) ] ) +
geom_line(aes_string('Date', y2_plot), col = line_color[ (seco_pos - 1) ], lty = line_type[ (seco_pos - 1) ] ) +
ggtitle(label = title_lab) +
xlab(x_lab) + ylab(y_lab[1]) +
scale_y_continuous(sec.axis = sec_axis(trans = trans, name = y_lab[2]))
# add list_extra if required
if( is.null(list_extra) == FALSE){
ggout <- ggout +
Reduce(f = c, x = list_extra)
}
# Output
return(ggout)
}
} else {
## plotly
# Double y axis?
if( is.null(double_yaxis) == TRUE ){
# single y
# Base plotly call
ppout <- plot_ly(df_plot, x = ~Date)
N_plots <- ncol(df_plot) - 1
# build traces without axis labels
for(i in 1:N_plots){
ppout <- ppout %>%
add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]) )
}# end for
# add labels
ppout <-
ppout %>%
layout(title = title_lab,
xaxis = list(title = x_lab),
yaxis = list(title = y_lab) )
# Output
return(ppout)
} else {
# double y
# Base plotly call
ppout <- plot_ly(df_plot, x = ~Date)
N_plots <- ncol(df_plot) - 1
# build traces without axis labels
for(i in 1:N_plots){
if(double_yaxis[i] == 1){
# main axis
ppout <- ppout %>%
add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]) )
} else if (double_yaxis[i] == 2){
# secondary axis
ppout <- ppout %>%
add_trace(y = df_plot[ , (i + 1)], name = legend_lab[i], type = 'scatter', mode = line_type[i], color = I(line_color[i]), yaxis = 'y2')
}
}# end for
# add labels
ppout <-
ppout %>%
layout(title = title_lab,
xaxis = list(title = x_lab),
yaxis = list(title = y_lab[1]),
yaxis2 = list(title = y_lab[2],
overlaying = 'y',
side = 'right') )
# Output
return(ppout)
}
} # end plotly
} else {
# Scatter plot
## ggplot2 or plotly? => this defines the syntax to use
if( interactive == FALSE){
# ggplot2
df_col <- c(1, scatter)
pos_col <- match(x = scatter, table = df_col)
if( is.null(list_extra) == TRUE){
ggout <-
ggplot(data = df_plot, aes(x = df_plot[ , pos_col[1]], y = df_plot[ , pos_col[2]] ) ) +
geom_point() +
ggtitle(label = title_lab) +
xlab(x_lab) + ylab(y_lab)
} else {
ggout <-
ggplot(data = df_plot, aes(x = df_plot[ , pos_col[1]], y = df_plot[ , pos_col[2]] ) ) +
Reduce(f = c, x = list_extra) +
ggtitle(label = title_lab) +
xlab(x_lab) + ylab(y_lab)
}
# Output
return(ggout)
} else {
# plotly (a ggplot is built first, then converted with ggplotly)
df_col <- c(1, scatter)
pos_col <- match(x = scatter, table = df_col)
if( is.null(list_extra) == TRUE){
ggout <-
ggplot(data = df_plot, aes(x = df_plot[ , pos_col[1]], y = df_plot[ , pos_col[2]] ) ) +
geom_point() +
ggtitle(label = title_lab) +
xlab(x_lab) + ylab(y_lab)
} else {
ggout <-
ggplot(data = df_plot, aes(x = df_plot[ , pos_col[1]], y = df_plot[ , pos_col[2]] ) ) +
Reduce(f = c, x = list_extra) +
ggtitle(label = title_lab) +
xlab(x_lab) + ylab(y_lab)
}
# Output
plotly_out <- ggplotly( p = ggout )
return(plotly_out)
} # end plotly
} # end scatter
} # end function
)
|
adca927123cf37f0fd17c485e6199ba9eb5692dd | 63beda838a67edda7393d3da991c98e6f7cd66bd | /MidtermSeatworks/SW5-Midterm.R | 5fd5f2495f80f7bd9f00b764274102ebd893255e | [] | no_license | hannahnavarro9827/Midterm_Repo | 780fbb222eda090963cd037d3f8ae4aa66a4dfea | 29926c6958b718036710e29b622eb6345d1661a8 | refs/heads/master | 2020-03-22T05:08:28.869932 | 2018-09-18T15:55:53 | 2018-09-18T15:55:53 | 139,544,917 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,166 | r | SW5-Midterm.R | #Seatwork No. 5 Midterm
# Seatwork No. 5 (continued)
# Navarro, Ma. Hannah G.
#
# Demonstrates basic image handling in R with the 'imager' and 'magick'
# packages: loading a bundled sample image, resizing, reading an image from
# the web, grayscale / custom colour-scale plotting, and rotation.
# Install the packages once before running (do not install inside a script):
#   install.packages(c("imager", "magick"))
library(imager)

# Load the sample 'coins' image shipped with imager. system.file() builds a
# portable path (fixed: the original used the typo 'coins.jng' and then a
# hard-coded per-user Windows library path).
file <- system.file('extdata/coins.png', package = 'imager')
img <- load.image(file)
plot(img)

# Resizing the image (fixed: the original resized an undefined object 'jemar')
img_small <- resize(img, 245, 267)
plot(img_small, main = "Resized Image")

# Reading an image from the web with magick (kept separate from the imager
# object: imager plot options do not apply to magick images)
library(magick)
web_img <- image_read('https://www.ghirardelli.com/wcsstore/GhirardelliCatalogAssetStore//Images/images/recipe-full-size/1123-frappe-mocha.jpg')
print(web_img)

# Grayscale and custom colour scales (on the imager object)
layout(t(1:2))
plot(img, rescale = FALSE)
plot(img / 2, rescale = FALSE)
cscale <- function(r, g, b) rgb(g, r, b)  # swap the red and green channels
plot(img, colourscale = cscale, rescale = FALSE)
cscale <- function(v) rgb(0, 0, v)        # map grayscale intensity to blue
grayscale(img) %>% plot(colourscale = cscale, rescale = FALSE)

# Rotating the image (fixed: 'exdata' -> 'extdata', and rotate the loaded
# image 'im' instead of the undefined 'jemar')
fpath <- system.file('extdata/parrots.png', package = 'imager')
im <- load.image(fpath)
imrotate(im, 270) %>% plot(main = "Rotating")
|
0cff20b0394c170389d9328ddd2d84d4a03cc8a0 | c6c0881ca260a793a70f5814ab6993c61dc2401c | /scripts/pred_all_jhs.R | c27a742e1e0595ae1da1faa506b2d3474eaa76e5 | [] | no_license | luyin-z/PRS_Height_Admixed_Populations | 5fe1c1bef372b3c64bfd143397709c7529a2705a | bf04ba884fd16e5e8c0685ccfbc86ed72d02c7f2 | refs/heads/master | 2023-03-16T17:05:56.658896 | 2020-09-18T16:58:04 | 2020-09-18T16:58:04 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,103 | r | pred_all_jhs.R | #!/usr/bin/env Rscript
## Preamble ##
############
## ---------------------------------------------------------------------------
## Computes the partial R^2 of polygenic scores for height (P+T at several
## p-value thresholds and LDpred at several rho values) in the JHS cohort:
## overall and within European-ancestry quantiles, with bootstrap confidence
## intervals. Results (PGS tables, bootstrap objects, summary table B_JHS)
## are saved as .Rds files.
## NOTE(review): all input/output paths are hard-coded under
## ~/height_prediction -- confirm they exist before running.
## ---------------------------------------------------------------------------
library(data.table)
library(dplyr)
library(ggplot2);library(reshape2); library(wesanderson)
library(rlist)
library(asbio)
library(GGally)
library(tidyr)
library(hexbin)
library(psychometric)
library(boot)
options(scipen=999)
# P+T p-value thresholds and LDpred rho suffixes used in the score file names
a<-c('_p1.0000e-08','_p1.0000e-07','_p1.0000e-06','_p1.0000e-05','_p3.0000e-05','_p1.0000e-04','_p3.0000e-04','_p1.0000e-03','_p3.0000e-03','_p1.0000e-02', '_p3.0000e-02', '_p1.0000e-01', '_p3.0000e-01', '_p1.0000e+00')
a1<-paste0('~/height_prediction/ldpred/output/JHS.score_P+T_r0.20',a, '.txt')
b<-c('_p1.0000e-03', '_p3.0000e-03','_p1.0000e-02', '_p3.0000e-02', '_p1.0000e-01', '_p3.0000e-01','_p1.0000e+00', '-inf')
b1<-paste0('~/height_prediction/ldpred/output/JHS.score_LDpred', b, '.txt')
d<-c(unlist(a1), unlist(b1))
# read all score files into a list of data.tables
lapply(d , function(X) fread(X))-> PGS_JHS
#read in phenotype data
fread('~/height_prediction/input/JHS/JHS_phenotypes.txt')-> Pheno_JHS
#a partial R2 function
source('~/height_prediction/strat_prs/scripts/Rsq_R2.R')
##############
my_names<-c(paste0('pt', a), paste0('gibbs', b))
names(PGS_JHS)<-my_names
#add PGS to Pheno table in order to be able to make multiple analyses
#Pheno_JHS[, SUBJID:=paste0("0_", as.character(Pheno_JHS[, SUBJID]))]
setkey(Pheno_JHS, SUBJID)
#add ancestry
ancestry<-do.call(rbind, lapply(1:22, function(X) fread(paste0('~/height_prediction/input/JHS/rfmix_anc_chr', X, '.txt'))))
anc_JHS<-ancestry %>% group_by(SUBJID) %>% summarise(AFR_ANC=mean(AFR_ANC), EUR_ANC=1-mean(AFR_ANC)) %>% as.data.table #mean across chromosomes for each individual
anc_JHS$SUBJID<-substr(anc_JHS[,SUBJID],3,9)
setkey(anc_JHS, SUBJID)
# merge phenotype + ancestry into each PGS table and tidy covariates
for (I in my_names){
colnames(PGS_JHS[[I]])[1]<-'SUBJID'
setkey(PGS_JHS[[I]], SUBJID)
PGS_JHS[[I]][Pheno_JHS, nomatch=0]-> PGS_JHS[[I]]
PGS_JHS[[I]][anc_JHS, nomatch=0]-> PGS_JHS[[I]]
PGS_JHS[[I]][,age2:=age_baseline^2]
PGS_JHS[[I]][,age:=age_baseline][, age_baseline:=NULL]
PGS_JHS[[I]][AFR_ANC>=0.05]-> PGS_JHS[[I]]#filter out individuals that are not african...
PGS_JHS[[I]][-which(is.na(PGS_JHS[[I]][,height_baseline])),]-> PGS_JHS[[I]]
PGS_JHS[[I]][,HEIGHTX:=height_baseline]
PGS_JHS[[I]][,height_baseline:=NULL]
PGS_JHS[[I]][,sex:=sex_selfreport]
PGS_JHS[[I]][,sex_selfreport:=NULL]
}
# nested linear models; partial R^2 of PRS = lm7 (covariates) vs lm8 (covariates + PRS)
lapply(PGS_JHS, function(X) lm(HEIGHTX~PRS, X))-> lm1_JHS
lapply(PGS_JHS, function(X) lm(HEIGHTX~age, X))-> lm2_JHS
lapply(PGS_JHS, function(X) lm(HEIGHTX~age2, X))-> lm3_JHS
lapply(PGS_JHS, function(X) lm(HEIGHTX~EUR_ANC, X))-> lm4_JHS
lapply(PGS_JHS, function(X) lm(HEIGHTX~sex+age, X))-> lm5_JHS
lapply(PGS_JHS, function(X) lm(HEIGHTX~sex+age+age2, X))-> lm6_JHS
lapply(PGS_JHS, function(X) lm(HEIGHTX~sex+age+age2+EUR_ANC, X))-> lm7_JHS
lapply(PGS_JHS, function(X) lm(HEIGHTX~sex+age+age2+EUR_ANC+PRS,X))-> lm8_JHS
partial_r2_JHS<-lapply(1:length(PGS_JHS), function(X) partial.R2(lm7_JHS[[X]], lm8_JHS[[X]])) #min 1.8, max 4.3
names(partial_r2_JHS)<- names(PGS_JHS)
# split individuals into two European-ancestry quantiles
lapply(PGS_JHS, function(X) X[, Quantile:= cut(EUR_ANC,
breaks=quantile(EUR_ANC, probs=seq(0,1, by=1/2), na.rm=TRUE),
include.lowest=TRUE)])-> PGS2_JHS
lapply(1:length(PGS2_JHS), function(X) PGS2_JHS[[X]][,Med_Eur_Anc:=median(EUR_ANC),by=Quantile])
lapply(1:length(PGS2_JHS), function(X) as.character(unique((PGS2_JHS[[X]]$Quantile))))-> a
lapply(a, function(X) c(X[2], X[1]))-> a1 # reorder so the lower-ancestry bin comes first
names(a1)<-names(PGS2_JHS)
# partial R^2 of the PRS within each ancestry quantile
r2_JHS<-vector('list', length(PGS2_JHS))
names(r2_JHS)<-names(PGS2_JHS)
for(I in names(r2_JHS)){
r2_JHS[[I]]<-vector('list', length(a1[[I]]))
names(r2_JHS[[I]])<-a1[[I]]
for(i in a1[[I]]){
r2_JHS[[I]][[i]]<-partial.R2(lm(HEIGHTX~sex+age+age2+EUR_ANC, PGS2_JHS[[I]][Quantile==i]),lm(HEIGHTX~sex+age+age2+EUR_ANC+PRS, PGS2_JHS[[I]][Quantile==i]))
}
}
# summary table: per-quantile and total partial R^2 with analytic CIs
B_JHS<-vector('list', length(r2_JHS))
names(B_JHS)<-names(r2_JHS)
for (I in names(r2_JHS)){
B_JHS[[I]]<-data.table(Quant=c(a1[[I]], "total"),
R_sq=c(unlist(r2_JHS[[I]]), partial_r2_JHS[[I]]),
Med_Eur_Anc=c(unique(PGS2_JHS[[I]][Quantile==a1[[I]][1]][,Med_Eur_Anc]), unique(PGS2_JHS[[I]][Quantile==a1[[I]][2]][,Med_Eur_Anc]), median(PGS2_JHS[[I]][, EUR_ANC])))
B_JHS[[I]][,N:=c(nrow(PGS2_JHS[[I]][Quantile==a1[[I]][1]]), nrow(PGS2_JHS[[I]][Quantile==a1[[I]][2]]),nrow(PGS2_JHS[[I]]))]
B_JHS[[I]][,K:=1] #number of predictors. Need to check later if this is correct.
B_JHS[[I]][, LCL:=CI.Rsq(R_sq, k=K, n=N)[3]]
B_JHS[[I]][, UCL:=CI.Rsq(R_sq, k=K, n=N)[4]]
}
### add confidence intervals calculated with bootstrap: https://www.statmethods.net/advstats/bootstrapping.html
results.JHS<-vector('list', length(PGS2_JHS))
names(results.JHS)<-names(PGS2_JHS)
# bootstrap the per-quantile partial R^2 (1000 replicates each)
for (I in names(PGS2_JHS)){
results.JHS[[I]]<-vector('list', length(a1[[I]])+1)
names(results.JHS[[I]])<-c(a1[[I]], "total")
lapply(a1[[I]], function(i) boot(data=PGS2_JHS[[I]][Quantile==i], statistic=rsq.R2,R=1000, formula1=HEIGHTX~sex+age+age2+EUR_ANC, formula2=HEIGHTX~sex+age+age2+EUR_ANC+PRS))-> results.JHS[[I]]
cat(I)
cat(' done\n')
}
#saveRDS(PGS3_JHS, file='/project/mathilab/bbita/gwas_admix/new_height/JHS/PGS3_JHS.Rds')
#saveRDS(results.JHS, file='/project/mathilab/bbita/gwas_admix/new_height/JHS/results.JHS.Rds')
# append the bootstrap for the full (total) sample to each list
for (I in names(PGS2_JHS)){
tp <- boot(data=PGS_JHS[[I]], statistic=rsq.R2, R=1000, formula1=HEIGHTX~sex+age+age2+EUR_ANC, formula2=HEIGHTX~sex+age+age2+EUR_ANC+PRS)
list.append(results.JHS[[I]], tp)-> results.JHS[[I]]
names(results.JHS[[I]])<-c(a1[[I]], "total")
cat(I)
cat(' done\n')
}
saveRDS(PGS2_JHS, file='~/height_prediction/ldpred/output/PGS3_JHS.Rds')
saveRDS(results.JHS, file='~/height_prediction/ldpred/output/results.JHS.Rds')
#confidence intervals
boots.ci.JHS<-lapply(results.JHS, function(Y) lapply(Y, function(X) boot.ci(X, type = c("norm", 'basic', "perc"))))
names(boots.ci.JHS)<-names(results.JHS)
# attach bootstrap CIs (normal / percentile / basic) and quantile bounds to B_JHS
for (I in names(PGS2_JHS)){
B_JHS[[I]][1:2,]-> a
B_JHS[[I]][3,]-> b
a[,HVB_L:=sapply(a$Quant, function(X) as.numeric(gsub("\\]","",gsub("\\(","",gsub("\\[","",strsplit(X,",")[[1]])))))[1,]]
a[,HVB_U:=sapply(a$Quant, function(X) as.numeric(gsub("\\]","",gsub("\\(","",gsub("\\[","",strsplit(X,",")[[1]])))))[2,]]
b[,HVB_L:=1]
b[,HVB_U:=1]
rbind(a,b)->B_JHS[[I]]
B_JHS[[I]][, Dataset:='JHS_AA']
B_JHS[[I]][, boots_norm_L:=sapply(1:3, function(X) boots.ci.JHS[[I]][[X]]$normal[2])]
B_JHS[[I]][, boots_norm_U:=sapply(1:3, function(X) boots.ci.JHS[[I]][[X]]$normal[3])]
B_JHS[[I]][, boots_perc_L:=sapply(1:3, function(X) boots.ci.JHS[[I]][[X]]$perc[4])]
B_JHS[[I]][, boots_perc_U:=sapply(1:3, function(X) boots.ci.JHS[[I]][[X]]$perc[5])]
B_JHS[[I]][, boots_basic_L:=sapply(1:3, function(X) boots.ci.JHS[[I]][[X]]$basic[4])]
B_JHS[[I]][, boots_basic_U:=sapply(1:3, function(X) boots.ci.JHS[[I]][[X]]$basic[5])]
}
saveRDS(B_JHS, file="~/height_prediction/ldpred/output/B_JHS.Rds")
|
865534576d450d9b19c7c2f3cc6a4a934c363fb2 | 7fc38c30ffa40b0e4efe78c529380038807b954c | /messageAcrossScripts/scripts/plotsGeneratorCHI.r | 5c43b9c8899829326066c6a2e4744677f447a609 | [
"CC-BY-4.0"
] | permissive | SamGomes/message-across | 2ac0ecf4958999e98639595f779d78a3abc09793 | 9edf8638d98e05755087c4138ce146c11eeab5ca | refs/heads/master | 2023-07-19T20:54:14.161122 | 2023-07-12T10:55:52 | 2023-07-12T10:55:52 | 132,795,031 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,238 | r | plotsGeneratorCHI.r | suppressMessages(library(ggplot2))
suppressMessages(library(multcomp))
suppressMessages(library(nlme))
suppressMessages(library(pastecs))
suppressMessages(library(reshape))
suppressMessages(library(tidyverse))
suppressMessages(library(sjPlot))
suppressMessages(library(sjmisc))
suppressMessages(library(dplyr))
suppressMessages(library(emmeans))
## Silence warnings for the whole script; the original setting is restored
## at the bottom via options(warn = oldw).
oldw <- getOption("warn")
options(warn = -1)
print("Plotting main effects in facets...")
## Keep only the two score systems of interest and give them readable labels:
## A -> Competition, C -> Mutual Help (B and D are dropped).
myData <- read.csv(file="output/meltedDataThreeCategories.csv", header=TRUE, sep=",")
myData<-myData[!(myData$ScoreSystem=="B" | myData$ScoreSystem=="D"),]
levels(myData$ScoreSystem)[levels(myData$ScoreSystem) == "A"] <- "Competition"
levels(myData$ScoreSystem)[levels(myData$ScoreSystem) == "C"] <- "Mutual Help"
#myData$ScoreSystem <- factor(myData$ScoreSystem, levels=rev(levels(myData$ScoreSystem)))
## Order the personality-trait factors (C = conscientiousness,
## A = agreeableness -- presumably Big Five; verify with study docs) so the
## legend reads High/Medium/Low top to bottom.
myData$C <- factor(myData$C , levels=c("High", "Medium", "Low"))
myData$A <- factor(myData$A , levels=c("High", "Medium", "Low"))
## Interaction plot 1: take actions ~ conscientiousness x score system.
## The bare `x + ...` expression auto-prints the plot at top level, and the
## subsequent ggsave() saves the last displayed plot.
lm0 = lm(takes ~ C * ScoreSystem, data = myData)
x <- emmip(lm0, C ~ ScoreSystem, engine="ggplot", CIs = TRUE) + labs(x="Game Version", y="Average Take Actions", colour="Conscienciousness") + theme(text = element_text(size=20)) + coord_cartesian(ylim=c(0,4)) + scale_x_discrete(labels=c("Competition", "Mutual Help"))
x + scale_colour_manual(values=c("#d7301f", "#fc8d59", "#fdcc8a"), labels=c("High", "Medium", "Low"), guide = guide_legend(reverse = FALSE)) #+ geom_jitter(aes(x = ScoreSystem, y = takes, colour = C), data = myData, pch = 20, width = 0.1, size=2)
suppressMessages(ggsave("plots/interactionEffects/takes/takes.png", height = 4, width = 10))
## Interaction plot 2: focus ("who") ~ agreeableness x score system.
## The y axis is a reversed 7-point scale relabelled from "Me 3" to
## "The Other -3".
lm0 = lm(who ~ A * ScoreSystem, data = myData)
x <- emmip(lm0, A ~ ScoreSystem, engine="ggplot", CIs = TRUE) + labs(x="Game Version", y="Focus", colour="Agreeableness") + theme(text = element_text(size=20)) + coord_cartesian(ylim=c(1,7)) + scale_x_discrete(labels=c("Competition", "Mutual Help"))
x + scale_colour_manual(values=c("#d7301f", "#fc8d59", "#fdcc8a"), labels=c("High", "Medium", "Low"), guide = guide_legend(reverse = FALSE)) + scale_y_reverse("Focus", labels = as.character(c("Me 3","2", "1", "Neutral 0", "-1", "-2", "The Other -3")), breaks = c(1,2,3,4,5,6,7)) #+ geom_jitter(aes(x = ScoreSystem, y = takes, colour = C), data = myData, pch = 20, width = 0.1, size=2)
suppressMessages(ggsave("plots/interactionEffects/who/who.png", height = 4, width = 10))
## Interaction plot 3: social valence ("what") ~ agreeableness x score system,
## reversed 7-point scale from "Help 3" to "Complicate -3".
lm0 = lm(what ~ A * ScoreSystem, data = myData)
x <- emmip(lm0, A ~ ScoreSystem, engine="ggplot", CIs = TRUE) + labs(x="Game Version", y="Social Valence", colour="Agreeableness") + theme(text = element_text(size=20)) + coord_cartesian(ylim=c(1,7)) + scale_x_discrete(labels=c("Competition", "Mutual Help"))
x + scale_colour_manual(values=c("#d7301f", "#fc8d59", "#fdcc8a"), labels=c("High", "Medium", "Low"), guide = guide_legend(reverse = FALSE)) + scale_y_reverse("Social Valence", labels = as.character(c("Help 3","2", "1", "Neutral 0", "-1", "-2", "Complicate -3")), breaks = c(1,2,3,4,5,6,7)) #+ geom_jitter(aes(x = ScoreSystem, y = takes, colour = C), data = myData, pch = 20, width = 0.1, size=2)
suppressMessages(ggsave("plots/interactionEffects/what/what.png", height = 4, width = 10))
## Restore the warning level saved at the top of the script.
options(warn = oldw)
|
9674134c3d5884fdd7ab1e44fee01adc513a2ef1 | 58bbae05372d92b197078e2dc457a3bca7f21401 | /R/generate.R | a96cdf2523e30d0efbeb71eed7e223b2b65c31d2 | [
"MIT"
] | permissive | jonatanrg/fglm_intern | f964cfb7f0913af4af92499842dc7c476294a540 | bdf5765931d7f17e6fbe94857723dbbd10a3c53b | refs/heads/main | 2023-06-04T22:55:18.714871 | 2021-06-24T08:05:26 | 2021-06-24T08:05:26 | 379,848,277 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 444 | r | generate.R | generate_normal <- function(X, b, d = 1, ymax = 1){
  # Validate arguments: X is a design matrix, b a coefficient vector with one
  # entry per column of X, d a positive scalar, ymax a non-negative scalar.
  stopifnot(is.matrix(X))
  p <- ncol(X)
  n <- nrow(X)
  stopifnot(is.numeric(b), length(b) == p)
  stopifnot(is.numeric(d), length(d) == 1, d > 0)
  # NOTE(review): 'd' and 'ymax' are validated but never used below, and the
  # local 'n' is unused as well -- confirm whether they are vestigial.
  stopifnot(is.numeric(ymax), length(ymax) == 1, ymax >= 0)
  # Latent draw: one standard-normal value per row of X.
  y <- rnorm(n = nrow(X),0,1)
  # Threshold the latent value at -X[i,] %*% b, producing a binary outcome
  # (probit-style generation). Could be vectorised as
  # as.numeric(y > -drop(X %*% b)).
  for (i in 1:length(y)){
    if (y[i] > -X[i,]%*%b){
      y[i] <- 1
    }
    else{
      y[i] <- 0
    }
  }
  return(y)
} |
38ebd7291c7d4ae01fc63da4aeb895331ff1f766 | 31547779d96b1369af66e09e3acefc38c81b70e3 | /server.R | 73f8e8346eb4c7e7187ea51701889df7c9be7f08 | [] | no_license | mwschultz/ExampleRUI | e3680c3ad08922ef4f30ceac1bf054db2e01541f | 700a27a71cf2bea8ce5fec65bf8b080ccc7ebda6 | refs/heads/master | 2021-01-16T18:10:49.595714 | 2017-08-11T19:08:36 | 2017-08-11T19:08:36 | 100,047,947 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 861 | r | server.R | library(shiny)
library(dplyr)
library(ggplot2)
shinyServer(function(input, output, session) {
  # Reactive subset of ggplot2::msleep restricted to the feeding type
  # selected in the UI (input$vore).
  newData <- reactive({msleep %>% filter(vore==input$vore)})
  # Scatter plot of total sleep vs. body weight, with point size from the
  # UI; optionally coloured by conservation status when the box is ticked.
  output$sleepPlot <- renderPlot({
    g <- ggplot(newData(), aes(x = bodywt, y = sleep_total))
    if (input$conservation == TRUE) {
      g + geom_point(size = input$size, aes(col = conservation))
    }
    else {
      g + geom_point(size = input$size)
    }
  })
  # Create text info: group averages for the selected vore.
  # Bug fix: the selected vore now goes between "order" and "is"; the
  # previous argument order rendered "... for order is omnivore 21.3 ...".
  output$info <- renderText({
    paste("The average body weight for order", input$vore, "is", round(mean(newData()$bodywt, na.rm = TRUE), 2), "and the average total sleep time is", round(mean(newData()$sleep_total, na.rm = TRUE), 2), sep = " ")
  })
  # Create output of observations: table of the filtered rows.
  output$table <- renderTable({
    select(newData(), name, sleep_total, bodywt)
  })
})
|
1d2af3f32a19e4bdc53b1eea1bec6a5a8b3b12e2 | 15dbd3d4e01b67d8d152396a22721ec7f00bc652 | /man/getsigns.Rd | ccc8c7526f984503fa47fe2b381afe493ac9cbec | [] | no_license | sckott/rwikispeedia | 5ff37d5d8a003ec625da6b988c5dacf40e2f45d6 | 71d5eaeeeab2954047b2eeeccf851fd81018b3bc | refs/heads/master | 2016-09-05T22:45:57.662950 | 2013-12-13T20:11:27 | 2013-12-13T20:11:27 | 3,345,521 | 1 | 2 | null | null | null | null | UTF-8 | R | false | false | 725 | rd | getsigns.Rd | \name{getsigns}
\alias{getsigns}
\title{Search Wikispeedia for speed limit signs.}
\usage{
getsigns(nelat = NA, swlat = NA, nelng = NA, swlng = NA,
curl = getCurlHandle(), ...)
}
\arguments{
\item{nelat}{Northeastern latitude bounding point.}
\item{swlat}{Southwestern latitude bounding point.}
\item{nelng}{Northeastern longitude bounding point.}
\item{swlng}{Southwestern longitude bounding point.}
\item{curl}{curl handle}
\item{...}{More args passed on to getForm()}
}
\value{
Data frame of speed limit signs with label, lat, long, mph,
kph, cog, and alt_meters.
}
\description{
Search Wikispeedia for speed limit signs.
}
\examples{
\dontrun{
getsigns(35.198676, 35.194676, -89.56558, -89.56958)
}
}
|
e0c4f47783f7d455ccf6cfd7f413e9b88894c9ce | 1ac95d877cf9686adbecf71548899da166e9e6f4 | /man/create_tibble.Rd | 3252febbef6fffb9fad41a8502a1e448b45aa86d | [] | no_license | joelgsponer/waRRior2 | 6bd343f537e5fd79eac757a3e8ca7f83d5023204 | c402215209735b89aefd32b3d54e5afa21e3fd03 | refs/heads/master | 2020-04-30T08:18:59.917677 | 2020-02-18T08:48:51 | 2020-02-18T08:48:51 | 176,711,590 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 441 | rd | create_tibble.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/examples.R
\name{create_tibble}
\alias{create_tibble}
\title{Create a tibble with random values}
\usage{
create_tibble(cols = 5, rows = 100, add_id = TRUE, type = "runif",
verbose = F, debug = F)
}
\arguments{
\item{debug}{}
}
\description{
Create a tibble with random values
}
\examples{
# Normal distributed values
create_tibble(rows = 100, type = "rnorm")
}
|
1f5861d5fc16b0bb7a70868669be880b4a897445 | ef35717b113233dc1a9122df61cf1c06645ceaec | /man/nyse.Rd | 95b5c22bf137e32148dc974e8e40ecf2c44f9ac5 | [] | no_license | cran/astsa | d33ba640a0edda0dd9e112ed011bb05ac5c36fb3 | 1e597fa74efc437eb247787fcf7d26e0fe0c6b17 | refs/heads/master | 2023-04-10T07:36:18.196423 | 2023-01-09T21:50:14 | 2023-01-09T21:50:14 | 17,694,513 | 7 | 14 | null | 2016-03-21T15:10:46 | 2014-03-13T04:00:06 | R | UTF-8 | R | false | false | 1,095 | rd | nyse.Rd | \name{nyse}
\alias{nyse}
\docType{data}
\title{Returns of the New York Stock Exchange
}
\description{Returns of the New York Stock Exchange (NYSE) from February 2, 1984 to December 31, 1991.
}
\format{
The format is:
Time-Series [1:2000] from 1 to 2000: 0.00335 -0.01418 -0.01673 0.00229 -0.01692 ...
}
\source{S+GARCH module - Version 1.1 Release 2: 1998
}
\note{Various packages have data sets called \code{nyse}. Consequently, it may be best to specify this data set as \code{nyse = astsa::nyse} to avoid conflicts.
}
\references{You can find demonstrations of astsa capabilities at
\href{https://github.com/nickpoison/astsa/blob/master/fun_with_astsa/fun_with_astsa.md}{FUN WITH ASTSA}.
The most recent version of the package can be found at \url{https://github.com/nickpoison/astsa/}.
In addition, the News and ChangeLog files are at \url{https://github.com/nickpoison/astsa/blob/master/NEWS.md}.
The webpages for the texts and some help on using R for time series analysis can be found at
\url{https://nickpoison.github.io/}.
}
\keyword{datasets}
|
f6ee6e4263d8a54c120683c026150a42ff911dcd | 4f4ee1b08bb83448eb96554c606387935456ae55 | /man/codelist.Rd | d438eea7be459385be22e694bb9169a1296401ec | [] | no_license | altaf-ali/countrycode | 2d3a61e41c04c12c16749947f594773f934c609f | b8db1690b1f3f82b05b858bba0808be07327ec43 | refs/heads/master | 2020-03-23T01:02:17.050397 | 2018-07-15T23:18:34 | 2018-07-15T23:18:34 | 60,879,538 | 0 | 0 | null | 2018-07-13T16:18:31 | 2016-06-10T22:05:17 | R | UTF-8 | R | false | true | 4,126 | rd | codelist.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codelist.R
\docType{data}
\name{codelist}
\alias{codelist}
\title{Country Code Translation Data Frame (Cross-Sectional)}
\format{data frame with codes as columns}
\description{
A data frame used internally by the \code{countrycode()} function. `countrycode` can use any valid code as destination, but only some codes can be used as origin.
}
\details{
Origin and Destination:
\itemize{
\item country.name: country name (English)
\item country.name.de: country name (German)
\item cowc: Correlates of War character
\item cown: Correlates of War numeric
\item ecb: European Central Bank
\item eurostat: Eurostat
\item fao: Food and Agriculture Organization of the United Nations numerical code
\item fips: FIPS 10-4 (Federal Information Processing Standard)
\item gaul: Global Administrative Unit Layers
\item genc2c: GENC 2-letter code
\item genc3c: GENC 3-letter code
\item genc3n: GENC numeric code
\item imf: International Monetary Fund
\item ioc: International Olympic Committee
\item iso2c: ISO-2 character
\item iso3c: ISO-3 character
\item iso2n: ISO-2 numeric
\item iso3n: ISO-3 numeric
\item p4n: Polity IV numeric country code
\item p4c: Polity IV character country code
\item un: United Nations M49 numeric codes
\item unpd: United Nations Procurement Division
\item vdem: Varieties of Democracy (V-Dem version 8, April 2018)
\item wb: World Bank (very similar but not identical to iso3c)
\item wvs: World Values Survey numeric code
}
Destination only:
\itemize{
\item ar5: IPCC's regional mapping used both in the Fifth Assessment Report
(AR5) and for the Reference Concentration Pathways (RCP)
\item continent: Continent as defined in the World Bank Development Indicators
\item cow.name: Correlates of War country name
\item ecb.name: European Central Bank country name
\item eurocontrol_pru: European Organisation for the Safety of Air Navigation
\item eurocontrol_statfor: European Organisation for the Safety of Air Navigation
\item eurostat.name: Eurostat country name
\item eu28: Member states of the European Union (as of December 2015),
without special territories
\item fao.name: Food and Agriculture Organization of the United Nations country name
\item fips.name: FIPS 10-4 Country name
\item genc.name: Geopolitical Entities, Names and Codes standard country names
\item icao: International Civil Aviation Organization
\item icao_region: International Civil Aviation Organization (Region)
\item ioc.name: International Olympic Committee country name
\item iso.name.en: ISO English short name
\item iso.name.fr: ISO French short name
\item p4.name: Polity IV country name
\item region: Regions as defined in the World Bank Development Indicators
\item un.name.ar: United Nations Arabic country name
\item un.name.en: United Nations English country name
\item un.name.es: United Nations Spanish country name
\item un.name.fr: United Nations French country name
\item un.name.ru: United Nations Russian country name
\item un.name.zh: United Nations Chinese country name
\item unpd.name: United Nations Procurement Division country name
\item wvs.name: World Values Survey numeric code country name
\item cldr.*: 622 country name variants from the UNICODE CLDR project.
Inspect the `countrycode::cldr_examples` data.frame for a full list of
available country names and examples.
}
}
\note{
The Correlates of War (cow) and Polity 4 (p4) project produce codes in
country year format. Some countries go through political transitions that
justify changing codes over time. When building a purely cross-sectional
conversion dictionary, this forces us to make arbitrary choices with respect
to some entities (e.g., Western Germany, Vietnam, Serbia). `countrycode`
includes a reconciled dataset in panel format:
`countrycode::countrycode_panel`. Instead of converting code, we recommend
that users dealing with panel data "left-merge" their data into this panel
dictionary.
}
\keyword{datasets}
|
fcfc46c1338bcafaea8aed2d824ccb464625a717 | b15fc352b31a36d0e136e1fae4c0c093ffa3635d | /man/AUC.uno.Rd | 7d581668191b31d0afc7799cff28a28859e3ab76 | [] | no_license | cran/survAUC | 76c3c54be6721f4e7e62c248c9ad231289a2384b | e211cd09d0523bdfcfe701a9aca191770878d75b | refs/heads/master | 2023-04-06T12:50:33.745180 | 2023-03-21T17:20:02 | 2023-03-21T17:20:02 | 17,700,199 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,413 | rd | AUC.uno.Rd | \name{AUC.uno}
\alias{AUC.uno}
\alias{spec.uno}
\alias{sens.uno}
\title{AUC estimator proposed by Uno et al.}
\description{
Uno's estimator of cumulative/dynamic AUC for right-censored time-to-event data
}
\usage{
AUC.uno(Surv.rsp, Surv.rsp.new, lpnew, times, savesensspec=FALSE)
sens.uno(Surv.rsp, Surv.rsp.new, lpnew, times)
spec.uno(Surv.rsp.new, lpnew, times)
}
\arguments{
\item{Surv.rsp}{A \code{Surv(.,.)} object containing to the outcome of the training data.}
\item{Surv.rsp.new}{A \code{Surv(.,.)} object containing the outcome of the test data.}
\item{lpnew}{The vector of predictors obtained from the test data.}
\item{times}{A vector of time points at which to evaluate AUC.}
\item{savesensspec}{A logical specifying whether sensitivities and specificities
should be saved.}
}
\details{
The \code{sens.uno} and \code{spec.uno} functions implement the estimators of
time-dependent true and false positive rates proposed in Section 5.1 of Uno et
al. (2007).
The \code{AUC.uno} function implements the estimator of cumulative/dynamic AUC
that is based on the TPR and FPR estimators proposed by
Uno et al. (2007). It is given by the area(s) under the time-dependent
ROC curve(s) estimated by \code{sens.sh} and \code{spec.sh}. The \code{iauc}
summary measure is given by the integral of AUC on
[0, max(\code{times})] (weighted by the estimated probability density of
the time-to-event outcome).
Uno's estimators are based on inverse-probability-of-censoring
weights and do not assume a specific working model for deriving the predictor
\code{lpnew}. It is assumed, however, that there is a one-to-one
relationship between the predictor and the expected survival times conditional
on the predictor. Note that the estimators implemented in \code{sens.uno},
\code{spec.uno} and \code{AUC.uno} are restricted to situations
where the random censoring assumption holds.
}
\value{
\code{AUC.uno} returns an object of class \code{survAUC}. Specifically,
\code{AUC.uno} returns a list with the following components:
\item{auc}{The cumulative/dynamic AUC estimates (evaluated at \code{times}).}
\item{times}{The vector of time points at which AUC is evaluated.}
\item{iauc}{The summary measure of AUC.}
\code{sens.uno} and \code{spec.uno} return matrices of dimensions \code{times} x
\code{(lpnew + 1)}. The elements of these matrices are the sensitivity and
specificity estimates for each threshold of \code{lpnew} and for each time point
specified in \code{times}.
}
\references{
Uno, H., T. Cai, L. Tian, and L. J. Wei (2007).\cr Evaluating prediction rules for
t-year survivors with censored regression models.\cr \emph{Journal of the American
Statistical Association} \bold{102}, 527--537.\cr
}
\seealso{
\code{\link{AUC.cd}}, \code{\link{AUC.sh}}, \code{\link{AUC.hc}},
\code{\link{IntAUC}}
}
\examples{
data(cancer,package="survival")
TR <- ovarian[1:16,]
TE <- ovarian[17:26,]
train.fit <- survival::coxph(survival::Surv(futime, fustat) ~ age,
x=TRUE, y=TRUE, method="breslow", data=TR)
lpnew <- predict(train.fit, newdata=TE)
Surv.rsp <- survival::Surv(TR$futime, TR$fustat)
Surv.rsp.new <- survival::Surv(TE$futime, TE$fustat)
times <- seq(10, 1000, 10)
AUC_Uno <- AUC.uno(Surv.rsp, Surv.rsp.new, lpnew, times)
names(AUC_Uno)
AUC_Uno$iauc
}
\keyword{classif}
|
6a9ada4630b9b7e96561e4debb358b8ce7aa89e1 | cbd70b829a3dffc23bffe01929d7732f2df815f5 | /man/plot_interaction_heatmap.Rd | a26ca03b808ede845b2b7a0951af7bef0b53835a | [] | no_license | kimberlyroche/ROL | 603ac7a71a0487b1ff999a14cc34681090e299fc | d33c8f63c692a55cdca9c551b718f1c04929d6f8 | refs/heads/master | 2021-02-18T23:58:43.631388 | 2020-12-08T23:47:55 | 2020-12-08T23:47:55 | 245,254,434 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,528 | rd | plot_interaction_heatmap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/universal_microbes.R
\name{plot_interaction_heatmap}
\alias{plot_interaction_heatmap}
\title{Plot heatmap over all pairwise (MAP) correlations between microbes at designated taxonomic level}
\usage{
plot_interaction_heatmap(
tax_level = "ASV",
logratio = "alr",
Sigmas = NULL,
DLM = FALSE,
cluster = TRUE,
taxon_idx = NULL,
show_plot = FALSE,
return_matrix = FALSE
)
}
\arguments{
\item{tax_level}{taxonomic level at which to agglomerate data}
\item{logratio}{logratio representation to use (e.g. "alr", "ilr", "clr")}
\item{Sigmas}{optional list (indexed by host short name) of MAP estimates of microbial covariance; if not provided, this will be loaded}
\item{DLM}{if TRUE, looks for DLM model fits instead of GP model fits}
\item{cluster}{optional flag to hierarchically cluster across hosts and interactions}
\item{taxon_idx}{optional logratio coordinate to render correlations against; if NULL, render all pairwise correlations}
\item{show_plot}{optional flag to show() plot in addition to rendering it to a file}
\item{return_matrix}{optional flag to return host x interaction correlation value matrix}
}
\value{
NULL or heatmap matrix
}
\description{
Plot heatmap over all pairwise (MAP) correlations between microbes at designated taxonomic level
}
\examples{
Sigmas <- load_MAP_estimates(tax_level = "ASV", DLM = TRUE, logratio = "clr")
plot_interaction_heatmap(tax_level = "ASV", logratio = "clr", Sigmas = Sigmas)
}
|
bd85db7e8f622f541483422a9c04c70d9582e814 | 4c5c4febfb7f8cddb8a13368e4b6dad965684eaf | /NEFSC_trawl_explore.R | 200a2ac76263c0d23de5646d25162a64176af21f | [] | no_license | jlmorano/NYB_ecosystem | 1260255747171a79c37c10e4d0e8cc1f73b5d152 | f616446f8309ff39febb29b06d6bfcea566eb4dd | refs/heads/master | 2023-01-13T03:46:43.324088 | 2020-11-20T18:19:24 | 2020-11-20T18:19:24 | 300,346,051 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,848 | r | NEFSC_trawl_explore.R | # Using NEFSC trawl data to explore species distribution along Northeast US coast
# setup
# setwd("/Users/janellemorano/Git/MAB_ecosystem")
library (dplyr)
fall_twl_svbio <- read_csv ("/Users/janellemorano/DATA/NEFSC_trawl/Fall_trawl/22560_FSCSTables/22560_UNION_FSCS_SVBIO.csv")
colnames(fall_twl_svbio)
# [1] "CRUISE6" "STRATUM" "TOW" "STATION" "ID" "SVSPP" "CATCHSEX"
# [8] "INDID" "LENGTH" "INDWT" "SEX" "MATURITY" "AGE" "STOM_VOLUME"
# [15] "STOM_WGT"
fall_twl_svcat <- read_csv ("/Users/janellemorano/DATA/NEFSC_trawl/Fall_trawl/22560_FSCSTables/22560_UNION_FSCS_SVCAT.csv")
colnames(fall_twl_svcat)
# [1] "CRUISE6" "STRATUM" "TOW" "STATION"
# [5] "ID" "LOGGED_SPECIES_NAME" "SVSPP" "CATCHSEX"
# [9] "EXPCATCHNUM" "EXPCATCHWT"
fall_twl_svlen <- read_csv ("/Users/janellemorano/DATA/NEFSC_trawl/Fall_trawl/22560_FSCSTables/22560_UNION_FSCS_SVLEN.csv")
colnames(fall_twl_svlen)
# [1] "CRUISE6" "STRATUM" "TOW" "STATION"
# [5] "ID" "LOGGED_SPECIES_NAME" "SVSPP" "CATCHSEX"
# [9] "LENGTH" "EXPNUMLEN"
fall_twl_svsta <- read_csv ("/Users/janellemorano/DATA/NEFSC_trawl/Fall_trawl/22560_FSCSTables/22560_UNION_FSCS_SVSTA.csv")
colnames(fall_twl_svsta)
# [1] "CRUISE6" "STRATUM" "TOW" "STATION" "ID"
# [6] "AREA" "SVVESSEL" "CRUNUM" "SVGEAR" "BEGIN_EST_TOWDATE"
# [11] "END_EST_TOWDATE" "BEGIN_GMT_TOWDATE" "END_GMT_TOWDATE" "EST_YEAR" "EST_MONTH"
# [16] "EST_DAY" "EST_JULIAN_DAY" "EST_TIME" "GMT_YEAR" "GMT_MONTH"
# [21] "GMT_DAY" "GMT_JULIAN_DAY" "GMT_TIME" "TOWDUR" "SETDEPTH"
# [26] "ENDDEPTH" "MINDEPTH" "MAXDEPTH" "AVGDEPTH" "BEGLAT"
# [31] "BEGLON" "ENDLAT" "ENDLON" "DECDEG_BEGLAT" "DECDEG_BEGLON"
# [36] "DECDEG_ENDLAT" "DECDEG_ENDLON" "CABLE" "PITCH" "HEADING"
# [41] "COURSE" "RPM" "DOPDISTB" "DOPDISTW" "DESSPEED"
# [46] "AIRTEMP" "CLOUD" "BAROPRESS" "WINDDIR" "WINDSP"
# [51] "WEATHER" "WAVEHGT" "SWELLDIR" "SWELLHGT" "BKTTEMP"
# [56] "XBT" "SURFTEMP" "SURFSALIN" "BOTTEMP" "BOTSALIN"
# SST
svsta_surftemp <- fall_twl_svsta %>%
group_by(EST_YEAR) %>%
filter(!is.na(SURFTEMP)) %>%
summarise(avg = mean(SURFTEMP), min = min(SURFTEMP), max = max(SURFTEMP))
ggplot(data = svsta_surftemp, aes(EST_YEAR, avg)) +
geom_line() +
theme_bw() +
ggtitle("Surface Temperature (from Fall: 22560_UNION_FSCS_SVSTA.csv)") +
xlab("") +
ylab("Average Temp")
# Bottom Temp
svsta_bottemp <- fall_twl_svsta %>%
group_by(EST_YEAR) %>%
filter(!is.na(BOTTEMP)) %>%
summarise(avg = mean(BOTTEMP), min = min(BOTTEMP), max = max(BOTTEMP))
ggplot(data = svsta_bottemp, aes(EST_YEAR, avg)) +
geom_line() +
theme_bw() +
ggtitle("Bottom Temperature (from Fall: 22560_UNION_FSCS_SVSTA.csv)") +
xlab("") +
ylab("Average Temp")
# Surface and Bottom Temp combined
ggplot() +
geom_line(data = svsta_surftemp, aes(EST_YEAR, avg), color = "red") +
geom_line(data = svsta_bottemp, aes(EST_YEAR, avg), color = "blue") +
theme_bw() +
theme(legend.position="bottom") +
ggtitle("Surface and Bottom Temperature (from Fall: 22560_UNION_FSCS_SVSTA.csv)") +
xlab("") +
ylab("Average Temp")
|
4b0c47bd63f09db5cce25cff6d8dc1a15c85e27f | badba9cc4390c3e60e5f0837c74697d645dacc8e | /ucom/man/convert_choiceDF.Rd | e6df7a4ebb11af4ec504617692e805826a249a29 | [
"MIT"
] | permissive | UCOMstudy/UCOM-data-collection | f7ccf03690782ae6861fef49329d54501dbf9bf3 | 9c4f8bfcd72e81776d3626cfcdfa30b87c6d0291 | refs/heads/master | 2020-04-10T20:59:07.067826 | 2020-03-27T18:43:35 | 2020-03-27T18:43:35 | 161,284,380 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 546 | rd | convert_choiceDF.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/transform.R
\name{convert_choiceDF}
\alias{convert_choiceDF}
\title{Convert choice Dataframe}
\usage{
convert_choiceDF(df, var_names,
pattern = "^(-)?[0-9]{1,5}(\\\\.[0-9]{1,4})?")
}
\arguments{
\item{df}{Data frame}
\item{var_names}{A vector of variable names to convert}
\item{pattern}{Regex pattern to match the numbers.
Default: `^(-)?[0-9]{1,5}(\\.[0-9]{1,4})?`}
}
\value{
Converted choice data frame
}
\description{
Convert choice Dataframe to remove text.
}
|
d5e4410c1fc2991cc926dfb70c0137472d0baf34 | 738cbcc77e4ffe408e3be114988e62141b4faf2e | /R/affixes.R | e38bb686f89de1657632e502bebc2366497d6902 | [
"MIT"
] | permissive | jmones/catmunalias | ab3da6ae9a2c507c422b791d768dbd10ba69e261 | 1dda8c32414fc90c2b1cd5e48b74e09b7323b664 | refs/heads/master | 2022-06-21T15:23:22.186997 | 2020-05-04T21:45:21 | 2020-05-04T21:45:21 | 257,105,799 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,093 | r | affixes.R | library(tidyverse)
#' Find, for each word, the first element of 'from' that matches it
#' according to 'compare_function'.
#'
#' @param words character vector of words that will be compared
#' @param from candidate strings to compare against
#' @param compare_function predicate taking (word, candidate) and returning
#'   a single logical, e.g. startsWith or endsWith
#' @return An integer vector of the same length as 'words' giving, for each
#'   word, the position in 'from' of its first match, or NA_integer_ if
#'   none matched for that word.
match_first <- function(words, from, compare_function) {
  # The previous implementation built the full words x from logical matrix
  # via purrr::cross() (deprecated) and failed with a coercion error whenever
  # some word matched no element of 'from': which.max() on an all-NA row
  # yields a zero-length result, so apply() returned a ragged list that
  # as.integer() could not coerce. This version returns NA_integer_ for such
  # words, as documented and as expected by the callers (they test is.na()).
  vapply(words, function(w) {
    # Indices of all matching candidates, in order; keep the first.
    hits <- which(vapply(from, function(f) isTRUE(compare_function(w, f)), logical(1)))
    if (length(hits) > 0) hits[1] else NA_integer_
  }, integer(1), USE.NAMES = FALSE)
}
#' Replace a matching suffix of each word with the corresponding prefix.
#' Words with no matching suffix are returned unchanged.
#'
#' @param words words that will be transformed
#' @param suffixes list of suffixes to search for
#' @param prefixes corresponding prefixes that will be prepended if a suffix
#'   matches
#' @return A list of the transformed words
from_suffix_to_prefix <- function(words, suffixes, prefixes) {
  idx <- match_first(words, suffixes, endsWith)
  map2(words, idx, function(word, i) {
    # No suffix matched: keep the word untouched.
    if (is.na(i)) {
      return(word)
    }
    # Strip the matched suffix, then prepend its paired prefix.
    stem <- substr(word, 1, nchar(word) - nchar(suffixes[i]))
    paste0(prefixes[i], stem)
  })
}
#' Replace a matching prefix of each word with the corresponding suffix.
#' Words with no matching prefix are returned unchanged.
#'
#' @param words words that will be transformed
#' @param prefixes list of prefixes to search for
#' @param suffixes corresponding suffixes that will be appended if a prefix
#'   matches
#' @return A list of the transformed words
from_prefix_to_suffix <- function(words, prefixes, suffixes) {
  idx <- match_first(words, prefixes, startsWith)
  map2(words, idx, function(word, i) {
    # No prefix matched: keep the word untouched.
    if (is.na(i)) {
      return(word)
    }
    # Strip the matched prefix, then append its paired suffix.
    stem <- substr(word, nchar(prefixes[i]) + 1, nchar(word))
    paste0(stem, suffixes[i])
  })
}
|
a77267e1c7c0c33c55ea73645fb1f42f3a4a590d | 08eb06a7e26e8fafe46c2cb5bbeca0e2d666bd97 | /tools/quantp/quantp.r | b33d4449134e5d9d368a16cb61a2c05f7599e5dd | [
"MIT"
] | permissive | bernt-matthias/tools-galaxyp | 9e558e0c4126f629ccba25160f14f2cc7a45ebc6 | 6128c982eda8bb6c7a9fd01bd9a5b8d7fc3f810d | refs/heads/master | 2023-08-17T03:10:04.843458 | 2023-08-07T10:49:17 | 2023-08-07T10:49:17 | 104,768,403 | 0 | 0 | MIT | 2022-11-25T21:24:32 | 2017-09-25T15:31:52 | JavaScript | UTF-8 | R | false | false | 67,744 | r | quantp.r | #***************************************************************************************************************************************
# Functions: Start
#***************************************************************************************************************************************
#===============================================================================
# PCA
#===============================================================================
# PCA of the quantification matrix across all samples, coloured by group.
# Writes a static PNG to 'outfile' and an interactive plotly HTML next to it.
#   df            data frame whose first column is an identifier and whose
#                 remaining columns are per-sample abundances
#   sampleinfo_df sample metadata indexed by sample name; its 2nd column is
#                 the group label used for colouring
#   outfile       path of the PNG to write (the HTML copy replaces .png
#                 with .html)
multisample_PCA = function(df, sampleinfo_df, outfile)
{
  # Drop the identifier column; remaining columns are one per sample.
  tempdf = df[,-1];
  tempcol = colnames(tempdf);
  # Group label for each sample, looked up by sample name.
  tempgrp = sampleinfo_df[tempcol,2];
  # Samples as rows for prcomp(); missing abundances are treated as 0.
  tempdf = t(tempdf) %>% as.data.frame();
  tempdf[is.na(tempdf)] = 0;
  tempdf$Group = tempgrp;
  png(outfile, width = 6, height = 6, units = 'in', res=300);
  # bitmap(outfile, "png16m");
  g = autoplot(prcomp(select(tempdf, -Group)), data = tempdf, colour = 'Group', size=3);
  # Bug fix: derive the HTML path from this function's 'outfile' argument.
  # The previous code referenced 'outplot', which is not defined in this
  # scope (it leaked in from other plotting routines via the global env).
  saveWidget(ggplotly(g), file.path(gsub("\\.png", "\\.html", outfile)))
  plot(g);
  dev.off();
}
#===============================================================================
# Regression and Cook's distance
#===============================================================================
#' Regression diagnostics for a single-sample proteome/transcriptome comparison.
#'
#' Fits lm(PE_abundance ~ TE_abundance) and appends to the HTML report:
#'   * a table of coefficients, p-values and fit statistics,
#'   * static (PNG) and interactive (plotly) diagnostic plots
#'     (residuals-vs-fitted, normal Q-Q, residuals-vs-leverage),
#'   * residual-based outliers (|z-score| > 2) with downloadable tables,
#'   * Cook's-distance influential observations (distance >
#'     cookdist_upper_cutoff * mean Cook's distance) with plots and tables,
#'   * scatter plots and correlation tables before/after removing the
#'     influential points.
#'
#' Args:
#'   PE_TE_data  - data.frame with columns PE_ID, PE_abundance, TE_abundance
#'                 (one row per gene); PE_ID values become the rownames so
#'                 diagnostic plots can label points by gene.
#'   htmloutfile - path of the HTML report; every cat() appends to it.
#'   append      - NOTE(review): accepted but never used; each cat()
#'                 hard-codes append = TRUE.
#'
#' Side effects: writes PNG/HTML plots and tab-separated tables into the
#' global `outdir`; also reads the global `cookdist_upper_cutoff`. Calls the
#' sibling helpers singlesample_cor() and the file-local saveWidget() wrapper.
singlesample_regression = function(PE_TE_data,htmloutfile, append=TRUE)
{
rownames(PE_TE_data) = PE_TE_data$PE_ID;
regmodel = lm(PE_abundance~TE_abundance, data=PE_TE_data);
regmodel_summary = summary(regmodel);
# --- Model-summary table (coefficients, sigma, F-statistic, R-squared) ------
cat("<font><h3>Linear Regression model fit between Proteome and Transcriptome data</h3></font>\n",
"<p>Assuming a linear relationship between Proteome and Transcriptome data, we here fit a linear regression model.</p>\n",
'<table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Parameter</font></th><th><font color=#ffcc33>Value</font></th></tr>\n',
file = htmloutfile, append = TRUE);
cat("<tr><td>Formula</td><td>","PE_abundance~TE_abundance","</td></tr>\n",
"<tr><td colspan='2' align='center'> <b>Coefficients</b></td>","</tr>\n",
"<tr><td>",names(regmodel$coefficients[1]),"</td><td>",regmodel$coefficients[1]," (Pvalue:", regmodel_summary$coefficients[1,4],")","</td></tr>\n",
"<tr><td>",names(regmodel$coefficients[2]),"</td><td>",regmodel$coefficients[2]," (Pvalue:", regmodel_summary$coefficients[2,4],")","</td></tr>\n",
"<tr><td colspan='2' align='center'> <b>Model parameters</b></td>","</tr>\n",
"<tr><td>Residual standard error</td><td>",regmodel_summary$sigma," (",regmodel_summary$df[2]," degree of freedom)</td></tr>\n",
"<tr><td>F-statistic</td><td>",regmodel_summary$fstatistic[1]," ( on ",regmodel_summary$fstatistic[2]," and ",regmodel_summary$fstatistic[3]," degree of freedom)</td></tr>\n",
"<tr><td>R-squared</td><td>",regmodel_summary$r.squared,"</td></tr>\n",
"<tr><td>Adjusted R-squared</td><td>",regmodel_summary$adj.r.squared,"</td></tr>\n",
file = htmloutfile, append = TRUE);
cat("</table>\n", file = htmloutfile, append = TRUE);
cat(
"<font color='#ff0000'><h3>Regression and diagnostics plots</h3></font>\n",
file = htmloutfile, append = TRUE);
# --- Diagnostic plot 1: residuals vs fitted (static PNG) --------------------
outplot = paste(outdir,"/PE_TE_lm_1.png",sep="",collapse="");
png(outplot, width = 10, height = 10, units = 'in',res=300);
# bitmap(outplot, "png16m");
par(mfrow=c(1,1));
plot(regmodel, 1, cex.lab=1.5);
dev.off();
# Interactive copy of the same plot via ggfortify::autoplot + plotly.
suppressWarnings(g <- autoplot(regmodel, label = FALSE)[[1]] +
                   geom_point(aes(text=sprintf("Residual: %.2f<br>Fitted value: %.2f<br>Gene: %s", .fitted, .resid, PE_TE_data$PE_ID)),
                              shape = 1, size = .1, stroke = .2) +
                   theme_light())
saveWidget(ggplotly(g, tooltip= c("text")), file.path(gsub("\\.png", "\\.html", outplot)))
# --- Diagnostic plot 2: normal Q-Q of residuals -----------------------------
outplot = paste(outdir,"/PE_TE_lm_2.png",sep="",collapse="");
png(outplot,width = 10, height = 10, units = 'in', res=300);
# bitmap(outplot, "png16m");
par(mfrow=c(1,1));
# NOTE(review): plot() returns NULL invisibly, so this ggplotly(g) call is
# almost certainly a no-op/error swallowed by the device — verify intent.
g <- plot(regmodel, 2, cex.lab=1.5);
ggplotly(g)
dev.off();
suppressWarnings(g <- autoplot(regmodel, label = FALSE)[[2]] +
                   geom_point(aes(text=sprintf("Standarized residual: %.2f<br>Theoretical quantile: %.2f<br>Gene: %s", .qqx, .qqy, PE_TE_data$PE_ID)),
                              shape = 1, size = .1) +
                   theme_light())
saveWidget(ggplotly(g, tooltip = "text"), file.path(gsub("\\.png", "\\.html", outplot)))
# --- Diagnostic plot 5: residuals vs leverage -------------------------------
outplot = paste(outdir,"/PE_TE_lm_5.png",sep="",collapse="");
png(outplot, width = 10, height = 10, units = 'in',res=300);
# bitmap(outplot, "png16m");
par(mfrow=c(1,1));
plot(regmodel, 5, cex.lab=1.5);
dev.off();
# Cook's-distance contour helpers (NOTE(review): defined but not used below).
cd_cont_pos <- function(leverage, level, model) {sqrt(level*length(coef(model))*(1-leverage)/leverage)}
cd_cont_neg <- function(leverage, level, model) {-cd_cont_pos(leverage, level, model)}
suppressWarnings(g <- autoplot(regmodel, label = FALSE)[[4]] +
                   aes(label = PE_TE_data$PE_ID) +
                   geom_point(aes(text=sprintf("Leverage: %.2f<br>Standardized residual: %.2f<br>Gene: %s", .hat, .stdresid, PE_TE_data$PE_ID))) +
                   theme_light())
saveWidget(ggplotly(g, tooltip = "text"), file.path(gsub("\\.png", "\\.html", outplot)))
# --- Two-column table embedding plots 1 and 2 (PNG + patched widget div) ----
cat('<table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; ">', file = htmloutfile, append = TRUE);
cat(
'<tr bgcolor="#7a0019"><th>', "<font color='#ffcc33'><h4>1) <u>Residuals vs Fitted plot</h4></font></u></th>\n",
'<th><font color=#ffcc33><h4>2) <u>Normal Q-Q plot of residuals</h4></font></u></th></tr>\n',
file = htmloutfile, append = TRUE);
cat(
'<tr><td align=center><img src="PE_TE_lm_1.png" width=600 height=600>',
gsub("width:500px;height:500px", "width:600px;height:600px", extractWidgetCode(paste(outdir,"/PE_TE_lm_1.png",sep="",collapse=""))$widget_div),
'</td><td align=center><img src="PE_TE_lm_2.png" width=600 height=600>',
gsub("width:500px;height:500px", "width:600px;height:600px", extractWidgetCode(paste(outdir,"/PE_TE_lm_2.png",sep="",collapse=""))$widget_div),
'</td></tr>\n', file = htmloutfile, append = TRUE);
cat(
'<tr><td align=center>This plot checks for linear relationship assumptions.<br>If a horizontal line is observed without any distinct patterns, it indicates a linear relationship.</td>\n',
'<td align=center>This plot checks whether residuals are normally distributed or not.<br>It is good if the residuals points follow the straight dashed line i.e., do not deviate much from dashed line.</td></tr></table>\n',
file = htmloutfile, append = TRUE);
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Residuals data
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Flag residuals more than 2 SD from the mean as outliers.
res_all = regmodel$residuals;
res_mean = mean(res_all);
res_sd = sd(res_all);
res_diff = (res_all-res_mean);
res_zscore = res_diff/res_sd;
# res_outliers = res_all[which((res_zscore > 2)|(res_zscore < -2))]
tempind = which((res_zscore > 2)|(res_zscore < -2));
# NOTE(review): if tempind is empty, PE_TE_data[-integer(0),] drops ALL rows
# instead of none — confirm outliers always exist or guard this.
res_PE_TE_data_no_outlier = PE_TE_data[-tempind,];
res_PE_TE_data_no_outlier$residuals = res_all[-tempind];
res_PE_TE_data_outlier = PE_TE_data[tempind,];
res_PE_TE_data_outlier$residuals = res_all[tempind];
# Save the complete table for download (influential_observations)
temp_outlier_data = data.frame(res_PE_TE_data_outlier$PE_ID, res_PE_TE_data_outlier$TE_abundance, res_PE_TE_data_outlier$PE_abundance, res_PE_TE_data_outlier$residuals)
colnames(temp_outlier_data) = c("Gene", "Transcript abundance", "Protein abundance", "Residual value")
outdatafile = paste(outdir,"/PE_TE_outliers_residuals.txt", sep="", collapse="");
write.table(temp_outlier_data, file=outdatafile, row.names=F, sep="\t", quote=F);
# Save the complete table for download (non influential_observations)
temp_all_data = data.frame(PE_TE_data$PE_ID, PE_TE_data$TE_abundance, PE_TE_data$PE_abundance, res_all)
colnames(temp_all_data) = c("Gene", "Transcript abundance", "Protein abundance", "Residual value")
outdatafile = paste(outdir,"/PE_TE_abundance_residuals.txt", sep="", collapse="");
write.table(temp_all_data, file=outdatafile, row.names=F, sep="\t", quote=F);
# --- Residual-outlier summary table with download links ---------------------
cat('<br><h2 id="inf_obs"><font color=#ff0000>Outliers based on the residuals from regression analysis</font></h2>\n',
file = htmloutfile, append = TRUE);
cat('<table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; ">\n',
'<tr bgcolor="#7a0019"><th colspan=2><font color=#ffcc33>Residuals from Regression</font></th></tr>\n',
'<tr bgcolor="#7a0019"><th><font color=#ffcc33>Parameter</font></th><th><font color=#ffcc33>Value</font></th></tr>\n',
file = htmloutfile, append = TRUE);
cat("<tr><td>Mean Residual value</td><td>",res_mean,"</td></tr>\n",
"<tr><td>Standard deviation (Residuals)</td><td>",res_sd,"</td></tr>\n",
'<tr><td>Total outliers (Residual value > 2 standard deviation from the mean)</td><td>',length(tempind),' <font size=4>(<b><a href="PE_TE_outliers_residuals.txt" target="_blank">Download these ',length(tempind),' data points with high residual values here</a></b>)</font></td>\n',
'<tr><td colspan=2 align=center>',
'<font size=4>(<b><a href="PE_TE_abundance_residuals.txt" target="_blank">Download the complete residuals data here</a></b>)</font>',
"</td></tr>\n</table><br><br>\n",
file = htmloutfile, append = TRUE);
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
cat('<br><br><table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; ">', file = htmloutfile, append = TRUE);
cat(
'<tr bgcolor="#7a0019"><th><font color=#ffcc33><h4>3) <u>Residuals vs Leverage plot</h4></font></u></th></tr>\n',
file = htmloutfile, append = TRUE);
cat(
'<tr><td align=center><img src="PE_TE_lm_5.png" width=600 height=600>',
gsub("width:500px;height:500px", "width:600px;height:600px", extractWidgetCode(paste(outdir,"/PE_TE_lm_5.png",sep="",collapse=""))$widget_div)
, '</td></tr>\n',
file = htmloutfile, append = TRUE);
cat(
'<tr><td align=center>This plot is useful to identify any influential cases, that is outliers or extreme values.<br>They might influence the regression results upon inclusion or exclusion from the analysis.</td></tr></table><br>\n',
file = htmloutfile, append = TRUE);
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
# Cook's Distance
#^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
cat('<hr/><h2 id="inf_obs"><font color=#ff0000>INFLUENTIAL OBSERVATIONS</font></h2>\n',
file = htmloutfile, append = TRUE);
cat(
'<p><b>Cook\'s distance</b> computes the influence of each data point/observation on the predicted outcome. i.e. this measures how much the observation is influencing the fitted values.<br>In general use, those observations that have a <b>Cook\'s distance > than ', cookdist_upper_cutoff,' times the mean</b> may be classified as <b>influential.</b></p>\n',
file = htmloutfile, append = TRUE);
cooksd <- cooks.distance(regmodel);
# Static Cook's-distance plot: influential points red, others black, cutoff
# line at cookdist_upper_cutoff * mean distance.
outplot = paste(outdir,"/PE_TE_lm_cooksd.png",sep="",collapse="");
png(outplot, width = 10, height = 10, units = 'in', res=300);
# bitmap(outplot, "png16m");
par(mfrow=c(1,1));
plot(cooksd, main="Influential Obs. by Cook\'s distance", ylab="Cook\'s distance", xlab="Observations", type="n") # plot cooks distance
sel_outlier=which(cooksd>=as.numeric(cookdist_upper_cutoff)*mean(cooksd, na.rm=T))
sel_nonoutlier=which(cooksd<as.numeric(cookdist_upper_cutoff)*mean(cooksd, na.rm=T))
points(sel_outlier, cooksd[sel_outlier],pch="*", cex=2, cex.lab=1.5, col="red")
points(sel_nonoutlier, cooksd[sel_nonoutlier],pch="*", cex=2, cex.lab=1.5, col="black")
abline(h = as.numeric(cookdist_upper_cutoff)*mean(cooksd, na.rm=T), col="red") # add cutoff line
#text(x=1:length(cooksd)+1, y=cooksd, labels=ifelse(cooksd>as.numeric(cookdist_upper_cutoff)*mean(cooksd, na.rm=T),names(cooksd),""), col="red", pos=2) # add labels
dev.off();
# Interactive version built from a small data.frame of distances.
cooksd_df <- data.frame(cooksd)
cooksd_df$genes <- row.names(cooksd_df)
cooksd_df$index <- 1:nrow(cooksd_df)
cooksd_df$colors <- "black"
cutoff <- as.numeric(cookdist_upper_cutoff)*mean(cooksd, na.rm=T)
# NOTE(review): this `$<-` on a filtered subset errors when no point exceeds
# the cutoff (zero-row replacement) — confirm that case cannot occur.
cooksd_df[cooksd_df$cooksd > cutoff,]$colors <- "red"
g <- ggplot(cooksd_df, aes(x = index, y = cooksd, label = row.names(cooksd_df), color=as.factor(colors),
                           text=sprintf("Gene: %s<br>Cook's Distance: %.3f", row.names(cooksd_df), cooksd))) +
  ggtitle("Influential Obs. by Cook's distance") + xlab("Observations") + ylab("Cook's Distance") +
  #xlim(0, 3000) + ylim(0, .15) +
  scale_shape_discrete(solid=F) +
  geom_point(size = 2, shape = 8) +
  geom_hline(yintercept = cutoff,
             linetype = "dashed", color = "red") +
  scale_color_manual(values = c("black" = "black", "red" = "red")) +
  theme_light() + theme(legend.position="none")
saveWidget(ggplotly(g, tooltip= "text"), file.path(gsub("\\.png", "\\.html", outplot)))
cat(
'<img src="PE_TE_lm_cooksd.png" width=800 height=800>',
gsub("width:500px;height:500px", "width:800px;height:800px", extractWidgetCode(outplot)$widget_div),
'<br>In the above plot, observations above red line (',cookdist_upper_cutoff,' * mean Cook\'s distance) are influential. Genes that are outliers could be important. These observations influences the correlation values and regression coefficients<br><br>',
file = htmloutfile, append = TRUE);
# Split data into influential / non-influential by the same cutoff.
# NOTE(review): same empty-tempind hazard as above for the residual split.
tempind = which(cooksd>as.numeric(cookdist_upper_cutoff)*mean(cooksd, na.rm=T));
PE_TE_data_no_outlier = PE_TE_data[-tempind,];
PE_TE_data_no_outlier$cooksd = cooksd[-tempind];
PE_TE_data_outlier = PE_TE_data[tempind,];
PE_TE_data_outlier$cooksd = cooksd[tempind];
a = sort(PE_TE_data_outlier$cooksd, decreasing=T, index.return=T);
PE_TE_data_outlier_sorted = PE_TE_data_outlier[a$ix,];
cat(
'<table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Parameter</font></th><th><font color=#ffcc33>Value</font></th></tr>\n',
file = htmloutfile, append = TRUE);
# Save the complete table for download (influential_observations)
temp_outlier_data = data.frame(PE_TE_data_outlier$PE_ID, PE_TE_data_outlier$TE_abundance, PE_TE_data_outlier$PE_abundance, PE_TE_data_outlier$cooksd)
colnames(temp_outlier_data) = c("Gene", "Transcript abundance", "Protein abundance", "Cook's distance")
outdatafile = paste(outdir,"/PE_TE_influential_observation.txt", sep="", collapse="");
write.table(temp_outlier_data, file=outdatafile, row.names=F, sep="\t", quote=F);
# Save the complete table for download (non influential_observations)
temp_no_outlier_data = data.frame(PE_TE_data_no_outlier$PE_ID, PE_TE_data_no_outlier$TE_abundance, PE_TE_data_no_outlier$PE_abundance, PE_TE_data_no_outlier$cooksd)
colnames(temp_no_outlier_data) = c("Gene", "Transcript abundance", "Protein abundance", "Cook's distance")
outdatafile = paste(outdir,"/PE_TE_non_influential_observation.txt", sep="", collapse="");
write.table(temp_no_outlier_data, file=outdatafile, row.names=F, sep="\t", quote=F);
cat("<tr><td>Mean Cook\'s distance</td><td>",mean(cooksd, na.rm=T),"</td></tr>\n",
"<tr><td>Total influential observations (Cook\'s distance > ",cookdist_upper_cutoff," * mean Cook\'s distance)</td><td>",length(tempind),"</td>\n",
"<tr><td>Observations with Cook\'s distance < ",cookdist_upper_cutoff," * mean Cook\'s distance</td><td>",length(which(cooksd<as.numeric(cookdist_upper_cutoff)*mean(cooksd, na.rm=T))),"</td>\n",
"</table><br><br>\n",
file = htmloutfile, append = TRUE);
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Scatter plot after removal of influential points
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
outplot = paste(outdir,"/AbundancePlot_scatter_without_outliers.png",sep="",collapse="");
# Axis limits are taken from the FULL data so before/after plots share scales.
min_lim = min(c(PE_TE_data$PE_abundance,PE_TE_data$TE_abundance));
max_lim = max(c(PE_TE_data$PE_abundance,PE_TE_data$TE_abundance));
png(outplot, width = 10, height = 10, units = 'in', res=300);
# bitmap(outplot,"png16m");
suppressWarnings(g <- ggplot(PE_TE_data_no_outlier, aes(x=TE_abundance, y=PE_abundance, label=PE_ID)) + geom_smooth() +
                   xlab("Transcript abundance log fold-change") + ylab("Protein abundance log fold-change") +
                   xlim(min_lim,max_lim) + ylim(min_lim,max_lim) +
                   geom_point(aes(text=sprintf("Gene: %s<br>Transcript Abundance (log fold-change): %.3f<br>Protein Abundance (log fold-change): %.3f",
                                               PE_ID, TE_abundance, PE_abundance))))
suppressMessages(plot(g))
suppressMessages(saveWidget(ggplotly(g, tooltip="text"), file.path(gsub("\\.png", "\\.html", outplot))))
dev.off();
# --- Side-by-side before/after scatter plots + correlation tables -----------
cat('<table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Scatterplot: Before removal</font></th><th><font color=#ffcc33>Scatterplot: After removal</font></th></tr>\n', file = htmloutfile, append = TRUE);
# Before
cat("<tr><td align=center><!--<font color='#ff0000'><h3>Scatter plot between Proteome and Transcriptome Abundance</h3></font>\n-->",
'<img src="TE_PE_scatter.png" width=600 height=600>',
gsub('id="html', 'id="secondhtml"',
     gsub("width:500px;height:500px", "width:600px;height:600px", extractWidgetCode(paste(outdir,"/TE_PE_scatter.png",sep="",collapse=""))$widget_div)),
'</td>\n',
file = htmloutfile, append = TRUE);
# After
cat("<td align=center>\n",
'<img src="AbundancePlot_scatter_without_outliers.png" width=600 height=600>',
gsub("width:500px;height:500px", "width:600px;height:600px", extractWidgetCode(outplot)$widget_div),
'</td></tr>\n',
file = htmloutfile, append = TRUE);
#@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
# Correlations recomputed on the outlier-free data (the "after" column).
cor_result_pearson = cor.test(PE_TE_data_no_outlier[,"TE_abundance"], PE_TE_data_no_outlier[,"PE_abundance"], method = "pearson");
cor_result_spearman = cor.test(PE_TE_data_no_outlier[,"TE_abundance"], PE_TE_data_no_outlier[,"PE_abundance"], method = "spearman");
cor_result_kendall = cor.test(PE_TE_data_no_outlier[,"TE_abundance"], PE_TE_data_no_outlier[,"PE_abundance"], method = "kendall");
cat('<tr><td>\n', file = htmloutfile, append=TRUE);
# "Before" correlations come from the sibling helper on the full data.
singlesample_cor(PE_TE_data, htmloutfile, append=TRUE);
cat('</td>\n', file = htmloutfile, append=TRUE);
cat('<td><table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Parameter</font></th><th><font color=#ffcc33>Method 1</font></th><th><font color=#ffcc33>Method 2</font></th><th><font color=#ffcc33>Method 3</font></th></tr>\n',
file = htmloutfile, append = TRUE);
cat(
"<tr><td>Correlation method</td><td>",cor_result_pearson$method,"</td><td>",cor_result_spearman$method,"</td><td>",cor_result_kendall$method,"</td></tr>\n",
"<tr><td>Correlation coefficient</td><td>",cor_result_pearson$estimate,"</td><td>",cor_result_spearman$estimate,"</td><td>",cor_result_kendall$estimate,"</td></tr>\n",
file = htmloutfile, append = TRUE)
cat("</table></td></tr></table>\n", file = htmloutfile, append = TRUE)
# Show at most the top 10 influential observations in the inline table.
if(dim(PE_TE_data_outlier)[1]<10)
{
tab_n_row = dim(PE_TE_data_outlier)[1];
}else{
tab_n_row = 10;
}
cat("<br><br><font size=5><b><a href='PE_TE_influential_observation.txt' target='_blank'>Download the complete list of influential observations</a></b></font> ",
"<font size=5><b><a href='PE_TE_non_influential_observation.txt' target='_blank'>Download the complete list (After removing influential points)</a></b></font><br>\n",
'<br><font color="brown"><h4>Top ',as.character(tab_n_row),' Influential observations (Cook\'s distance > ',cookdist_upper_cutoff,' * mean Cook\'s distance)</h4></font>\n',
file = htmloutfile, append = TRUE);
cat('<table border=1 cellspacing=0 cellpadding=5> <tr bgcolor="#7a0019">\n', sep = "",file = htmloutfile, append = TRUE);
cat("<th><font color=#ffcc33>Gene</font></th><th><font color=#ffcc33>Protein Log Fold-Change</font></th><th><font color=#ffcc33>Transcript Log Fold-Change</font></th><th><font color=#ffcc33>Cook's Distance</font></th></tr>\n",
file = htmloutfile, append = TRUE);
# NOTE(review): columns are addressed by POSITION (1,2,4,5) here — this
# assumes a fixed PE_TE_data column order; verify against the caller.
for(i in 1:tab_n_row)
{
cat(
'<tr>','<td>',as.character(PE_TE_data_outlier_sorted[i,1]),'</td>\n',
'<td>',format(PE_TE_data_outlier_sorted[i,2], scientific=F),'</td>\n',
'<td>',PE_TE_data_outlier_sorted[i,4],'</td>\n',
'<td>',format(PE_TE_data_outlier_sorted[i,5], scientific=F),'</td></tr>\n',
file = htmloutfile, append = TRUE);
}
cat('</table><br><br>\n',file = htmloutfile, append = TRUE);
}
#===============================================================================
# Heatmap
#===============================================================================
#' Hierarchically cluster genes on (PE_abundance, TE_abundance) and embed a
#' static heatmap (gplots::heatmap.2) plus an interactive one (d3heatmap)
#' into the HTML report, with a downloadable cluster-assignment table.
#'
#' Args:
#'   PE_TE_data  - data.frame with columns PE_ID, PE_abundance, TE_abundance.
#'   htmloutfile - HTML report path; output is appended.
#'   hm_nclust   - number of clusters to extract from the dendrogram (cutree).
#'
#' Side effects: writes PE_TE_heatmap.png/.html and
#' PE_TE_hc_clusterpoints.txt into the global `outdir`.
singlesample_heatmap=function(PE_TE_data, htmloutfile, hm_nclust){
cat('<br><table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Heatmap of PE and TE abundance values (Hierarchical clustering)</font></th><th><font color=#ffcc33>Number of clusters to extract: ',hm_nclust,'</font></th></tr>\n',
file = htmloutfile, append = TRUE);
# Cluster on Euclidean distance of the two abundance columns, then cut the
# tree into hm_nclust groups for the downloadable table.
hc=hclust(dist(as.matrix(PE_TE_data[,c("PE_abundance","TE_abundance")])))
hm_cluster = cutree(hc,k=hm_nclust);
outplot = paste(outdir,"/PE_TE_heatmap.png",sep="",collapse="");
png(outplot, width = 10, height = 10, units = 'in', res=300);
# bitmap(outplot, "png16m");
par(mfrow=c(1,1));
# Static heatmap: rows clustered, columns fixed (Colv=F), column-scaled.
hmap = heatmap.2(as.matrix(PE_TE_data[,c("PE_abundance","TE_abundance")]),
                 trace="none", cexCol=1, col=greenred(100),Colv=F,
                 labCol=c("Proteins","Transcripts"), scale="col",
                 hclustfun = hclust, distfun = dist);
dev.off();
# Interactive equivalent with a row dendrogram only.
p <- d3heatmap(as.matrix(PE_TE_data[,c("PE_abundance","TE_abundance")]), scale = "col",
               dendrogram = "row", colors = greenred(100),
               hclustfun = hclust, distfun = dist,
               show_grid = FALSE)
saveWidget(p, file.path(gsub("\\.png", "\\.html", outplot)))
cat('<tr><td align=center colspan="2">',
'<img src="PE_TE_heatmap.png" width=800 height=800>',
gsub("width:960px;height:500px", "width:800px;height:800px", extractWidgetCode(outplot)$widget_div),
'</td></tr>\n',
file = htmloutfile, append = TRUE);
# Cluster-assignment table offered for download.
temp_PE_TE_data = data.frame(PE_TE_data$PE_ID, PE_TE_data$TE_abundance, PE_TE_data$PE_abundance, hm_cluster);
colnames(temp_PE_TE_data) = c("Gene", "Transcript abundance", "Protein abundance", "Cluster (Hierarchical clustering)")
tempoutfile = paste(outdir,"/PE_TE_hc_clusterpoints.txt",sep="",collapse="");
write.table(temp_PE_TE_data, file=tempoutfile, row.names=F, quote=F, sep="\t", eol="\n")
cat('<tr><td colspan="2" align=center><font size=5><a href="PE_TE_hc_clusterpoints.txt" target="_blank"><b>Download the hierarchical cluster list</b></a></font></td></tr></table>\n',
file = htmloutfile, append = TRUE);
}
#===============================================================================
# K-means clustering
#===============================================================================
#' K-means cluster genes on (PE_abundance, TE_abundance) and write a static
#' PNG scatter (points coloured by cluster) plus an interactive plotly scatter
#' into the HTML report, with a downloadable cluster table.
#'
#' Args:
#'   PE_TE_data  - data.frame with columns PE_ID, PE_abundance, TE_abundance.
#'   htmloutfile - HTML report path; output is appended.
#'   nclust      - number of k-means clusters. Only 10 colours are defined
#'                 for the static plot; clusters beyond 10 are left unplotted
#'                 (same behaviour as before).
#'
#' Side effects: writes PE_TE_kmeans.png/.html and
#' PE_TE_kmeans_clusterpoints.txt into the global `outdir`.
singlesample_kmeans=function(PE_TE_data, htmloutfile, nclust){
PE_TE_data_kdata = PE_TE_data;
k1 = kmeans(PE_TE_data_kdata[,c("PE_abundance","TE_abundance")], nclust);
outplot = paste(outdir,"/PE_TE_kmeans.png",sep="",collapse="");
png(outplot, width = 10, height = 10, units = 'in', res=300);
# bitmap(outplot, "png16m");
par(mfrow=c(1,1));
scatter.smooth(PE_TE_data_kdata[,"TE_abundance"], PE_TE_data_kdata[,"PE_abundance"], xlab="Transcript Abundance", ylab="Protein Abundance", cex.lab=1.5);
# NOTE(review): these legend coordinates (1, 95) look unrelated to the data
# range and the two calls draw on top of each other — confirm intended layout.
legend(1, 95, legend=c("Cluster 1", "Line 2"), col="red", lty=1:1, cex=0.8)
legend(1, 95, legend="Cluster 2", col="green", lty=1:1, cex=0.8)
# Overlay each cluster's points in its own colour. This replaces ten
# copy-pasted which()/points() pairs with a single loop; an empty cluster id
# simply draws nothing, exactly as before.
cluster_cols = c("red", "green", "blue", "cyan", "black",
                 "brown", "gold", "thistle", "yellow", "orange");
for(k in seq_along(cluster_cols)){
ind=which(k1$cluster==k);
points(PE_TE_data_kdata[ind,"TE_abundance"], PE_TE_data_kdata[ind,"PE_abundance"], col=cluster_cols[k], pch=16);
}
dev.off();
# Interactive plot for k-means clustering
g <- ggplot(PE_TE_data, aes(x = TE_abundance, y = PE_abundance, label = row.names(PE_TE_data),
                            text=sprintf("Gene: %s<br>Transcript Abundance: %.3f<br>Protein Abundance: %.3f",
                                         PE_ID, TE_abundance, PE_abundance),
                            color=as.factor(k1$cluster))) +
  xlab("Transcript Abundance") + ylab("Protein Abundance") +
  scale_shape_discrete(solid=F) + geom_smooth(method = "loess", span = 2/3) +
  geom_point(size = 1, shape = 8) +
  theme_light() + theme(legend.position="none")
saveWidget(ggplotly(g, tooltip=c("text")), file.path(gsub("\\.png", "\\.html", outplot)))
cat('<br><br><table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>K-mean clustering</font></th><th><font color=#ffcc33>Number of clusters: ',nclust,'</font></th></tr>\n',
file = htmloutfile, append = TRUE);
# Cluster table sorted by cluster id, offered for download.
tempind = order(k1$cluster);
tempoutfile = paste(outdir,"/PE_TE_kmeans_clusterpoints.txt",sep="",collapse="");
write.table(data.frame(PE_TE_data_kdata[tempind, ], Cluster=k1$cluster[tempind]), file=tempoutfile, row.names=F, quote=F, sep="\t", eol="\n")
#paste(outdir,"/PE_TE_heatmap.png",sep="",collapse="");
cat('<tr><td colspan="2" align=center><img src="PE_TE_kmeans.png" width=800 height=800>',
gsub("width:500px;height:500px", "width:800px;height:800px", extractWidgetCode(outplot)$widget_div), '</td></tr>\n',
file = htmloutfile, append = TRUE);
cat('<tr><td colspan="2" align=center><font size=5><a href="PE_TE_kmeans_clusterpoints.txt" target="_blank"><b>Download the cluster list</b></a></font></td></tr></table><br><hr/>\n',
file = htmloutfile, append = TRUE);
}
#===============================================================================
# scatter plot
#===============================================================================
#' Scatter plot of protein vs transcript log fold-changes with a smoothed
#' trend line, saved both as a static PNG (`outfile`) and an interactive
#' plotly HTML alongside it (same path with .png replaced by .html).
#'
#' Args:
#'   PE_TE_data - data.frame with columns PE_ID, PE_abundance, TE_abundance.
#'   outfile    - destination PNG path.
singlesample_scatter = function(PE_TE_data, outfile)
{
# Use one shared range for both axes so the plot is square in data space.
axis_rng = range(c(PE_TE_data$PE_abundance, PE_TE_data$TE_abundance));
png(outfile, width = 10, height = 10, units = 'in', res=300);
# bitmap(outfile, "png16m");
suppressWarnings(
scatter_gg <- ggplot(PE_TE_data, aes(x=TE_abundance, y=PE_abundance, label=PE_ID)) +
  geom_smooth() +
  xlab("Transcript abundance log fold-change") +
  ylab("Protein abundance log fold-change") +
  xlim(axis_rng[1], axis_rng[2]) +
  ylim(axis_rng[1], axis_rng[2]) +
  geom_point(aes(text=sprintf("Gene: %s<br>Transcript Abundance (log fold-change): %.3f<br>Protein Abundance (log fold-change): %.3f",
                              PE_ID, TE_abundance, PE_abundance)),
             size = .5)
)
suppressMessages(plot(scatter_gg))
suppressMessages(saveWidget(ggplotly(scatter_gg, tooltip = "text"), file.path(gsub("\\.png", "\\.html", outfile))))
dev.off();
}
#===============================================================================
# Correlation table
#===============================================================================
#' Write an HTML table comparing Pearson, Spearman and Kendall correlations
#' between transcript (TE_abundance) and protein (PE_abundance) abundance.
#'
#' Args:
#'   PE_TE_data  - data.frame with numeric columns TE_abundance, PE_abundance.
#'   htmloutfile - file that receives the HTML table.
#'   append      - whether the first write appends to the file; FALSE
#'                 truncates it first. (Bug fix: this argument was previously
#'                 accepted but ignored — every cat() hard-coded TRUE.)
singlesample_cor = function(PE_TE_data, htmloutfile, append=TRUE)
{
cor_result_pearson = cor.test(PE_TE_data$TE_abundance, PE_TE_data$PE_abundance, method = "pearson");
cor_result_spearman = cor.test(PE_TE_data$TE_abundance, PE_TE_data$PE_abundance, method = "spearman");
cor_result_kendall = cor.test(PE_TE_data$TE_abundance, PE_TE_data$PE_abundance, method = "kendall");
# Table header: the only write that honours `append`; subsequent writes must
# append so the table stays in one piece.
cat(
'<table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Parameter</font></th><th><font color=#ffcc33>Method 1</font></th><th><font color=#ffcc33>Method 2</font></th><th><font color=#ffcc33>Method 3</font></th></tr>\n',
file = htmloutfile, append = append);
cat(
"<tr><td>Correlation method</td><td>",cor_result_pearson$method,"</td><td>",cor_result_spearman$method,"</td><td>",cor_result_kendall$method,"</td></tr>\n",
"<tr><td>Correlation coefficient</td><td>",cor_result_pearson$estimate,"</td><td>",cor_result_spearman$estimate,"</td><td>",cor_result_kendall$estimate,"</td></tr>\n",
file = htmloutfile, append = TRUE)
cat("</table>\n", file = htmloutfile, append = TRUE);
}
#===============================================================================
# Boxplot
#===============================================================================
#' Per-sample boxplots of abundance values, filled by sample group, written as
#' a static PNG and an interactive plotly HTML next to it.
#'
#' Args:
#'   df            - wide data.frame: first column is the feature identifier
#'                   ("Gene", "Genes" or "Protein"), remaining columns are
#'                   one abundance column per sample.
#'   sampleinfo_df - sample metadata indexed by sample name; column 2 is the
#'                   group label used for the fill colour.
#'   outfile       - destination PNG path; the interactive copy goes to the
#'                   same path with .png replaced by .html.
#'   fill_leg      - "No" overrides the group fill with alternating
#'                   "case"/"control" values (NOTE(review): the 2-element
#'                   vector is recycled across all rows — confirm intended).
#'   user_xlab, user_ylab - axis labels.
multisample_boxplot = function(df, sampleinfo_df, outfile, fill_leg, user_xlab, user_ylab)
{
# Transpose to one row per sample, zero-fill missing values, then melt to
# long form for ggplot.
tempdf = df[,-1, drop=FALSE];
tempdf = t(tempdf) %>% as.data.frame();
tempdf[is.na(tempdf)] = 0;
tempdf$Sample = rownames(tempdf);
tempdf1 = melt(tempdf, id.vars = "Sample");
if("Gene" %in% colnames(df)){
tempdf1$Name = df$Gene;
} else if ("Protein" %in% colnames(df)){
tempdf1$Name = df$Protein;
} else if ("Genes" %in% colnames(df)){
tempdf1$Name = df$Genes;
}
tempdf1$Group = sampleinfo_df[tempdf1$Sample,2];
# BUG FIX: this previously called png(outplot, ...) — `outplot` is not
# defined in this function, so the device opened on a stale global path (or
# errored). It now uses the `outfile` argument.
png(outfile, width = 6, height = 6, units = 'in', res=300);
# bitmap(outfile, "png16m");
if(fill_leg=="No"){
tempdf1$Group = c("case", "control")
}
g = ggplot(tempdf1, aes(x=Sample, y=value, fill=Group)) +
geom_boxplot()+
labs(x=user_xlab) + labs(y=user_ylab)
saveWidget(ggplotly(g), file.path(gsub("\\.png", "\\.html", outfile)))
plot(g);
dev.off();
}
## A wrapper to saveWidget which compensates for arguable BUG in
## saveWidget which requires `file` to be in current working
## directory.
## Wrapper around htmlwidgets::saveWidget that works around its requirement
## (arguably a bug) that `file` be in the current working directory: it
## temporarily setwd()s into the target directory, restoring the original
## directory via on.exit() even if saving fails.
##
## BUG FIX: extra arguments in `...` were accepted but silently dropped; they
## are now forwarded to htmlwidgets::saveWidget. selfcontained = FALSE is
## kept as the default behaviour of this wrapper.
saveWidget <- function (widget,file,...) {
  wd<-getwd()
  on.exit(setwd(wd))
  outDir<-dirname(file)
  file<-basename(file)
  setwd(outDir);
  htmlwidgets::saveWidget(widget,file=file,selfcontained = FALSE,...)
}
#===============================================================================
# Mean or Median of Replicates
#===============================================================================
#' Collapse replicate sample columns into one column per group by applying
#' `method` (e.g. "mean" or "median") row-wise across each group's samples.
#'
#' Args:
#'   TE_df, PE_df  - transcript / protein data.frames: column 1 is the feature
#'                   id, remaining columns are samples named as in
#'                   sampleinfo_df[,1].
#'   sampleinfo_df - data.frame with sample names (col 1) and a Group column.
#'   method        - name of the summary function passed to apply().
#'
#' Returns a list(TE_df_merged, PE_df_merged, sampleinfo_df_merged) where the
#' merged sample info assigns every merged column the single group "Group".
#'
#' NOTE(review): TE_df_merged / PE_df_merged are assigned with `<<-`, so this
#' function also creates/overwrites GLOBAL variables of those names as a side
#' effect — confirm whether downstream code depends on that before changing.
mergeReplicates = function(TE_df,PE_df, sampleinfo_df, method)
{
grps = unique(sampleinfo_df[,2]);
# One merged column per group; a single-sample group is passed through
# unchanged (apply() would fail on a 1-column selection).
TE_df_merged <<- sapply(grps, function(x){
tempsample = sampleinfo_df[which(sampleinfo_df$Group==x),1]
if(length(tempsample)!=1){
apply(TE_df[,tempsample],1,method);
}else{
return(TE_df[,tempsample]);
}
});
# Re-attach the feature-id column and the group names.
TE_df_merged <<- data.frame(as.character(TE_df[,1]), TE_df_merged);
colnames(TE_df_merged) = c(colnames(TE_df)[1], grps);
# Same merge for the protein table.
PE_df_merged <<- sapply(grps, function(x){
tempsample = sampleinfo_df[which(sampleinfo_df$Group==x),1]
if(length(tempsample)!=1){
apply(PE_df[,tempsample],1,method);
}else{
return(PE_df[,tempsample]);
}
});
PE_df_merged <<- data.frame(as.character(PE_df[,1]), PE_df_merged);
colnames(PE_df_merged) = c(colnames(PE_df)[1], grps);
#sampleinfo_df_merged = data.frame(Sample = grps, Group = grps, stringsAsFactors = F);
sampleinfo_df_merged = data.frame(Sample = grps, Group = "Group", stringsAsFactors = F);
return(list(TE_df_merged = TE_df_merged, PE_df_merged = PE_df_merged, sampleinfo_df_merged = sampleinfo_df_merged));
}
#===============================================================================
# (T-Test or Wilcoxon ranksum test) and Volcano Plot
#===============================================================================
perform_Test_Volcano = function(TE_df_data,PE_df_data,TE_df_logfold, PE_df_logfold,sampleinfo_df, method, correction_method,volc_with)
{
PE_colnames = colnames(PE_df_data);
control_sample = sampleinfo_df[which(sampleinfo_df$Group=="control"),1];
control_ind <<- sapply(control_sample, function(x){temp_ind = which(PE_colnames==x); as.numeric(temp_ind)});
condition_sample = sampleinfo_df[which(sampleinfo_df$Group=="case"),1];
condition_ind <<- sapply(condition_sample, function(x){temp_ind = which(PE_colnames==x); as.numeric(temp_ind)});
if(method=="mean"){
#PE_pval = apply(PE_df_data[2:length(colnames(PE_df_data))],1,function(x) t.test(x[condition_ind-1], x[control_ind-1])$p.value);
PE_pval = apply(PE_df_data[2:length(colnames(PE_df_data))],1,function(x) {obj<-try(t.test(x[condition_ind-1], x[control_ind-1]),silent=TRUE); if(is(obj, "try-error")){return(NA)}else{return(obj$p.value)}})
}else{
if(method=="median"){
PE_pval = apply(PE_df_data[2:length(colnames(PE_df_data))],1,function(x) {obj<-try(wilcox.test(x[condition_ind-1], x[control_ind-1]),silent=TRUE); if(is(obj, "try-error")){return(NA)}else{return(obj$p.value)}})
# PE_pval = apply(PE_df_data[2:length(colnames(PE_df_data))],1,function(x) wilcox.test(x[condition_ind-1], x[control_ind-1])$p.value);
}
}
PE_adj_pval = p.adjust(PE_pval, method = correction_method, n = length(PE_pval))
TE_colnames = colnames(TE_df_data);
control_sample = sampleinfo_df[which(sampleinfo_df$Group=="control"),1];
control_ind <<- sapply(control_sample, function(x){temp_ind = which(TE_colnames==x); as.numeric(temp_ind)});
condition_sample = sampleinfo_df[which(sampleinfo_df$Group=="case"),1];
condition_ind <<- sapply(condition_sample, function(x){temp_ind = which(TE_colnames==x); as.numeric(temp_ind)});
if(method=="mean"){
# TE_pval = apply(TE_df_data[2:length(colnames(TE_df_data))],1,function(x) t.test(x[condition_ind-1], x[control_ind-1])$p.value);
TE_pval = apply(TE_df_data[2:length(colnames(TE_df_data))],1,function(x) {obj<-try(t.test(x[condition_ind-1], x[control_ind-1]),silent=TRUE); if(is(obj, "try-error")){return(NA)}else{return(obj$p.value)}})
}else{
if(method=="median"){
TE_pval = apply(TE_df_data[2:length(colnames(TE_df_data))],1,function(x) {obj<-try(wilcox.test(x[condition_ind-1], x[control_ind-1]),silent=TRUE); if(is(obj, "try-error")){return(NA)}else{return(obj$p.value)}})
# TE_pval = apply(TE_df_data[2:length(colnames(TE_df_data))],1,function(x) wilcox.test(x[condition_ind-1], x[control_ind-1])$p.value);
}
}
TE_adj_pval = p.adjust(TE_pval, method = correction_method, n = length(TE_pval))
PE_TE_logfold_pval = data.frame(TE_df_logfold$Gene, TE_df_logfold$LogFold, TE_pval, TE_adj_pval, PE_df_logfold$LogFold, PE_pval, PE_adj_pval);
colnames(PE_TE_logfold_pval) = c("Gene", "Transcript log fold-change", "p-value (transcript)", "adj p-value (transcript)", "Protein log fold-change", "p-value (protein)", "adj p-value (protein)");
outdatafile = paste(outdir,"/PE_TE_logfold_pval.txt", sep="", collapse="");
write.table(PE_TE_logfold_pval, file=outdatafile, row.names=F, sep="\t", quote=F);
cat("<br><br><font size=5><b><a href='PE_TE_logfold_pval.txt' target='_blank'>Download the complete fold change data here</a></b></font><br>\n",
file = htmloutfile, append = TRUE);
if(length(condition_ind)!=1)
{
# Volcano Plot
if(volc_with=="adj_pval")
{
PE_pval = PE_adj_pval
TE_pval = TE_adj_pval
volc_ylab = "-log10 Adjusted p-value";
}else{
if(volc_with=="pval")
{
volc_ylab = "-log10 p-value";
}
}
outplot_PE = paste(outdir,"/PE_volcano.png",sep="",collapse="");
png(outplot_PE, width = 10, height = 10, units = 'in', res=300);
# bitmap(outplot, "png16m");
par(mfrow=c(1,1));
plot(PE_df_logfold$LogFold, -log10(PE_pval),
xlab="log2 fold change", ylab=volc_ylab,
type="n")
sel <- which((PE_df_logfold$LogFold<=log(2,base=2))&(PE_df_logfold$LogFold>=log(0.5, base=2))) # or whatever you want to use
points(PE_df_logfold[sel,"LogFold"], -log10(PE_pval[sel]),col="black")
PE_df_logfold$color <- "black"
#sel <- which((PE_df_logfold$LogFold>log(2,base=2))&(PE_df_logfold$LogFold<log(0.5,base=2))) # or whatever you want to use
sel <- which((PE_df_logfold$LogFold>log(2,base=2))|(PE_df_logfold$LogFold<log(0.5, base=2)))
sel1 <- which(PE_pval<=0.05)
sel=intersect(sel,sel1)
points(PE_df_logfold[sel,"LogFold"], -log10(PE_pval[sel]),col="red")
PE_df_logfold[sel,]$color <- "red"
sel <- which((PE_df_logfold$LogFold>log(2,base=2))|(PE_df_logfold$LogFold<log(0.5, base=2)))
sel1 <- which(PE_pval>0.05)
sel=intersect(sel,sel1)
points(PE_df_logfold[sel,"LogFold"], -log10(PE_pval[sel]),col="blue")
PE_df_logfold[sel,]$color <- "blue"
abline(h = -log(0.05,base=10), col="red", lty=2)
abline(v = log(2,base=2), col="red", lty=2)
abline(v = log(0.5,base=2), col="red", lty=2)
dev.off();
g <- ggplot(PE_df_logfold, aes(x = LogFold, -log10(PE_pval), color = as.factor(color),
text=sprintf("Gene: %s<br>Log2 Fold-Change: %.3f<br>-log10 p-value: %.3f<br>p-value: %.3f",
Genes, LogFold, -log10(PE_pval), PE_pval))) +
xlab("log2 fold change") + ylab("-log10 p-value") +
geom_point(shape=1, size = 1.5, stroke = .2) +
scale_color_manual(values = c("black" = "black", "red" = "red", "blue" = "blue")) +
geom_hline(yintercept = -log(0.05,base=10), linetype="dashed", color="red") +
geom_vline(xintercept = log(2,base=2), linetype="dashed", color="red") +
geom_vline(xintercept = log(0.5,base=2), linetype="dashed", color="red") +
theme_light() + theme(legend.position="none")
saveWidget(ggplotly(g, tooltip="text"), file.path(gsub("\\.png", "\\.html", outplot_PE)))
outplot_TE = paste(outdir,"/TE_volcano.png",sep="",collapse="");
png(outplot_TE, width = 10, height = 10, units = 'in', res=300);
# bitmap(outplot, "png16m");
par(mfrow=c(1,1));
plot(TE_df_logfold$LogFold, -log10(TE_pval),
xlab="log2 fold change", ylab=volc_ylab,
type="n")
sel <- which((TE_df_logfold$LogFold<=log(2,base=2))&(TE_df_logfold$LogFold>=log(0.5, base=2))) # or whatever you want to use
points(TE_df_logfold[sel,"LogFold"], -log10(TE_pval[sel]),col="black")
TE_df_logfold$color <- "black"
#sel <- which((TE_df_logfold$LogFold>log(2,base=2))&(TE_df_logfold$LogFold<log(0.5,base=2))) # or whatever you want to use
sel <- which((TE_df_logfold$LogFold>log(2,base=2))|(TE_df_logfold$LogFold<log(0.5, base=2)))
sel1 <- which(TE_pval<=0.05)
sel=intersect(sel,sel1)
points(TE_df_logfold[sel,"LogFold"], -log10(TE_pval[sel]),col="red")
TE_df_logfold[sel,]$color <- "red"
sel <- which((TE_df_logfold$LogFold>log(2,base=2))|(TE_df_logfold$LogFold<log(0.5, base=2)))
sel1 <- which(TE_pval>0.05)
sel=intersect(sel,sel1)
points(TE_df_logfold[sel,"LogFold"], -log10(TE_pval[sel]),col="blue")
TE_df_logfold[sel,]$color <- "blue"
abline(h = -log(0.05,base=10), col="red", lty=2)
abline(v = log(2,base=2), col="red", lty=2)
abline(v = log(0.5,base=2), col="red", lty=2)
dev.off();
g <- ggplot(TE_df_logfold, aes(x = LogFold, -log10(TE_pval), color = as.factor(color),
text=sprintf("Gene: %s<br>Log2 Fold-Change: %.3f<br>-log10 p-value: %.3f<br>p-value: %.3f",
Genes, LogFold, -log10(TE_pval), TE_pval))) +
xlab("log2 fold change") + ylab("-log10 p-value") +
geom_point(shape=1, size = 1.5, stroke = .2) +
scale_color_manual(values = c("black" = "black", "red" = "red", "blue" = "blue")) +
geom_hline(yintercept = -log(0.05,base=10), linetype="dashed", color="red") +
geom_vline(xintercept = log(2,base=2), linetype="dashed", color="red") +
geom_vline(xintercept = log(0.5,base=2), linetype="dashed", color="red") +
theme_light() + theme(legend.position="none")
saveWidget(ggplotly(g, tooltip="text"), file.path(gsub("\\.png", "\\.html", outplot_TE)))
cat('<br><table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Transcript Fold-Change</font></th><th><font color=#ffcc33>Protein Fold-Change</font></th></tr>\n', file = htmloutfile, append = TRUE);
cat("<tr><td align=center>",
'<img src="TE_volcano.png" width=600 height=600>',
extractWidgetCode(outplot_TE)$widget_div,
'</td>\n', file = htmloutfile, append = TRUE);
cat("<td align=center>",
'<img src="PE_volcano.png" width=600 height=600>',
extractWidgetCode(outplot_PE)$widget_div,
'</td></tr></table><br>\n',
file = htmloutfile, append = TRUE);
}else{
cat('<br><br><b><font color=red>!!! No replicates found. Cannot perform test to check significance of differential expression. Thus, no Volcano plot generated !!!</font></b><br><br>',
file = htmloutfile, append = TRUE);
}
}
#***************************************************************************************************************************************
# Functions: End
#***************************************************************************************************************************************
#===============================================================================
# Command-line arguments
#===============================================================================
# The script is driven by exactly 12 positional arguments (see inline notes).
expected_nargs <- 12
cli_args <- commandArgs(trailingOnly = TRUE)
if (length(cli_args) != expected_nargs) {
  stop(paste("Please check usage. Number of arguments is not equal to ", expected_nargs, sep = "", collapse = ""))
}
mode                  <- cli_args[1]   # analysis mode: "multiple" or "logfold"
method                <- cli_args[2]   # replicate summary statistic: "mean" or "median"
sampleinfo_file       <- cli_args[3]   # experiment design file (sample -> group)
proteome_file         <- cli_args[4]   # proteome abundance table
transcriptome_file    <- cli_args[5]   # transcriptome abundance table
correction_method     <- cli_args[6]   # multiple-testing correction (passed to p.adjust)
cookdist_upper_cutoff <- cli_args[7]   # Cook's distance cutoff for influential observations
numCluster            <- cli_args[8]   # number of k-means clusters
hm_nclust             <- cli_args[9]   # number of heatmap clusters
volc_with             <- cli_args[10]  # volcano y-axis source: "pval" or "adj_pval"
htmloutfile           <- cli_args[11]  # HTML report output file
outdir                <- cli_args[12]  # directory for HTML supporting files
#===============================================================================
# Check for file existence
#===============================================================================
if (!file.exists(proteome_file)) {
  stop(paste("Proteome Data file does not exists. Path given: ", proteome_file, sep = "", collapse = ""))
}
if (!file.exists(transcriptome_file)) {
  stop(paste("Transcriptome Data file does not exists. Path given: ", transcriptome_file, sep = "", collapse = ""))
}
#===============================================================================
# Load libraries
#===============================================================================
options(warn = -1)  # silence warnings for the whole report run
required_pkgs <- c("dplyr", "data.table", "gplots", "ggplot2",
                   "ggfortify", "plotly", "d3heatmap")
for (pkg in required_pkgs) {
  suppressPackageStartupMessages(library(pkg, character.only = TRUE))
}
#===============================================================================
# Select mode and parse experiment design file
#===============================================================================
# "multiple" mode: the design file is tab-separated with no header; rows 1-2
# declare which group label means "case" and which means "control", and the
# remaining rows map sample (column) names to group labels.  Group labels are
# then normalized to the literal strings "case"/"control".
if(mode=="multiple")
{
expDesign = fread(sampleinfo_file, header = FALSE, stringsAsFactors = FALSE, sep="\t") %>% data.frame();
expDesign_cc = expDesign[1:2,];
sampleinfo_df = expDesign[3:nrow(expDesign),];
rownames(sampleinfo_df)=1:nrow(sampleinfo_df);
colnames(sampleinfo_df) = c("Sample","Group");
# Sample names whose group label matches the declared "case" label.
condition_cols = sampleinfo_df[which(sampleinfo_df[,2]==expDesign_cc[which(expDesign_cc[,1]=="case"),2]),1];
condition_g_name = "case";
# Sample names whose group label matches the declared "control" label.
control_cols = sampleinfo_df[which(sampleinfo_df[,2]==expDesign_cc[which(expDesign_cc[,1]=="control"),2]),1];
control_g_name = "control";
# Replace user-provided group labels with the canonical "case"/"control".
sampleinfo_df[which(sampleinfo_df[,2]==expDesign_cc[which(expDesign_cc[,1]=="case"),2]),2] = "case";
sampleinfo_df[which(sampleinfo_df[,2]==expDesign_cc[which(expDesign_cc[,1]=="control"),2]),2] = "control";
# Preserved for later per-replicate testing (perform_Test_Volcano).
sampleinfo_df_orig = sampleinfo_df;
}
# "logfold" mode: a single pre-computed fold-change column stands in for samples.
if(mode=="logfold")
{
sampleinfo_df = data.frame("Sample"= c("LogFold"), "Group"=c("Fold_Change"))
}
#===============================================================================
# Parse Transcriptome data
#===============================================================================
# NOTE(review): "stringsAsFactor" is a partial match for fread's
# "stringsAsFactors" argument, so it is accepted by R's argument matching.
TE_df_orig = fread(transcriptome_file, sep="\t", stringsAsFactor=F, header=T) %>% data.frame();
if(mode=="multiple")
{
# Keep only the identifier column plus case/control sample columns.
TE_df = TE_df_orig[,c(colnames(TE_df_orig)[1],condition_cols,control_cols)];
}
if(mode=="logfold")
{
TE_df = TE_df_orig;
colnames(TE_df) = c("Genes", "LogFold");
}
#===============================================================================
# Parse Proteome data
#===============================================================================
PE_df_orig = fread(proteome_file, sep="\t", stringsAsFactor=F, header=T) %>% data.frame();
if(mode=="multiple")
{
PE_df = PE_df_orig[,c(colnames(PE_df_orig)[1],condition_cols,control_cols)];
}
if(mode=="logfold")
{
PE_df = PE_df_orig;
colnames(PE_df) = c("Genes", "LogFold");
}
#=============================================================================================================
# Create directory structures and then set the working directory to output directory
#=============================================================================================================
if(! file.exists(outdir))
{
dir.create(outdir);
}
#===============================================================================
# Write initial data summary in html outfile
#===============================================================================
# First cat() (no append) truncates/creates the report file.
cat("<html><head></head><body>\n", file = htmloutfile);
cat("<h1><u>QuanTP: Association between abundance ratios of transcript and protein</u></h1><hr/>\n",
"<font><h3>Input data summary</h3></font>\n",
"<ul>\n",
"<li>Abbreviations used: PE (Proteome data) and TE (Transcriptome data)","</li><br>\n",
"<li>Input Proteome data dimension (Row Column): ", dim(PE_df)[1]," x ", dim(PE_df)[2],"</li>\n",
"<li>Input Transcriptome data dimension (Row Column): ", dim(TE_df)[1]," x ", dim(TE_df)[2],"</li></ul><hr/>\n",
file = htmloutfile, append = TRUE);
cat("<h3 id=table_of_content>Table of Contents:</h3>\n",
"<ul>\n",
"<li><a href=#sample_dist>Sample distribution</a></li>\n",
"<li><a href=#corr_data>Correlation</a></li>\n",
"<li><a href=#regression_data>Regression analysis</a></li>\n",
"<li><a href=#inf_obs>Influential observations</a></li>\n",
"<li><a href=#cluster_data>Cluster analysis</a></li></ul><hr/>\n",
file = htmloutfile, append = TRUE);
#===============================================================================
# Find common samples
#===============================================================================
# A sample is analyzed only if it appears in the experiment design AND as a
# column in both the transcriptome and the proteome tables.
common_samples = intersect(sampleinfo_df[,1], colnames(TE_df)[-1]) %>% intersect(., colnames(PE_df)[-1]);
if(length(common_samples)==0)
{
# Write the explanation into the HTML report BEFORE aborting.  (In the
# original code this cat() followed stop() and was unreachable.)
cat("<b>Please check your experiment design file. Sample names (column names) in the Transcriptome and the Proteome data do not match. </b>\n",file = htmloutfile, append = TRUE);
stop("No common samples found ");
}
#===============================================================================
# Create subsets based on common samples
#===============================================================================
TE_df = select(TE_df, 1, common_samples);
PE_df = select(PE_df, 1, common_samples);
sampleinfo_df = filter(sampleinfo_df, Sample %in% common_samples);
rownames(sampleinfo_df) = sampleinfo_df[,1];
#===============================================================================
# Check for number of rows similarity
#===============================================================================
# The downstream correlation analysis assumes row i of TE_df and row i of
# PE_df describe the same gene/protein pair (the data must be paired).
if(nrow(TE_df) != nrow(PE_df))
{
# Report the reason in the HTML output before stopping (previously
# unreachable because it was placed after stop()).
cat("<b>The correlation analysis expects paired TE and PE data i.e. (i)th gene/transcript of TE file should correspond to (i)th protein of PE file. In the current input provided there is mismatch in terms of number of rows of TE and PE file. Please make sure you provide paired data.</b>\n",file = htmloutfile, append = TRUE);
stop("Number of rows in Transcriptome and Proteome data are not same i.e. they are not paired");
}
#===============================================================================
# Number of groups
#===============================================================================
ngrps = unique(sampleinfo_df[,2]) %>% length();
grps = unique(sampleinfo_df[,2]);
names(grps) = grps;
#===============================================================================
# Change column1 name
#===============================================================================
colnames(TE_df)[1] = "Gene";
colnames(PE_df)[1] = "Protein";
#===============================================================================
# Treat missing values
#===============================================================================
# Count NAs for reporting, then replace them with 0 so that downstream
# arithmetic (fold changes, correlations) does not propagate NA.
TE_nacount = sum(is.na(TE_df));
PE_nacount = sum(is.na(PE_df));
TE_df[is.na(TE_df)] = 0;
PE_df[is.na(PE_df)] = 0;
#===============================================================================
# Obtain JS/HTML lines for interactive visualization
#===============================================================================
# Given the path of a static .png plot, read the htmlwidgets self-contained
# HTML saved next to it (same basename, .html extension) and return:
#   prescripts  - the <head> payload lines (intended for the report's <head>),
#                 wrapped in empty-string padding,
#   widget_div  - the widget <div> line(s), resized to 500x500 and wrapped in
#                 an HTML comment (uncommented later when the report is built),
#   postscripts - the <script type=...> line(s) to append at the report's end.
extractWidgetCode = function(outplot){
  lines <- readLines(gsub("\\.png", "\\.html", outplot))
  # NOTE: the original expression `grep('<head>',lines) + 3:grep('</head>',lines) - 5`
  # parsed as `grep(...) + (3:grep(...)) - 5` because `:` binds tighter than
  # binary `+`/`-`, selecting an unintended window.  Parenthesized to the
  # intended range: from three lines after <head> to five lines before </head>.
  # (Assumes the widget HTML contains exactly one <head>/</head> pair.)
  head_start <- grep('<head>', lines)
  head_end <- grep('</head>', lines)
  return(list(
    'prescripts' = c('',
                     lines[(head_start + 3):(head_end - 5)],
                     ''),
    'widget_div' = paste('<!--',
                         gsub('width:100%;height:400px',
                              'width:500px;height:500px',
                              lines[grep(lines, pattern='html-widget')]),
                         '-->', sep=''),
    # The identity transforms gsub('script','script', x) present in the
    # original were no-ops and have been removed.
    'postscripts' = paste('',
                          lines[grep(lines, pattern='<script type')],
                          '', sep='')));
}
# Accumulators for the widget <head> payloads and trailing <script> blocks
# harvested from every saved plot; injected into the final report at the end.
prescripts <- list()
postscripts <- list()
#===============================================================================
# Decide based on analysis mode
#===============================================================================
# "logfold" mode: the inputs already contain a single log fold-change column,
# so boxplots, scatter/correlation, regression and clustering are produced
# directly.  "multiple" mode: replicate columns are first summarized
# (mean/median), converted to log2(case/control), and then the same
# single-sample analysis is run, plus PCA and volcano plots.
# Relies on helper functions defined earlier in this file:
# multisample_boxplot, multisample_PCA, mergeReplicates, singlesample_scatter,
# singlesample_cor, singlesample_regression, singlesample_heatmap,
# singlesample_kmeans, perform_Test_Volcano.
if(mode=="logfold")
{
cat('<h2 id="sample_dist"><font color=#ff0000>SAMPLE DISTRIBUTION</font></h2>\n',
file = htmloutfile, append = TRUE);
# TE Boxplot
# NOTE(review): "collape" is a typo for "collapse" repeated throughout this
# block; paste() absorbs it into `...` as an extra "" which, with sep="",
# leaves the resulting path unchanged (harmless but worth fixing).
outplot = paste(outdir,"/Box_TE.png",sep="",collape="");
multisample_boxplot(TE_df, sampleinfo_df, outplot, "Yes", "Samples", "Transcript Abundance data");
lines <- extractWidgetCode(outplot)
prescripts <- c(prescripts, lines$prescripts)
postscripts <- c(postscripts, lines$postscripts)
cat('<table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; ">\n',
'<tr bgcolor="#7a0019"><th><font color=#ffcc33>Boxplot: Transcriptome data</font></th><th><font color=#ffcc33>Boxplot: Proteome data</font></th></tr>\n',
"<tr><td align=center>", '<img src="Box_TE.png" width=500 height=500>', lines$widget_div, '</td>\n', file = htmloutfile, append = TRUE);
# PE Boxplot
outplot = paste(outdir,"/Box_PE.png",sep="",collape="");
multisample_boxplot(PE_df, sampleinfo_df, outplot, "Yes", "Samples", "Protein Abundance data");
lines <- extractWidgetCode(outplot)
postscripts <- c(postscripts, lines$postscripts)
cat("<td align=center>", '<img src="Box_PE.png" width=500 height=500>', lines$widget_div,
'</td></tr></table>\n', file = htmloutfile, append = TRUE);
cat('<hr/><h2 id="corr_data"><font color=#ff0000>CORRELATION</font></h2>\n',
file = htmloutfile, append = TRUE);
# TE PE scatter: pair the two tables column-wise (assumes rows are paired).
PE_TE_data = data.frame(PE_df, TE_df);
colnames(PE_TE_data) = c("PE_ID","PE_abundance","TE_ID","TE_abundance");
outplot = paste(outdir,"/TE_PE_scatter.png",sep="",collape="");
cat('<table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Scatter plot between Proteome and Transcriptome Abundance</font></th></tr>\n', file = htmloutfile, append = TRUE);
singlesample_scatter(PE_TE_data, outplot);
lines <- extractWidgetCode(outplot);
postscripts <- c(postscripts, lines$postscripts);
cat("<tr><td align=center>", '<img src="TE_PE_scatter.png" width=800 height=800>', gsub('width:500px;height:500px', 'width:800px;height:800px' , lines$widget_div), '</td></tr>\n', file = htmloutfile, append = TRUE);
# TE PE Cor
cat("<tr><td align=center>", file = htmloutfile, append = TRUE);
singlesample_cor(PE_TE_data, htmloutfile, append=TRUE);
cat('<font color="red">*Note that <u>correlation</u> is <u>sensitive to outliers</u> in the data. So it is important to analyze outliers/influential observations in the data.<br> Below we use <u>Cook\'s distance based approach</u> to identify such influential observations.</font>\n',
file = htmloutfile, append = TRUE);
cat('</td></table>',
file = htmloutfile, append = TRUE);
cat('<hr/><h2 id="regression_data"><font color=#ff0000>REGRESSION ANALYSIS</font></h2>\n',
file = htmloutfile, append = TRUE);
# TE PE Regression (writes several diagnostic plots whose widget scripts are
# collected below).
singlesample_regression(PE_TE_data,htmloutfile, append=TRUE);
# NOTE(review): the gsub below rewrites 'data-for="html' to
# 'data-for="secondhtml"', which inserts a stray double quote into the
# attribute (data-for="secondhtml"widget-...).  Presumably intended to
# rename the duplicated scatter widget; verify the produced HTML.
postscripts <- c(postscripts, c(extractWidgetCode(paste(outdir,"/PE_TE_lm_1.png",sep="",collapse=""))$postscripts,
extractWidgetCode(paste(outdir,"/PE_TE_lm_2.png",sep="",collapse=""))$postscripts,
extractWidgetCode(paste(outdir,"/PE_TE_lm_5.png",sep="",collapse=""))$postscripts,
extractWidgetCode(paste(outdir,"/PE_TE_lm_cooksd.png",sep="",collapse=""))$postscripts,
extractWidgetCode(paste(outdir,"/AbundancePlot_scatter_without_outliers.png",sep="",collapse=""))$postscripts,
gsub('data-for="html', 'data-for="secondhtml"',
extractWidgetCode(paste(outdir,"/TE_PE_scatter.png",sep="",collapse=""))$postscripts)))
cat('<hr/><h2 id="cluster_data"><font color=#ff0000>CLUSTER ANALYSIS</font></h2>\n',
file = htmloutfile, append = TRUE);
# TE PE Heatmap
singlesample_heatmap(PE_TE_data, htmloutfile, hm_nclust);
lines <- extractWidgetCode(paste(outdir,"/PE_TE_heatmap.png",sep="",collapse=""))
postscripts <- c(postscripts, lines$postscripts)
prescripts <- c(prescripts, lines$prescripts)
# TE PE Clustering (kmeans)
singlesample_kmeans(PE_TE_data, htmloutfile, nclust=as.numeric(numCluster))
postscripts <- c(postscripts, extractWidgetCode(paste(outdir,"/PE_TE_kmeans.png",sep="",collapse=""))$postscripts)
}else{
if(mode=="multiple")
{
cat('<h2 id="sample_dist"><font color=#ff0000>SAMPLE DISTRIBUTION</font></h2>\n',
file = htmloutfile, append = TRUE);
# TE Boxplot (per-replicate, natural-log scale for display).
outplot = paste(outdir,"/Box_TE_all_rep.png",sep="",collape="");
temp_df_te_data = data.frame(TE_df[,1], log(TE_df[,2:length(TE_df)]));
colnames(temp_df_te_data) = colnames(TE_df);
multisample_boxplot(temp_df_te_data, sampleinfo_df, outplot, "Yes", "Samples", "Transcript Abundance (log)");
lines <- extractWidgetCode(outplot)
prescripts <- c(prescripts, lines$prescripts)
postscripts <- c(postscripts, lines$postscripts)
cat('<table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; ">\n',
'<tr bgcolor="#7a0019"><th><font color=#ffcc33>Boxplot: Transcriptome data</font></th><th><font color=#ffcc33>Boxplot: Proteome data</font></th></tr>\n',
"<tr><td align=center>", file = htmloutfile, append = TRUE);
cat('<img src="Box_TE_all_rep.png" width=500 height=500>',
lines$widget_div, '</td>', file = htmloutfile, append = TRUE);
# PE Boxplot (per-replicate, natural-log scale for display).
outplot = paste(outdir,"/Box_PE_all_rep.png",sep="",collape="");
temp_df_pe_data = data.frame(PE_df[,1], log(PE_df[,2:length(PE_df)]));
colnames(temp_df_pe_data) = colnames(PE_df);
multisample_boxplot(temp_df_pe_data, sampleinfo_df, outplot, "Yes", "Samples", "Protein Abundance (log)");
lines <- extractWidgetCode(outplot)
#prescripts <- c(prescripts, lines$prescripts)
postscripts <- c(postscripts, lines$postscripts)
cat("<td align=center>", '<img src="Box_PE_all_rep.png" width=500 height=500>',
lines$widget_div, '</td></tr></table>\n', file = htmloutfile, append = TRUE);
# Calc TE PCA
outplot = paste(outdir,"/PCA_TE_all_rep.png",sep="",collape="");
multisample_PCA(TE_df, sampleinfo_df, outplot);
PCA_TE <- extractWidgetCode(outplot)
postscripts <- c(postscripts, PCA_TE$postscripts)
# Calc PE PCA
outplot = paste(outdir,"/PCA_PE_all_rep.png",sep="",collape="");
multisample_PCA(PE_df, sampleinfo_df, outplot);
PCA_PE <- extractWidgetCode(outplot)
postscripts <- c(postscripts, PCA_PE$postscripts)
# Replicate mode: collapse replicate columns per group using `method`
# (mean/median); TE_df/PE_df now have one column per group.
templist = mergeReplicates(TE_df,PE_df, sampleinfo_df, method);
TE_df = templist$TE_df_merged;
PE_df = templist$PE_df_merged;
sampleinfo_df = templist$sampleinfo_df_merged;
rownames(sampleinfo_df) = sampleinfo_df[,1];
# TE Boxplot (merged replicates)
outplot = paste(outdir,"/Box_TE_rep.png",sep="",collape="");
temp_df_te_data = data.frame(TE_df[,1], log(TE_df[,2:length(TE_df)]));
colnames(temp_df_te_data) = colnames(TE_df);
multisample_boxplot(temp_df_te_data, sampleinfo_df, outplot, "No", "Sample Groups", "Mean Transcript Abundance (log)");
lines <- extractWidgetCode(outplot)
#prescripts <- c(prescripts, lines$prescripts)
postscripts <- c(postscripts, lines$postscripts)
cat('<br><font color="#ff0000"><h3>Sample wise distribution (Box plot) after using ',method,' on replicates </h3></font><table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Boxplot: Transcriptome data</font></th><th><font color=#ffcc33>Boxplot: Proteome data</font></th></tr>\n',
"<tr><td align=center>", '<img src="Box_TE_rep.png" width=500 height=500>', lines$widget_div, '</td>\n', file = htmloutfile, append = TRUE);
# PE Boxplot (merged replicates)
outplot = paste(outdir,"/Box_PE_rep.png",sep="",collape="");
temp_df_pe_data = data.frame(PE_df[,1], log(PE_df[,2:length(PE_df)]));
colnames(temp_df_pe_data) = colnames(PE_df);
multisample_boxplot(temp_df_pe_data, sampleinfo_df, outplot, "No", "Sample Groups", "Mean Protein Abundance (log)");
lines <- extractWidgetCode(outplot)
#prescripts <- c(prescripts, lines$prescripts)
postscripts <- c(postscripts, lines$postscripts)
cat("<td align=center>", '<img src="Box_PE_rep.png" width=500 height=500>', lines$widget_div, '</td></tr></table>\n', file = htmloutfile, append = TRUE);
#===============================================================================
# Calculating log fold change and running the "single" code part
#===============================================================================
# Reduce each table to one log2(case/control) column so the remainder of
# this branch mirrors the "logfold" branch above.
TE_df = data.frame("Genes"=TE_df[,1], "LogFold"=apply(TE_df[,c(which(colnames(TE_df)==condition_g_name),which(colnames(TE_df)==control_g_name))],1,function(x) log(x[1]/x[2],base=2)));
PE_df = data.frame("Genes"=PE_df[,1], "LogFold"=apply(PE_df[,c(which(colnames(PE_df)==condition_g_name),which(colnames(PE_df)==control_g_name))],1,function(x) log(x[1]/x[2],base=2)));
#===============================================================================
# Treat missing values
#===============================================================================
# Division by zero abundance yields +/-Inf; map Inf -> NA -> 0 so downstream
# correlation/regression code sees finite values.
TE_df[is.infinite(TE_df[,2]),2] = NA;
PE_df[is.infinite(PE_df[,2]),2] = NA;
TE_df[is.na(TE_df)] = 0;
PE_df[is.na(PE_df)] = 0;
sampleinfo_df = data.frame("Sample"= c("LogFold"), "Group"=c("Fold_Change"))
#===============================================================================
# Find common samples
#===============================================================================
common_samples = intersect(sampleinfo_df[,1], colnames(TE_df)[-1]) %>% intersect(., colnames(PE_df)[-1]);
TE_df = select(TE_df, 1, common_samples);
PE_df = select(PE_df, 1, common_samples);
sampleinfo_df = filter(sampleinfo_df, Sample %in% common_samples);
rownames(sampleinfo_df) = sampleinfo_df[,1];
# TE Boxplot (fold-change distribution)
outplot = paste(outdir,"/Box_TE.png",sep="",collape="");
multisample_boxplot(TE_df, sampleinfo_df, outplot, "Yes", "Sample (log2(case/control))", "Transcript Abundance fold-change (log2)");
lines <- extractWidgetCode(outplot)
postscripts <- c(postscripts, lines$postscripts)
cat('<br><font color="#ff0000"><h3>Distribution (Box plot) of log fold change </h3></font>', file = htmloutfile, append = TRUE);
cat('<table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Boxplot: Transcriptome data</font></th><th><font color=#ffcc33>Boxplot: Proteome data</font></th></tr>\n',
"<tr><td align=center>", '<img src="Box_TE.png" width=500 height=500>', lines$widget_div, '</td>\n', file = htmloutfile, append = TRUE);
# PE Boxplot (fold-change distribution)
outplot = paste(outdir,"/Box_PE.png",sep="",collape="");
multisample_boxplot(PE_df, sampleinfo_df, outplot, "Yes", "Sample (log2(case/control))", "Protein Abundance fold-change(log2)");
lines <- extractWidgetCode(outplot)
postscripts <- c(postscripts, lines$postscripts)
cat("<td align=center>", '<img src="Box_PE.png" width=500 height=500>', lines$widget_div,'</td></tr></table>\n', file = htmloutfile, append = TRUE);
# Log Fold Data: significance tests + volcano plots (uses the ORIGINAL
# per-replicate tables, hence the *_orig arguments).
perform_Test_Volcano(TE_df_orig,PE_df_orig,TE_df, PE_df,sampleinfo_df_orig,method,correction_method,volc_with)
postscripts <- c(postscripts, extractWidgetCode(paste(outdir,"/TE_volcano.png",sep="",collapse=""))$postscripts)
postscripts <- c(postscripts, extractWidgetCode(paste(outdir,"/PE_volcano.png",sep="",collapse=""))$postscripts)
# Print PCA
cat('<br><br><table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>PCA plot: Transcriptome data</font></th><th><font color=#ffcc33>PCA plot: Proteome data</font></th></tr>\n',
"<tr><td align=center>", '<img src="PCA_TE_all_rep.png" width=500 height=500>', PCA_TE$widget_div, '</td>\n',
"<td align=center>", '<img src="PCA_PE_all_rep.png" width=500 height=500>', PCA_PE$widget_div, '</td></tr></table>\n',
file = htmloutfile, append = TRUE);
cat('<hr/><h2 id="corr_data"><font color=#ff0000>CORRELATION</font></h2>\n',
file = htmloutfile, append = TRUE);
PE_TE_data = data.frame(PE_df, TE_df);
colnames(PE_TE_data) = c("PE_ID","PE_abundance","TE_ID","TE_abundance");
# TE PE scatter
outplot = paste(outdir,"/TE_PE_scatter.png",sep="",collape="");
cat('<br><table border=1 cellspacing=0 cellpadding=5 style="table-layout:auto; "> <tr bgcolor="#7a0019"><th><font color=#ffcc33>Scatter plot between Proteome and Transcriptome Abundance</font></th></tr>\n', file = htmloutfile, append = TRUE);
singlesample_scatter(PE_TE_data, outplot);
lines <- extractWidgetCode(outplot);
postscripts <- c(postscripts, lines$postscripts);
cat("<tr><td align=center>", '<img src="TE_PE_scatter.png" width=800 height=800>', gsub('width:500px;height:500px', 'width:800px;height:800px' , lines$widget_div),
'</td>\n', file = htmloutfile, append = TRUE);
# TE PE Cor
cat("<tr><td align=center>\n", file = htmloutfile, append = TRUE);
singlesample_cor(PE_TE_data, htmloutfile, append=TRUE);
cat('<font color="red">*Note that <u>correlation</u> is <u>sensitive to outliers</u> in the data. So it is important to analyze outliers/influential observations in the data.<br> Below we use <u>Cook\'s distance based approach</u> to identify such influential observations.</font>\n',
file = htmloutfile, append = TRUE);
cat('</td></table>',
file = htmloutfile, append = TRUE);
cat('<hr/><h2 id="regression_data"><font color=#ff0000>REGRESSION ANALYSIS</font></h2>\n',
file = htmloutfile, append = TRUE);
# TE PE Regression
singlesample_regression(PE_TE_data,htmloutfile, append=TRUE);
# See NOTE(review) in the logfold branch about the 'data-for="secondhtml"'
# substitution inserting a stray quote.
postscripts <- c(postscripts, c(extractWidgetCode(paste(outdir,"/PE_TE_lm_1.png",sep="",collapse=""))$postscripts,
extractWidgetCode(paste(outdir,"/PE_TE_lm_2.png",sep="",collapse=""))$postscripts,
extractWidgetCode(paste(outdir,"/PE_TE_lm_5.png",sep="",collapse=""))$postscripts,
extractWidgetCode(paste(outdir,"/PE_TE_lm_cooksd.png",sep="",collapse=""))$postscripts,
extractWidgetCode(paste(outdir,"/AbundancePlot_scatter_without_outliers.png",sep="",collapse=""))$postscripts,
gsub('data-for="html', 'data-for="secondhtml"',
extractWidgetCode(paste(outdir,"/TE_PE_scatter.png",sep="",collapse=""))$postscripts)));
cat('<hr/><h2 id="cluster_data"><font color=#ff0000>CLUSTER ANALYSIS</font></h2>\n',
file = htmloutfile, append = TRUE);
#TE PE Heatmap
singlesample_heatmap(PE_TE_data, htmloutfile, hm_nclust);
lines <- extractWidgetCode(paste(outdir,"/PE_TE_heatmap.png",sep="",collapse=""))
postscripts <- c(postscripts, lines$postscripts)
prescripts <- c(prescripts, lines$prescripts)
#TE PE Clustering (kmeans)
singlesample_kmeans(PE_TE_data, htmloutfile, nclust=as.numeric(numCluster))
postscripts <- c(postscripts, extractWidgetCode(paste(outdir,"/PE_TE_kmeans.png",sep="",collapse=""))$postscripts);
}
}
cat("<h3>Go To:</h3>\n",
"<ul>\n",
"<li><a href=#sample_dist>Sample distribution</a></li>\n",
"<li><a href=#corr_data>Correlation</a></li>\n",
"<li><a href=#regression_data>Regression analysis</a></li>\n",
"<li><a href=#inf_obs>Influential observations</a></li>\n",
"<li><a href=#cluster_data>Cluster analysis</a></li></ul>\n",
"<br><a href=#>TOP</a>",
file = htmloutfile, append = TRUE);
cat("</body></html>\n", file = htmloutfile, append = TRUE);
#===============================================================================
# Add masked-javascripts tags to HTML file in the head and end
#===============================================================================
htmllines <- readLines(htmloutfile)
htmllines[1] <- paste('<html>\n<head>\n', paste(prescripts, collapse='\n'), '\n</head>\n<body>')
cat(paste(htmllines, collapse='\n'), file = htmloutfile)
cat('\n', paste(postscripts, collapse='\n'), "\n",
"</body>\n</html>\n", file = htmloutfile, append = TRUE);
|
d964d6f0e7e6a36f3be87022d2a0bd5d9729b60f | c981caf103a3540f7964e6c41a56ca34d67732c4 | /R/mice_imputation_get_states.R | 7c17f1b226cd47d4cd9658a67b9f788db2640133 | [] | no_license | alexanderrobitzsch/miceadds | 8285b8c98c2563c2c04209d74af6432ce94340ee | faab4efffa36230335bfb1603078da2253d29566 | refs/heads/master | 2023-03-07T02:53:26.480028 | 2023-03-01T16:26:31 | 2023-03-01T16:26:31 | 95,305,394 | 17 | 2 | null | 2018-05-31T11:41:51 | 2017-06-24T15:16:57 | R | UTF-8 | R | false | false | 1,108 | r | mice_imputation_get_states.R | ## File Name: mice_imputation_get_states.R
## File Version: 0.407
## Recover the internal state of a running mice imputation sampler.
##
## Walks up the calling frames (starting at `pos`, scanning the frame
## offsets in `n_index`) via the ma_exists()/ma_exists_get() helpers to
## locate objects living in the sampler's environment, so that custom
## imputation methods can access them (see the sampler.R link below).
##
## @param pos frame in which to start the search; defaults to the caller.
## @param n_index integer offsets of candidate parent frames to scan.
## @return list with the sampler's `newstate`, the name of the variable
##   currently being imputed (`vname` / `dep`), the imputation `blocks`
##   and the block for `dep`, the `data` being imputed, plus the resolved
##   `pos` / `n_index` so follow-up lookups can start from the right frame.
mice_imputation_get_states <- function( pos=parent.frame(n=1), n_index=1:20 )
{
    if ( is.null(pos) ){
        pos <- parent.frame(n=1)
    }
    #-- newstate: locate the frame holding the sampler state; the frame
    #   position found here is reused for all subsequent lookups
    # newstate <- ma_exists_get(x='newstate', pos=pos, n_index=n_index )
    res <- ma_exists(x='newstate', pos=pos, n_index=n_index )
    newstate <- res$obj
    pos <- res$pos
    n_index <- res$n
    #-- vname
    # NOTE(review): the result of this 'yname' lookup is immediately
    # overwritten by `vname <- dep` two lines below — confirm whether the
    # lookup is still needed.
    vname <- ma_exists_get(x='yname', pos=pos, n_index=n_index )
    dep <- newstate$dep
    vname <- dep
    if (is.null(vname)){
        # fall back to the sampler's own 'vname' variable when newstate
        # carries no dependent-variable name
        vname <- ma_exists_get(x='vname', pos=pos, n_index=1:20 )
        subst <- FALSE  # NOTE(review): assigned but never used
    }
    #-- blocks
    blocks <- ma_exists_get(x='blocks', pos=pos, n_index=n_index )
    block <- blocks[[ dep ]]
    #-- data
    data <- ma_exists_get(x='data', pos=pos, n_index=n_index )
    #*** output
    res <- list(newstate=newstate, vname=vname, dep=dep, blocks=blocks, block=block,
                data=data, pos=pos, n_index=n_index)
    return(res)
}
## see also
## https://github.com/stefvanbuuren/mice/blob/master/R/sampler.R
|
e0911fbe0741430583e70f673fd9c9802707a2cf | 6fba092cf7db695c2d0f4491bdc5a3ce7d4fcc49 | /tests/testthat/test-if_else.R | 2a40120c140738e844673598ab46d687009dbe72 | [
"MIT"
] | permissive | stjordanis/funs | 03b0fab3159eaf9e3a6ee2116b56c3148690165d | 37387ac97a7088e2c4e2ed73d2de59e123738561 | refs/heads/master | 2023-05-14T19:54:20.057387 | 2021-05-17T08:18:30 | 2021-05-17T08:18:30 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 730 | r | test-if_else.R | test_that("result has expected values", {
expect_equal(if_else(c(TRUE, FALSE, NA), 1, 2), c(1, 2, NA))
expect_equal(if_else(c(TRUE, FALSE, NA), 1, 2, 3), c(1, 2, 3))
})
test_that("size comes from condition", {
  ## The output length follows the condition vector, not the (scalar)
  ## true/false values.
  cond <- rep(TRUE, 3)
  expect_vector(if_else(cond, 1, 1), size = 3)
})
test_that("ptype come from true/false/na", {
  ## Mixing integer and double values promotes the result to double,
  ## regardless of which argument supplies which type.
  dbl <- double()
  expect_vector(if_else(TRUE, 1L, 1.5), ptype = dbl)
  expect_vector(if_else(TRUE, 1.5, 1L), ptype = dbl)
  expect_vector(if_else(TRUE, 1L, 1L, 1.5), ptype = dbl)
})
## Error paths are locked in as snapshots: the code inside expect_snapshot()
## must stay byte-identical, otherwise the recorded snapshot output changes.
test_that("if_else() errors informatively", {
  expect_snapshot(error = TRUE, {
    if_else(TRUE, 1:3, 1)
    if_else(TRUE, 1, 1:3)
    if_else(TRUE, 1, 1, 1:3)
    if_else(c(TRUE, FALSE), 1:3, 1)
    if_else(1, 1, "2")
  })
})
|
eb9d2d11a893ce52fef922ef7172dbda5802e89b | ed084be9b1964f7072587863919ef4cddbb79e59 | /tests/testthat/test-urls.R | aab82dc56c5572aaace32444499007e6c62b987f | [
"MIT"
] | permissive | fmichonneau/checker | 008644f37edc0470a211243e41b66fe23400cd5b | 02d2003c99a2410faeebdfe7f0492ac674b6d65a | refs/heads/master | 2021-07-10T09:28:56.897958 | 2020-06-29T07:45:36 | 2020-06-29T07:45:36 | 157,784,463 | 7 | 0 | NOASSERTION | 2019-04-22T20:35:09 | 2018-11-15T23:14:56 | R | UTF-8 | R | false | false | 22,631 | r | test-urls.R | build_html_page <- function(o, self_contained = TRUE) {
f <- file.path("rmd_files", "test_urls_img.Rmd")
rmarkdown::render(
f,
output_file = o,
quiet = TRUE,
output_options = list(self_contained = self_contained))
}
## Common variables -----------------------------------------------------------
## Expected column-name -> class mapping shared by the format checks below:
## every column returned by check_links() is a character vector.
expected_cols <- setNames(
  as.list(rep("character", 9L)),
  c(
    "file", "tag_type", "link", "scheme", "link_text",
    "full_path", "error_level", "message", "alt_text"
  )
)
## Self-contained files -------------------------------------------------------
context("self contained files")
out_self_contained <- build_html_page(
"test_self_contained.html",
self_contained = TRUE)
expect_message(
all_links_self_contained <- check_links(
dir = dirname(out_self_contained),
regexp = "test_self_contained.html",
only_with_issues = FALSE,
show_summary = FALSE),
regexp = "^Error"
)
expect_message(
with_issues_self_contained <- check_links(
dir = dirname(out_self_contained),
regexp = "test_self_contained.html",
only_with_issues = TRUE,
show_summary = FALSE),
regexp = "^Error"
)
test_that("output has correct format for self-contained", {
expect_true(inherits(all_links_self_contained, "tbl_df"))
expect_true(inherits(with_issues_self_contained, "tbl_df"))
expect_identical(lapply(all_links_self_contained, class), expected_cols)
expect_identical(lapply(with_issues_self_contained, class), expected_cols)
expect_identical(nrow(all_links_self_contained), 26L)
expect_true(nrow(with_issues_self_contained) >= 4)
})
test_that("404 are working", {
  ## Exactly one checked link points at the 404 page, and its recorded
  ## message carries the HTTP status code returned for it.
  is_404 <- all_links_self_contained$link_text == "404"
  rows_404 <- all_links_self_contained[is_404, ]
  expect_identical(nrow(rows_404), 1L)
  expect_identical("HTTP status code: 404", unique(rows_404$message))
})
## Links are looked up by the visible link text of the fixture page; links
## with issues must appear in both result tibbles, valid ones only in the
## `only_with_issues = FALSE` result.
test_that("internal links are working as expected", {
  expect_false("valid" %in% with_issues_self_contained$link_text)
  expect_true("valid" %in% all_links_self_contained$link_text)
  expect_true("broken" %in% with_issues_self_contained$link_text)
  expect_true("broken" %in% all_links_self_contained$link_text)
  sub_with_issues <- with_issues_self_contained[with_issues_self_contained$link_text == "broken", ]
  expect_identical(nrow(sub_with_issues), 1L)
  expect_match(sub_with_issues$message, "File referenced by URL doesn't exist")
  sub_links <- all_links_self_contained[all_links_self_contained$link_text == "broken", ]
  expect_identical(nrow(sub_links), 1L)
  expect_match(sub_links$message, "File referenced by URL doesn't exist")
  expect_false("local within valid" %in% with_issues_self_contained$link_text)
  expect_true("local within valid" %in% all_links_self_contained$link_text)
  expect_false("local outside valid link valid fragment" %in% with_issues_self_contained$link_text)
  expect_true("local outside valid link valid fragment" %in% all_links_self_contained$link_text)
  expect_true("local outside valid link invalid fragment" %in% with_issues_self_contained$link_text)
  expect_true("local outside valid link invalid fragment" %in% all_links_self_contained$link_text)
})
## A valid page with a bad #fragment is still flagged as an issue.
test_that("external links with fragments", {
  ## Valid links
  sub_links_valid <- all_links_self_contained[all_links_self_contained$link_text == "valid external with valid fragment", ]
  sub_with_issues_valid <- with_issues_self_contained[with_issues_self_contained$link_text == "valid external with valid fragment", ]
  expect_identical(nrow(sub_links_valid), 1L)
  expect_identical(nrow(sub_with_issues_valid), 0L)
  ## Invalid links
  sub_links_invalid <- all_links_self_contained[all_links_self_contained$link_text == "valid external with invalid fragment", ]
  sub_with_issues_invalid <- with_issues_self_contained[with_issues_self_contained$link_text == "valid external with invalid fragment", ]
  expect_identical(nrow(sub_links_invalid), 1L)
  expect_identical(nrow(sub_with_issues_invalid), 1L)
})
## When the target file is missing, the fragment is irrelevant: the message
## reports the missing file, not the fragment.
test_that("local links with fragments for file that doesn't exist", {
  sub_with_issues_fragment <- with_issues_self_contained[with_issues_self_contained$link_text == "local outside invalid link irrelevant fragment", ]
  expect_identical(nrow(sub_with_issues_fragment), 1L)
  expect_match(sub_with_issues_fragment$message, "Local URL .+ doesn't exist")
  sub_links_fragment <- all_links_self_contained[all_links_self_contained$link_text == "local outside invalid link irrelevant fragment", ]
  expect_identical(nrow(sub_links_fragment), 1L)
  expect_match(sub_links_fragment$message, "Local URL .+ doesn't exist")
})
### mailto: --------------------------------------------------------------------
context("self-contained dealing with mailto:")
## NOTE(review): the description says mailto: only appears with
## `only_with_issues = FALSE`, but the assertions check that it appears in
## BOTH results with error_level "ok" — the test name looks stale.
test_that("mailto: only appears when `only_with_issues=FALSE`", {
  expect_identical(
    length(grep("^mailto:", all_links_self_contained$full_path)),
    1L
  )
  expect_identical(
    all_links_self_contained$error_level[
      grepl("^mailto:", all_links_self_contained$full_path)
    ],
    "ok"
  )
  expect_identical(
    length(grep("^mailto:", with_issues_self_contained$full_path)), 1L
  )
  expect_identical(
    with_issues_self_contained$error_level[
      grepl("^mailto:", with_issues_self_contained$full_path)
    ],
    "ok"
  )
})
## mailto: links are skipped, not validated.
test_that("mailto: has 'ok' for error-level and 'not checked' as message", {
  sub_mailto <- all_links_self_contained[
    grepl("^mailto", all_links_self_contained$full_path),
  ]
  expect_identical(sub_mailto$error_level, "ok")
  expect_identical(sub_mailto$message, "not checked.")
})
### data URI -------------------------------------------------------------------
context("self-contained data URI")
## not sure what we can test for here...
### valid links ----------------------------------------------------------------
context("self-contained valid links")
## Valid entries carry a descriptive message: HTTP status, fragment check
## result, or local file existence.
test_that("check for status code of valid links + message for fragments", {
  sub_valid <- all_links_self_contained[
    all_links_self_contained$error_level == "success" &
      !is.na(all_links_self_contained$error_level), ]
  expect_true(length(grep("HTTP status code: 200", sub_valid$message)) > 1)
  expect_true(length(grep("Fragment .+ checked and found", sub_valid$message)) > 1)
  expect_true(length(grep("File exists", sub_valid$message)) > 0)
})
### images ---------------------------------------------------------------------
context("self-contained images")
## Image accessibility: images without an alt attribute are reported.
test_that("no alt correctly parsed", {
  sub_no_alt <- all_links_self_contained[
    all_links_self_contained$tag_type == "img" &
      is.na(all_links_self_contained$alt_text),
  ]
  expect_identical(nrow(sub_no_alt), 3L)
  expect_output(
    summary_check_images(all_links_self_contained),
    "No 'alt' text for the following images"
  )
})
test_that("alt correctly parsed", {
  sub_with_alt <- all_links_self_contained[
    all_links_self_contained$tag_type == "img" &
      !is.na(all_links_self_contained$alt_text),
  ]
  expect_identical(nrow(sub_with_alt), 5L)
  expect_identical(sum(sub_with_alt$error_level == "error"), 1L)
})
test_that("http test passes", {
  expect_output(
    summary_check_images(all_links_self_contained),
    "All images passed the HTTP checks")
})
###### -------------------------------------------------------------------------
## not self-contained files ----------------------------------------------------
###### -------------------------------------------------------------------------
context("not self-contained files")
out_not_contained <- build_html_page(
"test_not_contained.html",
self_contained = FALSE)
expect_message({
all_links_not_contained <- check_links(
dir = dirname(out_not_contained),
regexp = "test_not_contained.html",
only_with_issues = FALSE,
show_summary = FALSE
)},
regexp = "^Error:"
)
expect_message(
with_issues_not_contained <- check_links(
dir = dirname(out_not_contained),
regexp = "test_not_contained.html",
only_with_issues = TRUE,
show_summary = FALSE),
regexp = "^Error:"
)
test_that("output has correct format for not contained", {
expect_true(inherits(all_links_not_contained, "tbl_df"))
expect_true(inherits(with_issues_not_contained, "tbl_df"))
expect_identical(lapply(all_links_not_contained, class), expected_cols)
expect_identical(lapply(with_issues_not_contained, class), expected_cols)
expect_identical(nrow(all_links_not_contained), 33L)
expect_true(nrow(with_issues_not_contained) >= 4)
})
test_that("404 are working", {
links_404 <- all_links_not_contained[all_links_not_contained$link_text == "404", ]
expect_identical(nrow(links_404), 1L)
expect_identical("HTTP status code: 404", unique(links_404$message))
})
test_that("internal links are working as expected", {
expect_false("valid" %in% with_issues_not_contained$link_text)
expect_true("valid" %in% all_links_not_contained$link_text)
expect_true("broken" %in% with_issues_not_contained$link_text)
expect_true("broken" %in% all_links_not_contained$link_text)
sub_with_issues <- with_issues_not_contained[with_issues_not_contained$link_text == "broken", ]
expect_identical(nrow(sub_with_issues), 1L)
expect_match(sub_with_issues$message, "File referenced by URL doesn't exist")
sub_links <- all_links_not_contained[all_links_not_contained$link_text == "broken", ]
expect_identical(nrow(sub_links), 1L)
expect_match(sub_links$message, "File referenced by URL doesn't exist")
expect_false("local within valid" %in% with_issues_not_contained$link_text)
expect_true("local within valid" %in% all_links_not_contained$link_text)
expect_false("local outside valid link valid fragment" %in%
with_issues_not_contained$link_text)
expect_true("local outside valid link valid fragment" %in%
all_links_not_contained$link_text)
expect_true("local outside valid link invalid fragment" %in% with_issues_not_contained$link_text)
expect_true("local outside valid link invalid fragment" %in% all_links_not_contained$link_text)
})
test_that("external links with fragments", {
## Valid links
sub_links_valid <- all_links_not_contained[all_links_not_contained$link_text == "valid external with valid fragment", ]
sub_with_issues_valid <- with_issues_not_contained[with_issues_not_contained$link_text == "valid external with valid fragment", ]
expect_identical(nrow(sub_links_valid), 1L)
expect_identical(nrow(sub_with_issues_valid), 0L)
## Invalid links
sub_links_invalid <- all_links_not_contained[all_links_not_contained$link_text == "valid external with invalid fragment", ]
sub_with_issues_invalid <- with_issues_not_contained[with_issues_not_contained$link_text == "valid external with invalid fragment", ]
expect_identical(nrow(sub_links_invalid), 1L)
expect_identical(nrow(sub_with_issues_invalid), 1L)
})
test_that("local links with fragments for file that doesn't exist", {
sub_with_issues_fragment <- with_issues_not_contained[with_issues_not_contained$link_text == "local outside invalid link irrelevant fragment", ]
expect_identical(nrow(sub_with_issues_fragment), 1L)
expect_match(sub_with_issues_fragment$message, "Local URL .+ doesn't exist")
sub_links_fragment <- all_links_not_contained[all_links_not_contained$link_text == "local outside invalid link irrelevant fragment", ]
expect_identical(nrow(sub_links_fragment), 1L)
expect_match(sub_links_fragment$message, "Local URL .+ doesn't exist")
})
### mailto: --------------------------------------------------------------------
context("not contained dealing with mailto:")
test_that("mailto: only appears when `only_with_issues=FALSE`", {
  expect_identical(
    length(grep("^mailto:", all_links_not_contained$full_path)), 1L)
  expect_identical(
    length(grep("^mailto:", with_issues_not_contained$full_path)), 1L)
})
## NOTE(review): the description ("has NA for valid and no message") no
## longer matches the assertions, which check for "ok" / "not checked.".
test_that("mailto: has NA for valid and no message", {
  sub_mailto <- all_links_not_contained[grepl("^mailto", all_links_not_contained$full_path), ]
  expect_identical(sub_mailto$error_level, "ok")
  expect_identical(sub_mailto$message, "not checked.")
})
### data URI -------------------------------------------------------------------
context("not contained data URI")
test_that("data URI only appears when `only_with_issues=FALSE`", {
  expect_identical(
    length(grep("^data:", all_links_not_contained$full_path)), 0L
  )
  expect_identical(
    length(grep("^data:", with_issues_not_contained$full_path)), 0L
  )
})
## NOTE(review): the previous test establishes there are zero data: URIs,
## so `sub_datauri` is empty here and both all() calls are vacuously TRUE —
## this test cannot fail as written.
test_that("data URI has 3L for valid", {
  sub_datauri <- all_links_not_contained[grepl("^data:", all_links_not_contained$full_path), ]
  expect_true(all(sub_datauri$error_level == 3L))
  expect_true(all(sub_datauri$message == ""))
})
### valid links ----------------------------------------------------------------
context("not contained valid links")
test_that("check for status code of valid links + message for fragments", {
  sub_valid <- all_links_not_contained[
    all_links_not_contained$error_level == "success" &
      !is.na(all_links_not_contained$error_level), ]
  expect_true(length(grep("HTTP status code: 200", sub_valid$message)) > 1)
  expect_true(length(grep("Fragment .+ checked and found", sub_valid$message)) > 1)
  expect_true(length(grep("File exists", sub_valid$message)) > 0)
})
### images ---------------------------------------------------------------------
context("not-contained images")
test_that("no alt correctly parsed", {
  sub_no_alt <- all_links_not_contained[
    all_links_not_contained$tag_type == "img" &
      is.na(all_links_not_contained$alt_text),
  ]
  expect_identical(nrow(sub_no_alt), 3L)
  expect_output(
    summary_check_images(all_links_not_contained),
    "No 'alt' text for the following images"
  )
})
test_that("alt correctly parsed", {
  sub_with_alt <- all_links_not_contained[
    all_links_not_contained$tag_type == "img" &
      !is.na(all_links_not_contained$alt_text),
  ]
  expect_identical(nrow(sub_with_alt), 5L)
  ## TODO: expect_identical(sum(!sub_with_alt$error_level), 1L)
})
test_that("http test passes", {
  expect_output(
    summary_check_images(all_links_not_contained),
    "Using HTTP protocol for the following images")
})
###### -------------------------------------------------------------------------
### Pages with no links
###### -------------------------------------------------------------------------
## Static fixture with no links at all: both result tibbles must be empty
## but still carry the full expected column layout.
context("page with no links")
no_links_file <- file.path("html_files", "test_no_links.html")
all_links_no_links <- check_links(
  dir = dirname(no_links_file),
  regexp = "test_no_links.html",
  only_with_issues = FALSE,
  show_summary = FALSE
)
with_issues_no_links <- check_links(
  dir = dirname(no_links_file),
  regexp = "test_no_links.html",
  only_with_issues = TRUE,
  show_summary = FALSE
)
test_that("data structure of object return when there is no links is OK", {
  expect_identical(all_links_no_links, with_issues_no_links)
  expect_identical(lapply(all_links_no_links, class), expected_cols)
  expect_identical(lapply(with_issues_no_links, class), expected_cols)
  expect_identical(nrow(all_links_no_links), 0L)
  expect_identical(nrow(with_issues_no_links), 0L)
})
###### -------------------------------------------------------------------------
### Pages with no broken links
###### -------------------------------------------------------------------------
## Static fixture whose 4 links are all valid; the issues-only result must
## be empty.
context("page with no broken links")
no_broken_file <- file.path("html_files", "test_all_valid.html")
all_links_no_broken <- check_links(
  dir = dirname(no_broken_file),
  regexp = no_broken_file,
  only_with_issues = FALSE,
  show_summary = FALSE
)
with_issues_no_broken <- check_links(
  dir = dirname(no_broken_file),
  regexp = no_broken_file,
  only_with_issues = TRUE,
  show_summary = FALSE
)
test_that("valid values are all TRUE", {
  expect_identical(
    nrow(all_links_no_broken), 4L
  )
  expect_true(all(all_links_no_broken$error_level == "success"))
})
test_that("empty tibble when there are no broken links", {
  expect_identical(
    nrow(with_issues_no_broken), 0L
  )
})
###### -------------------------------------------------------------------------
### Invalid regexp or glob
###### -------------------------------------------------------------------------
context("invalid regexp or glob")
test_that("warning is returned when no file match the regexp", {
  expect_warning(
    check_links(dir = dirname(no_broken_file), regexp = "^foobar$",
                show_summary = FALSE)
  )
})
## NOTE(review): despite the description, this passes a glob-style string
## to `regexp =` (and "*along..." is not a valid regular expression) —
## confirm whether `glob =` was intended.
test_that("warning when no file match the glob", {
  expect_warning(
    check_links(dir = dirname(no_broken_file),
                regexp = "*alongstringnotfoundinfolder*",
                show_summary = FALSE)
  )
})
test_that("error when both glob and regexp are specified", {
  expect_error(
    ## throws error because of default value set to regexp
    check_links(dir = dirname(no_broken_file), glob = "foo",
                show_summary = FALSE)
  )
  expect_error(
    check_links(dir = dirname(no_broken_file),
                glob = "foo", regexp = "bar",
                show_summary = FALSE
    )
  )
})
## Equivalent glob and regexp file selectors must return identical results.
context("compare regexp and glob")
test_that("regexp and glob give the same result", {
  with_glob <- check_links(dir = dirname(no_broken_file),
                           glob = "*_all_valid.html", regexp = NULL,
                           only_with_issues = FALSE,
                           show_summary = FALSE)
  with_regexp <- check_links(dir = dirname(no_broken_file),
                             regexp = "_all_valid.html$",
                             only_with_issues = FALSE,
                             show_summary = FALSE)
  expect_identical(with_glob, with_regexp)
})
##### --------------------------------------------------------------------------
##### Test different types of outputs
##### --------------------------------------------------------------------------
## These tests check the printed summary (show_summary = TRUE). Note the
## assignments inside expect_output() rebind the globals created earlier
## (all_links_no_broken, with_issues_no_broken).
quick_broken_file <- file.path("html_files", "quick_broken.html")
context("check different types of output")
test_that("output with no broken links", {
  expect_output(
    all_links_no_broken <- check_links(
      dir = dirname(no_broken_file),
      regexp = no_broken_file,
      only_with_issues = FALSE,
      show_summary = TRUE
    ),
    "No broken links found"
  )
  expect_output(
    with_issues_no_broken <- check_links(
      dir = dirname(no_broken_file),
      regexp = no_broken_file,
      only_with_issues = TRUE,
      show_summary = TRUE
    ),
    "No broken links found"
  )
})
## Default grouping of the summary is by page; broken links are listed as
## "link: `...`".
test_that("output with broken links (by page)", {
  expect_output(
    all_links_quick_broken <- check_links(
      dir = dirname(quick_broken_file),
      regexp = quick_broken_file,
      only_with_issues = FALSE,
      show_summary = TRUE
    ),
    "link: `no_file.html`"
  )
  expect_output(
    with_issues_quick_broken <- check_links(
      dir = dirname(quick_broken_file),
      regexp = quick_broken_file,
      only_with_issues = TRUE,
      show_summary = TRUE
    ),
    "link: `no_file.html`"
  )
})
## With by = "resource", the summary groups by target instead.
test_that("output with broken links (by resource)", {
  expect_output(
    all_links_quick_broken <- check_links(
      dir = dirname(quick_broken_file),
      regexp = quick_broken_file,
      only_with_issues = FALSE,
      show_summary = TRUE,
      by = "resource"
    ),
    "Resource: `no_file.html`"
  )
  expect_output(
    with_issues_quick_broken <- check_links(
      dir = dirname(quick_broken_file),
      regexp = quick_broken_file,
      only_with_issues = TRUE,
      show_summary = TRUE,
      by = "resource"),
    "Resource: `no_file.html`"
  )
})
#### ---------------------------------------------------------------------------
#### Test for ignores
#### ---------------------------------------------------------------------------
## Fixtures for the ignore_pattern / ignore_tag tests below, all run on the
## self-contained page. The page has broken links, so each call messages
## unless the filters remove everything problematic (expect_silent at the
## end).
expect_message(
  ign_pattern_1 <- check_links(dirname(out_self_contained),
                               regexp = "test_self_contained.html",
                               ignore_pattern = c("^mailto:"),
                               only_with_issues = FALSE, show_summary = FALSE)
)
expect_message(
  ign_pattern_2 <- check_links(dirname(out_self_contained),
                               regexp = "test_self_contained.html",
                               ignore_pattern = c("^mailto:", "^data"),
                               only_with_issues = FALSE, show_summary = FALSE)
)
expect_message(
  ign_pattern_foo <- check_links(dirname(out_self_contained),
                                 regexp = "test_self_contained.html",
                                 ignore_pattern = c("semi_random_string_not_found_in_file"),
                                 only_with_issues = FALSE, show_summary = FALSE)
)
expect_message(
  ign_tag_1 <- check_links(dirname(out_self_contained),
                           regexp = "test_self_contained.html",
                           ignore_tag = "a",
                           only_with_issues = FALSE, show_summary = FALSE)
)
expect_message(
  ign_tag_2 <- check_links(dirname(out_self_contained),
                           regexp = "test_self_contained.html",
                           ignore_tag = c("a", "script"),
                           only_with_issues = FALSE, show_summary = FALSE)
)
expect_message(
  ign_tag_foo <- check_links(dirname(out_self_contained),
                             regexp = "test_self_contained.html",
                             ignore_tag = "foo",
                             only_with_issues = FALSE, show_summary = FALSE)
)
expect_silent(
  ign_pat_tag <- check_links(dirname(out_self_contained),
                             regexp = "test_self_contained.html",
                             ignore_pattern = "^data:",
                             ignore_tag = c("a", "script"),
                             only_with_issues = FALSE, show_summary = FALSE)
)
context("test for ignore_pattern")
## mailto: links exist in the unfiltered result but must be gone once the
## "^mailto:" pattern is ignored.
test_that("1 value for ignore_pattern", {
  expect_true(any(grepl("^mailto:", all_links_self_contained$full_path)))
  expect_false(any(grepl("^mailto:", ign_pattern_1$full_path)))
})
## Two ignore patterns must BOTH be filtered out. The absence checks are
## asserted independently: the previous `expect_false(a & b)` form passed
## whenever at least one pattern happened to be absent, so it could not
## catch a filter that only removed one of the two.
test_that("2 values for ignore_pattern", {
  expect_true(any(grepl("^mailto:", all_links_self_contained$full_path)) &
                any(grepl("^data:", all_links_self_contained$full_path)))
  expect_false(any(grepl("^mailto:", ign_pattern_2$full_path)))
  expect_false(any(grepl("^data:", ign_pattern_2$full_path)))
})
## A pattern that matches nothing must leave the result unchanged. Rows are
## put in a canonical order before comparison: `order()` returns row
## positions, whereas the previous `sort()` produced the character values
## themselves, which are not a valid row subscript for a tibble.
test_that("no effect for non-matching pattern filter", {
  expect_identical(
    ign_pattern_foo[order(ign_pattern_foo$full_path), ],
    all_links_self_contained[order(all_links_self_contained$full_path), ]
  )
})
context("test for ignore_tag")
## <a> tags exist in the unfiltered result but must be gone once the "a"
## tag is ignored.
test_that("1 value for ignore tag", {
  expect_true("a" %in% all_links_self_contained$tag_type)
  expect_false("a" %in% ign_tag_1$tag_type)
})
## Both ignored tags must be absent from the two-tag result. This now
## asserts on `ign_tag_2` (the c("a", "script") fixture) instead of
## `ign_tag_1`, and checks each tag independently: the previous
## `expect_false("a" %in% ign_tag_1$tag_type & "script" %in% ...)` passed
## vacuously because "a" is always filtered from ign_tag_1, so the
## two-tag filter was never actually exercised.
test_that("2 values for ignore tag", {
  expect_true("a" %in% all_links_self_contained$tag_type &
                "script" %in% all_links_self_contained$tag_type)
  expect_false("a" %in% ign_tag_2$tag_type)
  expect_false("script" %in% ign_tag_2$tag_type)
})
## A tag filter that matches nothing must leave the result unchanged.
## As above, rows are ordered with `order()` (row positions), not `sort()`
## (character values, invalid as a tibble row subscript).
test_that("no effect for non-matching tag filter", {
  expect_identical(
    ign_tag_foo[order(ign_tag_foo$full_path), ],
    all_links_self_contained[order(all_links_self_contained$full_path), ]
  )
})
context("test for ignore_tag and ignore_pattern combined")
## Ignoring data: URIs plus <a> and <script> tags removes every link on
## this fixture page, leaving an empty tibble.
test_that("combined filter work", {
  expect_identical(nrow(ign_pat_tag), 0L)
})
|
9c1be220cbf96f0ffe5898e3614b2ed74925ca2f | 83fc27eb524842ae12bed6a5f75cfd2557cffbc2 | /antcolorStats/generateSpeciesDistributionPolys.R | 3bfa40bf351233852050409e44602e4162756005 | [] | no_license | calacademy-research/antcolor | c6dd4891b20bf8f055d640326e7ca4ff78e313a4 | 93832e0c570faed1c629834f4adaa83d7f3998cd | refs/heads/master | 2021-11-23T11:37:48.460709 | 2021-10-30T04:53:05 | 2021-10-30T04:53:05 | 140,758,363 | 3 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,277 | r | generateSpeciesDistributionPolys.R | #Generate species distributions using raw AntWeb data
## Interactive analysis script: installs and loads the rangemap package.
## NOTE(review): install.packages()/install_github() at the top of a script
## re-installs on every run — usually better guarded or run once manually.
install.packages("devtools")
devtools::install_github("marlonecobos/rangemap")
library(rangemap)
## Relies on a pre-existing global `allspecimens` (raw AntWeb data).
unimagedspecimens <- as.data.frame(allspecimens)
View(summary(unimagedspecimens$antwebTaxonName))
unimagedset <- dplyr::filter(unimagedspecimens, grepl('formicinaecamponotus reaumuri', antwebTaxonName))
## cbind() on mixed types yields a character matrix, so Longitude/Latitude
## must be re-coerced to numeric after the data.frame conversion below.
unimagedset <- cbind(as.character(unimagedset$genus), as.numeric(as.character(unimagedset$decimalLongitude)), as.numeric(as.character(unimagedset$decimalLatitude)))
unimagedset <- unimagedset[complete.cases(unimagedset),]
colnames(unimagedset) <- c('Species','Longitude', 'Latitude')
unimagedset <- as.data.frame(unimagedset)
unimagedset$Longitude <- as.numeric(as.character(unimagedset$Longitude))
unimagedset$Latitude <- as.numeric(as.character(unimagedset$Latitude))
#View occurrences on map
rangemap_explore(occurrences = unimagedset)
#Make buffered rangemap
## Exploratory comparison of range-estimation methods on the same
## occurrence set (buffer vs. concave/convex hulls, hierarchical vs.
## k-means splitting); distances are presumably in metres — TODO confirm
## against the rangemap documentation.
buff_range <- rangemap_buff(occurrences = unimagedset, buffer_distance = 75000,
                            save_shp = FALSE, name = 'test')
rangemap_fig(buff_range, zoom = 1)
#Make non-disjunct hull rangemap
hull_range <- rangemap_hull(occurrences = unimagedset, hull_type = 'concave', buffer_distance = 10000,
                            split = FALSE, save_shp = FALSE, name = 'test1')
hull_range2 <- rangemap_hull(occurrences = unimagedset, hull_type = 'convex', buffer_distance = 10000,
                             split = TRUE, cluster_method = 'hierarchical', split_distance = 500000, save_shp = FALSE, name = 'test1')
hull_range3 <- rangemap_hull(occurrences = unimagedset, hull_type = 'convex', buffer_distance = 10000,
                             split = TRUE, cluster_method = 'k-means', n_k_means = 5, save_shp = FALSE, name = 'test1')
rangemap_fig(hull_range2, zoom = 1, add_occurrences = TRUE)
#generate from imaged species of Camponotus and save as shape files to /distributiondata
allspecimens <- as.data.frame(allspecimens)
df <- filter(allspecimens, genus == "Camponotus", bioregion == "Malagasy")
df <- group_by(df, antwebTaxonName)
df <- summarize(df, n())
colnames(df) <- c('taxon', 'n')
threshedtaxa <- filter(df, n >= 5)
filter(allspecimens, antwebTaxonName %in% df$taxon)
num <- function(vector)
{
return(as.numeric(as.character(vector)))
}
root <- getwd()
setwd('Data/distributionSHPs')
test <- threshedtaxa[1:10,]
for(taxon in test$taxon) #nrow(threshedtaxa)threshedtaxa$taxon
{
specimens <- filter(allspecimens, antwebTaxonName == taxon)
specimens <- cbind(as.character(specimens$antwebTaxonName), num(specimens$decimalLongitude), num(specimens$decimalLatitude))
specimens <- specimens[complete.cases(specimens),]
colnames(specimens) <- c('Species','Longitude', 'Latitude')
specimens <- as.data.frame(specimens)
specimens$Longitude <- num(specimens$Longitude)
specimens$Latitude <- num(specimens$Latitude)
rangemap_hull(occurrences = specimens, hull_type = 'convex', buffer_distance = 10000,
split = TRUE, cluster_method = 'hierarchical', split_distance = 500000, save_shp = TRUE, name = taxon)
}
View(df)
source("Helpers/oldSubset.R")
colorspecimensCladeMadagtaxa <- prepareSubsetDataframe(df= colorspecimensCladeMadag,subsetBy= 'antwebTaxonName', threshold = 5)
|
5244019b1e819cf5ea0173f582b413e8ad74b25a | 6700a5a2525b1d5eeaa47913f2d859c386ed2f81 | /tests/testthat/test-plot.R | 7e8e0f32922562647551c5e85a0509d66ba21149 | [] | no_license | amanirad/DMCfun | fbba2d0183b0d1c94e0e60bc6e11f0562a1baf29 | e717cda849df4234df56837b92a64be1ebe19aec | refs/heads/master | 2023-07-30T14:35:52.483186 | 2021-09-20T10:36:37 | 2021-09-20T10:36:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 7,311 | r | test-plot.R | context("plot")
test_that("plot.dmcsim", {
# test 1
dmc <- dmcSim(fullData = TRUE, printInputArgs = FALSE, printResults = FALSE)
# should give error
testthat::expect_error(plot(dmc, figType = "xxx"))
# just check code does not error
testthat::expect_error(plot(dmc, figType = "summary1"), NA)
testthat::expect_error(plot(dmc, figType = "summary2"), NA)
testthat::expect_error(plot(dmc, figType = "summary3"), NA)
testthat::expect_error(plot(dmc, figType = "activation"), NA)
testthat::expect_error(plot(dmc, figType = "trials"), NA)
testthat::expect_error(plot(dmc, figType = "pdf"), NA)
testthat::expect_error(plot(dmc, figType = "cdf"), NA)
testthat::expect_error(plot(dmc, figType = "caf"), NA)
testthat::expect_error(plot(dmc, figType = "delta"), NA)
testthat::expect_error(plot(dmc, figType = "rtCorrect", errorBars = TRUE), NA)
testthat::expect_error(plot(dmc, figType = "rtErrors"), NA)
testthat::expect_error(plot(dmc, figType = "errorRate"), NA)
testthat::expect_error(plot(dmc, figType = "all"), NA)
# test 2
dmc <- dmcSim(fullData = FALSE, printInputArgs = FALSE, printResults = FALSE)
# should give error
testthat::expect_error(plot(dmc, figType = "activation"))
testthat::expect_error(plot(dmc, labels = c("a", "b", "c")))
# just check code does not error
testthat::expect_error(plot(dmc, figType = "summary1"), NA)
testthat::expect_error(plot(dmc, figType = "summary2"), NA)
testthat::expect_error(plot(dmc, figType = "cdf"), NA)
testthat::expect_error(plot(dmc, figType = "caf"), NA)
testthat::expect_error(plot(dmc, figType = "delta"), NA)
testthat::expect_error(plot(dmc, figType = "rtCorrect"), NA)
testthat::expect_error(plot(dmc, figType = "rtErrors"), NA)
testthat::expect_error(plot(dmc, figType = "errorRate"), NA)
testthat::expect_error(plot(dmc, figType = "all"), NA)
# test 3
params <- list(amp = seq(20, 30, 2))
dmc <- dmcSims(params)
plot(dmc, ncol = 2, col = c("red", "green"), legendPos = "topright")
})
test_that("plot.dmcob", {
# real datasets
testthat::expect_error(plot(DMCfun::flankerData), NA)
testthat::expect_error(plot(DMCfun::flankerData, errorBars = TRUE), NA)
testthat::expect_error(plot(DMCfun::flankerData, errorBars = TRUE, errorBarType = "sd"), NA)
testthat::expect_error(plot(DMCfun::flankerData, errorBars = TRUE, errorBarType = "xxx"))
testthat::expect_error(plot(DMCfun::flankerData, figType = "summary"), NA)
testthat::expect_error(plot(DMCfun::flankerData, figType = "rtCorrect"), NA)
testthat::expect_error(plot(DMCfun::flankerData, figType = "errorRate"), NA)
testthat::expect_error(plot(DMCfun::flankerData, figType = "rtErrors"), NA)
testthat::expect_error(plot(DMCfun::flankerData, figType = "cdf"), NA)
testthat::expect_error(plot(DMCfun::flankerData, figType = "caf"), NA)
testthat::expect_error(plot(DMCfun::flankerData, figType = "delta"), NA)
testthat::expect_error(plot(DMCfun::flankerData, figType = "all"), NA)
testthat::expect_error(plot(DMCfun::flankerData, figType = "xxx"))
testthat::expect_error(plot(DMCfun::flankerData, labels = c("a", "b", "c")))
testthat::expect_error(plot(DMCfun::flankerData, legend = "xxx"))
testthat::expect_error(plot(DMCfun::flankerData, subject = 1), NA)
testthat::expect_error(plot(DMCfun::flankerData, subject = 999))
testthat::expect_error(plot(DMCfun::flankerData, xlabs = FALSE, ylabs = FALSE, xaxts = FALSE, yaxts = FALSE), NA)
testthat::expect_error(plot(DMCfun::flankerData, legend = function() {}), NA)
testthat::expect_error(plot(DMCfun::flankerData, cafBinLabels = TRUE), NA)
testthat::expect_error(plot(DMCfun::simonData), NA)
testthat::expect_error(plot(DMCfun::simonData, errorBars = TRUE), NA)
testthat::expect_error(plot(DMCfun::simonData, errorBars = TRUE, errorBarType = "sd"), NA)
testthat::expect_error(plot(DMCfun::simonData, errorBars = TRUE, errorBarType = "xxx"))
testthat::expect_error(plot(DMCfun::simonData, figType = "summary"), NA)
testthat::expect_error(plot(DMCfun::simonData, figType = "rtCorrect"), NA)
testthat::expect_error(plot(DMCfun::simonData, figType = "errorRate"), NA)
testthat::expect_error(plot(DMCfun::simonData, figType = "rtErrors"), NA)
testthat::expect_error(plot(DMCfun::simonData, figType = "cdf"), NA)
testthat::expect_error(plot(DMCfun::simonData, figType = "caf"), NA)
testthat::expect_error(plot(DMCfun::simonData, figType = "delta"), NA)
testthat::expect_error(plot(DMCfun::simonData, figType = "all"), NA)
testthat::expect_error(plot(DMCfun::simonData, figType = "xxx"))
testthat::expect_error(plot(DMCfun::simonData, labels = c("a", "b", "c")))
testthat::expect_error(plot(DMCfun::simonData, legend = "xxx"))
testthat::expect_error(plot(DMCfun::simonData, subject = 1), NA)
testthat::expect_error(plot(DMCfun::simonData, subject = 999))
testthat::expect_error(plot(DMCfun::simonData, xlabs = FALSE, ylabs = FALSE, xaxts = FALSE, yaxts = FALSE), NA)
testthat::expect_error(plot(DMCfun::simonData, legend = function() {}), NA)
testthat::expect_error(plot(DMCfun::simonData, cafBinLabels = TRUE), NA)
# simulated datasets
dat <- createDF(nSubjects = 50, nTrl = 50,
design = list("Comp" = c("comp", "incomp")))
dat <- addDataDF(dat,
RT = list("Comp_comp" = c(420, 100, 80),
"Comp_incomp" = c(470, 100, 95)),
Error = list("Comp_comp" = c(5, 3, 2, 1, 2),
"Comp_incomp" = c(15, 8, 4, 2, 2)))
datOb <- dmcObservedData(dat)
testthat::expect_error(plot(datOb), NA)
# plot combined data
dat <- dmcCombineObservedData(DMCfun::flankerData, DMCfun::simonData) # combine flanker/simon data
testthat::expect_error(plot(dat, figType = "all", cols = c("black", "darkgrey"),
pchs = c(1, 2), resetPar = FALSE), NA)
testthat::expect_error(plot(dat, subject = 1, figType = "all", cols = c("black", "darkgrey"),
pchs = c(1, 2), resetPar = FALSE), NA)
})
test_that("plot.dmcfit", {
# just check code does not error
# test 1
resTh <- dmcFit(DMCfun::flankerData, nTrl = 1000,
printInputArgs = FALSE, printResults = FALSE)
testthat::expect_error(plot(resTh, DMCfun::flankerData), NA)
# test 2
resTh <- dmcFit(DMCfun::simonData, nTrl = 1000,
printInputArgs = FALSE, printResults = FALSE)
testthat::expect_error(plot(resTh, DMCfun::simonData), NA)
# test 3
resTh <- dmcFitSubject(DMCfun::flankerData, nTrl = 1000, subjects = 10,
printInputArgs = FALSE, printResults = FALSE)
testthat::expect_error(plot(resTh, DMCfun::flankerData, subject = 10), NA)
# test 4
dat <- createDF(nSubjects = 50, nTrl = 50,
design = list("Comp" = c("comp", "incomp")))
dat <- addDataDF(dat,
RT = list("Comp_comp" = c(500, 150, 100),
"Comp_incomp" = c(530, 150, 150)),
Error = list("Comp_comp" = c(5, 3, 2, 1, 1),
"Comp_incomp" = c(15, 12, 5, 2, 1)))
datOb <- dmcObservedData(dat)
resTh <- dmcFit(datOb, nTrl = 1000, printInputArgs = FALSE, printResults = FALSE)
testthat::expect_error(plot(resTh, datOb), NA)
})
|
71e607857e4a146140a969aa16e8db62dbaf10af | 40b8c18de5170436038973ed1478e9817a4270f8 | /scripts/04_predict_MICs_all_data.R | ce39649f8ec2d756e4c60d76fee5317aa0c0d161 | [] | no_license | desval/ResazurinMIC | a97eb7e12a8407e8af27cfb7de77d3fc60c269fe | 19b6aac10a24280eec77139c36e7192e280468c5 | refs/heads/master | 2021-09-15T16:56:18.775159 | 2017-07-10T10:46:04 | 2017-07-10T10:46:04 | 79,265,781 | 0 | 0 | null | 2017-01-17T19:54:13 | 2017-01-17T19:54:13 | null | UTF-8 | R | false | false | 5,318 | r | 04_predict_MICs_all_data.R |
# Description
## ----------------------------------------------------------------------------------------------------------------------------
# In this file we want to validate the method by predicting the E-Test starting from the MIC and its sd from the dose-response
# model. We do this by bootstrap, because we need to take in account the sd of MIC from the dose-response.
# For each antibiotic we use the linear regression parameters variance~covariance matrix to sample new estimates of the lm model.
# Similarly, we sample new estimates for the MIC. We combine the two and get confidence intervals for the predicted E-Test
# Linear regression parameters are the same for each antibiotic.
# Load dependencies and set options
## ----------------------------------------------------------------------------------------------------------------------------
rm(list=ls())
packages <- c("plyr", "mvtnorm")
to.install <- setdiff(packages, rownames(installed.packages()))
if (length(to.install) > 0) {
install.packages(to.install)
}
lapply(packages, library, character.only = TRUE)
options(scipen = 4)
# Load data
## ----------------------------------------------------------------------------------------------------------------------------
df <- read.csv("output/tables/alldata.csv",stringsAsFactors = F)
parameters <- readRDS("output/tables/lm_parameters_variance_covariance_matrix_list.rds")
# count data just to make sure everything is alright
su1 <- ddply(df, ~antibiotic + strain, summarise, count=length(antibiotic))
su2 <- ddply(su1, ~antibiotic , summarise, count=length(antibiotic))
# we make compute the CI by resampling both the
# estimates of the MIC and the estimates of the linear regression.
# Note that we have the same regression parameters for all observations.
## ----------------------------------------------------------------------------------------------------------------------------
size <- 1e5 # size of the random samples for bootstrapping
set.seed(1234)
# First we get the antibiotic specific parameters (parameters are no antibiotic specific anymore, as there is no max correlation anymore)
coefficients <- coef(parameters[[1]]$Estimates)
intercept <- coefficients[[1]]
slope <- coefficients[[2]]
variance.covariance <- parameters[[1]]$Matrix[[1]]
# check if everything is ok: Estimate and its sd cannot be NA!!
if(any(is.na(df$Estimate[df$quality=="quality ok"]))==T |
any(is.na(df$EstimateStderror[df$quality=="quality ok"])==T)
)stop("There are NAs in either the Estimate or sd. These obs cannot be included in the loop")
if(any(duplicated(df$ID))==T)stop("ID is not unique!!")
# Create the vars where we will save the confidence intervals
df$Etest_predicted_log <- NA
df$Etest_predicted <- NA
df$CI_low <- NA
df$CI_up <- NA
for(ID in unique(df$ID[df$quality=="quality ok"])){ ## we only do the boostrap for the quality ok
## debug:
# ID <- "27_Azithromycin_80strains12.txt"
# ID <- "65_Penicillin_80strains8.txt"
# ID <- "22_Gentamicin_80strains3.txt" sd=NA
# ID <- "WHO_L_Gentamicin_4.12.15.exp3.txt" sd=NA
# ID <- "60_Gentamicin_80strains8.txt" sd=NA
MIC.estimated <- df$Estimate[df$ID==ID]
MIC.estimated.sd <- df$EstimateStderror[df$ID==ID]
# get predicted value
ETest_predicted_log <- intercept + slope * (MIC.estimated)
ETest_predicted <- exp(ETest_predicted_log)
# now we want the confidence interval:
# sample the EC50
MIC.sample_log <- rnorm(size, MIC.estimated, MIC.estimated.sd)
# sample the coefficients
linear_coeff.sample <- rmvnorm(size, mean = coefficients, sigma = variance.covariance )
ETest_sample_log <- linear_coeff.sample[,1] + linear_coeff.sample[,2] * MIC.sample_log
ETest_sample <- exp(ETest_sample_log)
#hist(ETest_sample)
# now we can compute the confidence intervals via cut-off
ETest_sample_CI <- quantile(ETest_sample, probs = c(0.025, 0.975),na.rm=T)
# save the results
df$Etest_predicted_log[df$ID==ID] <- ETest_predicted_log
df$Etest_predicted[df$ID==ID] <- ETest_predicted
df$CI_low[df$ID==ID] <- ETest_sample_CI[1]
df$CI_up[df$ID==ID] <- ETest_sample_CI[2]
}
# fix double observations problem: should do this before putting data in the regression
## ----------------------------------------------------------------------------------------------------------------------------
df$quality[grepl("<", df$MIC)] <- "Etest below limit of detection"
df$quality[grepl(">", df$MIC)] <- "Etest above limit of detection"
# fill in values for above limit of detection
## ----------------------------------------------------------------------------------------------------------------------------
df$Etest_predicted[grepl("limit of detection",
df$quality)] <- exp(df$Estimate[grepl("limit of detection", df$quality)])
df$Etest_predicted_log[grepl("limit of detection",
df$quality)] <- log(df$Etest_predicted[grepl("limit of detection", df$quality)])
# save
## ----------------------------------------------------------------------------------------------------------------------------
write.csv(df,file="output/tables/Predicted_Etest_all_data.csv", row.names = F)
# file end
## -----------------------------------------------------------------------------------------------------------
|
6be0dd53747b8d98f49d18fa78be82a68e830df7 | 1152b0d52b9244257977f3b97b8c503bc9a6508b | /tests/testthat/helper.R | 45e460f236e9b29b8b2523007b81e702128ba6a1 | [] | no_license | ruaridhw/r-AMPL | 48c5310b7a693db573b4890ca09ae5514fa3d6e5 | 508431845e84c9f0327caba600647bc521e1901e | refs/heads/master | 2021-03-19T18:50:14.669663 | 2018-03-07T12:24:23 | 2018-03-07T12:24:23 | 118,458,045 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 87 | r | helper.R | library(magrittr)
library(reticulate)
reticulate::use_python("~/anaconda3/bin/python")
|
aa88b9cd072b684321ef8f804d5630806b01c8b6 | e7ed2ebcd4f42e4b4eac115ef15fdc6d3a191b5d | /app/dataProcessor.R | 3025b5f99c8dd0b9d3b386e36e7e056144262e78 | [] | no_license | HonHo/DataProducts | f703fd572edb11e9ce6f7025986c94b66f472f4c | c1bb9cce5c682a04048800fba5f794b05a2bfe51 | refs/heads/master | 2021-01-10T17:52:29.513790 | 2016-02-28T03:38:18 | 2016-02-28T03:38:18 | 52,702,471 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,205 | r | dataProcessor.R | library(lattice)
library(ggplot2)
library(caret)
#library(rattle) # Not supported by shinyapp.io!
library(e1071)
library(randomForest)
library(MASS)
library(survival)
library(splines)
library(parallel)
library(gbm)
library(plyr)
library(rpart)
library(reshape2)
accuDataFile <- "./data/accuracies.data"
accuData <- read.table(accuDataFile, header = TRUE, sep = ",")
accuDataMelt <- melt(accuData, id = "percent")
names(accuDataMelt)[2:3] <- c("model", "accuracy")
cNames <- c("buying", "maint", "doors", "persons", "lug_boot", "safety", "classe")
carsDataFile <- "./data/car.data"
carsData <- read.table(carsDataFile, header = FALSE, sep = ",", col.names = cNames)
carsData$buying <- ordered(carsData$buying, levels = c("low", "med", "high", "vhigh"))
carsData$maint <- ordered(carsData$maint, levels = c("low", "med", "high", "vhigh"))
carsData$doors <- ordered(carsData$doors, levels = c("2", "3", "4", "5more"))
carsData$persons <- ordered(carsData$persons, levels = c("2", "4", "more"))
carsData$lug_boot <- ordered(carsData$lug_boot, levels = c("small", "med", "big"))
carsData$safety <- ordered(carsData$safety, levels = c("low", "med", "high"))
# Get cars data
getCarsData <- function() {
carsData
}
# Get accuracy data
getAccuData <- function() {
accuData
}
# Get melted accuracy data
getAccuDataMelt <- function() {
accuDataMelt
}
getIdxTrain <- function(percent) {
idxTrain <- createDataPartition(y = carsData$classe, p = percent/100, list = FALSE)
idxTrain
}
getTrainData <- function(idxTrain) {
trainData <- carsData[idxTrain,]
trainData
}
getTestData <- function(idxTrain) {
testData <- carsData[-idxTrain,]
testData
}
# Plot charts of percents of training data used vs. accuracies
getAccuracyPlot <- function(inPercent) {
g <- ggplot(accuDataMelt, aes(percent, accuracy, group = model, colour = model)) +
geom_line(size = 1) + geom_point(size = 3.5) + theme_bw() +
geom_vline(aes(xintercept = inPercent), color = "blue", size = 0.6) +
labs(title = "Training Data Size vs. Accuracy") +
labs(x = "Training Data Size in Percentage of Sample Data Set") +
labs(y = "Accuracy in Percentage")
g
}
|
46a5cffff7dba8171a01d54b5d1bf3d6c85db46f | 387f07b6ecb1f1218fc3b78889703bb4bab2f4f5 | /RUNME.R | 4242541c46989bd196310e5b4311243f69d654b4 | [
"MIT"
] | permissive | JouniVatanen/stools | 94dee1eb23e1883b782220e8dd1f749dcba895fa | e0035fd5aa26dcb06fc7a0b1c4088e7f71f489d0 | refs/heads/master | 2023-01-28T21:39:41.085342 | 2023-01-11T07:31:22 | 2023-01-11T07:31:22 | 142,003,341 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,221 | r | RUNME.R | # What you need to do, for install() to work
# 1. Install Rtools 4.0 and R 4.0.3
# 2. Set ...\Rtools\usr\bin to your PATH e.g. with Powershell
# https://stackoverflow.com/questions/714877/setting-windows-powershell-environment-variables
# 3. Set ...\Rtools\mingw`$(WIN)\bin to your BINPREF e.g. with Powershell
# 4. Check RTOOLS40_HOME points to Rtools ...\Rtools directory at ..\R\etc\x64\Makeconf
# Make sure required checkpoint and devtools are installed
if(tryCatch(
packageVersion("checkpoint") <= '1.0.0' | packageVersion("devtools") <= "2.0.0",
error = function(e) T)) { install.packages(c("checkpoint", "devtools"))}
# Checkpoint installs packages
checkpoint::create_checkpoint(
"2022-02-13", checkpoint_location = Sys.getenv("USERPROFILE"),
project_dir = "./R")
checkpoint::use_checkpoint(
"2022-02-13", checkpoint_location = Sys.getenv("USERPROFILE"))
# Document and install package
devtools::document()
devtools::install(upgrade = FALSE)
# Commit changes and push the files to the github
# 1. Commit changes shell "git add .;git commit -m 'comment'" OR Rstudio UI
# 2. Push changes either shell "git push" OR Rstudio UI
# 3. Install from github with devtools::install_github(JouniVatanen/stools)
|
5b3ea04183ac67768d31045cc6a394a40184361b | 43123af59b384a0adb1d2f1440e1c33dce4d5449 | /examples/jobs/test_aggregate_gr.R | bbf8aac22d530351f5138a00f9a4d6916bd3cbff | [] | no_license | timemod/regts | 8723e6b2f2e4f088564c648fbdecf1a74ade7203 | 754490eb024354d37774e929bf640f1af58ccb5a | refs/heads/master | 2023-03-06T16:45:31.978567 | 2023-03-03T13:36:53 | 2023-03-03T13:36:53 | 107,998,574 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 92 | r | test_aggregate_gr.R | library(regts)
ts <- regts(matrix(1:10, ncol = 2), start = "2010Q3")
ts
aggregate_gr(ts)
|
a8512878e2cc7cc38bca5a437355ef86aeb9b74b | 3ad602aee1c2c0efea2a34a38511322c29b65f45 | /man/summarize_enrichment_results.Rd | d9cc7a247890b343aac19408655ca46aa535ae98 | [
"MIT"
] | permissive | AntonioGPS/pathfindR | f773480d490c76954692b11e5758e1a67360a35d | 3ab885e04ae798e82ddbf2bac7a5169b447ebcfe | refs/heads/master | 2020-08-01T02:13:04.450534 | 2019-09-25T09:50:43 | 2019-09-25T09:50:43 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,774 | rd | summarize_enrichment_results.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/enrichment_functions.R
\name{summarize_enrichment_results}
\alias{summarize_enrichment_results}
\title{Summarize Enrichment Results}
\usage{
summarize_enrichment_results(enrichment_res,
list_active_snw_genes = FALSE)
}
\arguments{
\item{enrichment_res}{a dataframe of combined enrichment results. Columns are: \describe{
\item{ID}{KEGG ID of the enriched pathway}
\item{Pathway}{Description of the enriched pathway}
\item{Fold_Enrichment}{Fold enrichment value for the enriched pathway}
\item{p_value}{p value of enrichment}
\item{adj_p}{adjusted p value of enrichment}
\item{non_DEG_Active_Snw_Genes (OPTIONAL)}{the non-DEG active subnetwork genes, comma-separated}
}}
\item{list_active_snw_genes}{boolean value indicating whether or not to report
the non-DEG active subnetwork genes for the active subnetwork which was enriched for
the given pathway with the lowest p value (default = FALSE)}
}
\value{
a dataframe of summarized enrichment results (over multiple iterations). Columns are: \describe{
\item{ID}{KEGG ID of the enriched pathway}
\item{Pathway}{Description of the enriched pathway}
\item{Fold_Enrichment}{Fold enrichment value for the enriched pathway}
\item{occurrence}{the number of iterations that the given pathway was found to enriched over all iterations}
\item{lowest_p}{the lowest adjusted-p value of the given pathway over all iterations}
\item{highest_p}{the highest adjusted-p value of the given pathway over all iterations}
\item{non_DEG_Active_Snw_Genes (OPTIONAL)}{the non-DEG active subnetwork genes, comma-separated}
}
}
\description{
Summarize Enrichment Results
}
\examples{
\dontrun{
summarize_enrichment_results(enrichment_res)
}
}
|
3f05a27f0d4711e1789d53126bc340e844f1a082 | 09b60cc0cf3fdd7b90e957d9e4c1681d0fd639e2 | /R/head.R | 67819626cea02c92b28936853dc7a8914f9996f1 | [] | no_license | mobilizingcs/mobilizr | 2eaf7642e7f31ed023f6ffbb508db4622cf1a57a | e9b50361ba6b0eec32876e4f595fd18c13f86736 | refs/heads/master | 2023-08-08T01:39:54.796438 | 2023-07-23T23:39:55 | 2023-07-23T23:39:55 | 35,117,092 | 9 | 13 | null | 2022-08-15T19:59:29 | 2015-05-05T18:34:13 | R | UTF-8 | R | false | false | 440 | r | head.R | #' Print first six observations.
#'
#' \code{head} will print the first six values of an object.
#'
#' @param x Formula or other object to print.
#' @param data Data frame. The data where the variables can be found.
#' @examples
#' head(~hp, data = mtcars)
#' tail(~hp, data = mtcars)
#'
#' @importFrom mosaic aggregatingFunction1
#' @export
# Allow user to use formula syntax with head function.
head <- aggregatingFunction1(utils::head)
|
58aa1535429e8213d6d765552c3368767900a5ea | 009d8646c21cb4687765e653a21a34b67e28bf72 | /Airpollution.R | 39b2beeb643336192d91bf38a129d2fb08d7a207 | [] | no_license | rutujak24/AirPollution | 3be86c9eb22018a29b7b22b7c73b076cbc9d1c02 | 787aa06267e0d717c03fc1396e5266f2b670c17f | refs/heads/master | 2022-05-30T03:24:32.637207 | 2020-05-04T04:31:12 | 2020-05-04T04:31:12 | 261,074,428 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,450 | r | Airpollution.R |
R version 3.6.3 (2020-02-29) -- "Holding the Windsock"
Copyright (C) 2020 The R Foundation for Statistical Computing
Platform: x86_64-w64-mingw32/x64 (64-bit)
R is free software and comes with ABSOLUTELY NO WARRANTY.
You are welcome to redistribute it under certain conditions.
Type 'license()' or 'licence()' for distribution details.
Natural language support but running in an English locale
R is a collaborative project with many contributors.
Type 'contributors()' for more information and
'citation()' on how to cite R or R packages in publications.
Type 'demo()' for some demos, 'help()' for on-line help, or
'help.start()' for an HTML browser interface to help.
Type 'q()' to quit R.
[Previously saved workspace restored]
> pollutantmean <- function(directory, pollutant, id ) {
+ files_full <- list.files(directory, full.names = TRUE)
+
+ dat <- data.frame()
+
+ for (i in id) {
+ dat <- rbind(dat, read.csv(files_full[i]))
+ }
+
+ mean(dat[, pollutant], na.rm = TRUE)
+ }
> pollutantmean <- function(directory, pollutant, id ) {
+ files_full <- list.files(directory, full.names = TRUE)
+
+ dat <- data.frame()
+
+ for (i in id) {
+ dat <- rbind(dat, read.csv(files_full[i]))
+ }
+
+ mean(dat[, pollutant], na.rm = TRUE)
+ }
> pollutantmean("specdata", "sulfate", 1:10)
[1] 4.064128
> pollutantmean("specdata", "nitrate", 70:72)
[1] 1.706047
> pollutantmean("specdata", "sulfate", 34)
[1] 1.477143
> pollutantmean("specdata", "nitrate")
Error in pollutantmean("specdata", "nitrate") :
argument "id" is missing, with no default
> pollutantmean("specdata", "nitrate", 1:332)
[1] 1.702932
> complete <- function(directory, id = 1:332) {
+ files_full <- list.files(directory, full.names = TRUE)
+ dat <- data.frame()
+
+ for (i in id) {
+ moni_i <- read.csv(files_full[i])
+ nobs <- sum(complete.cases(moni_i))
+ tmp <- data.frame(i, nobs)
+ dat <- rbind(dat, tmp)
+ }
+
+ colnames(dat) <- c("id", "nobs")
+ dat
+ }
> cc <- complete("specdata", c(6, 10, 20, 34, 100, 200, 310))
> print(cc$nobs)
[1] 228 148 124 165 104 460 232
> cc <- complete("specdata", 54)
> print(cc$nobs)
[1] 219
> RNGversion("3.5.1")
Warning message:
In RNGkind("Mersenne-Twister", "Inversion", "Rounding") :
non-uniform 'Rounding' sampler used
> set.seed(42)
> cc <- complete("specdata", 332:1)
> use <- sample(332, 10)
> print(cc[use, "nobs"])
[1] 711 135 74 445 178 73 49 0 687 237
> corr <- function(directory, threshold = 0) {
+ files_full <- list.files(directory, full.names = TRUE)
+ dat <- vector(mode = "numeric", length = 0)
+
+ for (i in 1:length(files_full)) {
+ moni_i <- read.csv(files_full[i])
+ csum <- sum((!is.na(moni_i$sulfate)) & (!is.na(moni_i$nitrate)))
+ if (csum > threshold) {
+ tmp <- moni_i[which(!is.na(moni_i$sulfate)), ]
+ submoni_i <- tmp[which(!is.na(tmp$nitrate)), ]
+ dat <- c(dat, cor(submoni_i$sulfate, submoni_i$nitrate))
+ }
+ }
+
+ dat
+ }
> cr <- corr("specdata")
> cr <- sort(cr)
> RNGversion("3.5.1")
Warning message:
In RNGkind("Mersenne-Twister", "Inversion", "Rounding") :
non-uniform 'Rounding' sampler used
> set.seed(868)
> out <- round(cr[sample(length(cr), 5)], 4)
> print(out)
[1] 0.2688 0.1127 -0.0085 0.4586 0.0447
> cr <- corr("specdata", 129)
> cr <- sort(cr)
> n <- length(cr)
> RNGversion("3.5.1")
Warning message:
In RNGkind("Mersenne-Twister", "Inversion", "Rounding") :
non-uniform 'Rounding' sampler used
> set.seed(197)
> out <- c(n, round(cr[sample(n, 5)], 4))
> print(out)
[1] 243.0000 0.2540 0.0504 -0.1462 -0.1680 0.5969
> cr <- corr("specdata", 2000)
> n <- length(cr)
> cr <- corr("specdata", 1000)
> cr <- sort(cr)
> print(c(n, round(cr, 4)))
[1] 0.0000 -0.0190 0.0419 0.1901
> save.image("C:\\Users\\Rutuja\\Documents\\R\\Rprograms\\Airpollution.R")
> save.image("C:\\Users\\Rutuja\\Documents\\R\\Rprograms\\Airpollution")
>
|
977f10fe092f783e74bc2e57171fb925b2d2a390 | 384c3dbc571be91c6f743d1427dec00f13e0d8ae | /r/kernels/zhangyan0623-titanic-analysis/script/titanic-analysis.r | 22ad1380c0f266d4c0f8bf34ddf819e1d75bb51f | [] | no_license | helenaK/trustworthy-titanic | b9acdd8ca94f2fa3f7eb965596eed4a62821b21e | ade0e487820cf38974561da2403ebe0da9de8bc6 | refs/heads/master | 2022-12-09T20:56:30.700809 | 2020-09-10T14:22:24 | 2020-09-10T14:22:24 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,410 | r | titanic-analysis.r |
# This R environment comes with all of CRAN preinstalled, as well as many other helpful packages
# The environment is defined by the kaggle/rstats docker image: https://github.com/kaggle/docker-rstats
# For example, here's several helpful packages to load in
library(ggplot2) # Data visualization
library(readr) # CSV file I/O, e.g. the read_csv function
# Input data files are available in the "../input/" directory.
# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory
system("ls ../input")
# Any results you write to the current directory are saved as output.
library(tidyverse)
library(rpart)
library(rpart.plot)
library(caret)
library(ggplot2)
library(Hmisc)
#import dataset
train<-read_csv("../input/train.csv")
test<-read_csv("../input/test.csv")
#How dose Embarked impact on the survival or pessengers
ggplot(train, aes(x = Embarked, fill = factor(Survived))) +
geom_bar(stat='count', position='dodge') +
labs(x = 'Embarked')
#How different the Pclass impact on survial of male & female
# Exploratory analysis and decision-tree model for Titanic survival data.
# Expects `train` and `test` data frames (Kaggle Titanic schema) to be loaded
# already, with ggplot2, dplyr (bind_rows), rpart, and rpart.plot attached.

# Survival counts by sex, faceted by passenger class.
ggplot(train, aes(x = Sex, fill = factor(Survived))) +
  geom_bar(position = 'dodge') +
  facet_grid(. ~ Pclass) +
  # BUG FIX: the x axis shows Sex (Pclass is the facet), so label it "Sex".
  labs(title = "How Different Pclass impact the survival of male&female passengers",
       x = "Sex", y = "Count")

# Test how the family size impacts the survival of passengers.
train$FamilySize <- train$SibSp + train$Parch
ggplot(train, aes(x = FamilySize, fill = factor(Survived))) +
  geom_bar(stat = 'count', position = 'dodge') +
  scale_x_continuous(breaks = c(1:11)) +
  labs(x = 'Family Size')

# Child (< 16) vs adult survival counts; rows with missing Age stay NA.
train$Child[train$Age < 16] <- 'Child'
train$Child[train$Age >= 16] <- 'Adult'
table(train$Child, train$Survived)

# Deal with the missing values.
# Fare-by-port boxplot motivates imputing 'C' for the missing Embarked rows.
# NOTE(review): assumes missing Embarked is NA; in some Titanic CSVs it is
# the empty string "" -- confirm against the loaded data.
ggplot(train, aes(x = Embarked, y = Fare)) + geom_boxplot(aes(fill = factor(Pclass)))
train$Embarked[is.na(train$Embarked)] <- 'C'

# Impute the single missing test Fare with the mean fare of comparable
# passengers (embarked at 'S', 3rd class, Fare present).
test[is.na(test$Fare), ]
test1 <- test[c(test$Embarked == 'S'), ]
test2 <- test1[c(test1$Pclass == 3), ]
test3 <- test2[complete.cases(test2$Fare), ]
test$Fare[is.na(test$Fare)] <- mean(test3$Fare)

# Feature engineering on the combined data so train/test share factor levels.
full <- bind_rows(train, test)
full$Child[full$Age < 16] <- 'Child'
full$Child[full$Age >= 16] <- 'Adult'
full$FamilySize <- full$SibSp + full$Parch
# Discretized family size: 0 / 1-3 / 4+ companions.
full$FsizeD[full$FamilySize == 0] <- 'singleton'
full$FsizeD[full$FamilySize < 4 & full$FamilySize > 0] <- 'small'
full$FsizeD[full$FamilySize >= 4] <- 'large'

# Create title from passenger names: strip everything up to ", " and from
# the first "." onward, leaving e.g. "Mr", "Miss", "the Countess".
full$Title <- gsub('(.*, )|(\\..*)', '', full$Name)
rare_title <- c('Dona', 'Lady', 'the Countess', 'Capt', 'Col', 'Don',
                'Dr', 'Major', 'Rev', 'Sir', 'Jonkheer')
# Collapse French/uncommon synonyms, then lump rare titles together.
full$Title[full$Title == 'Mlle'] <- 'Miss'
full$Title[full$Title == 'Ms'] <- 'Miss'
full$Title[full$Title == 'Mme'] <- 'Mrs'
full$Title[full$Title %in% rare_title] <- 'Rare Title'
table(full$Sex, full$Title)

# Convert model predictors to factors, then split back into train/test
# (rows 1-891 are the original training set, 892-1309 the test set).
full$Sex <- as.factor(full$Sex)
full$Pclass <- as.factor(full$Pclass)
full$Title <- as.factor(full$Title)
full$Embarked <- as.factor(full$Embarked)
full$FsizeD <- as.factor(full$FsizeD)
train <- full[1:891, ]
test <- full[892:1309, ]

# Build the model: classification tree for survival.
fol <- formula(Survived ~ Title + Fare + Pclass + Age)
model <- rpart(fol, method = "class", data = train)

# Visualize the fitted tree.
rpart.plot(model, branch = 0, branch.type = 2, type = 1, extra = 102,
           shadow.col = "pink", box.col = "gray", split.col = "magenta",
           main = "Decision tree for model")
rpred <- predict(model, newdata = test, type = "class")
|
ad99a750cff662485fb3478d4b7a06a66c2953bc | d796eddc2063b019a0972009ac50dcc2388a1d4a | /R/Order_of_Predictors.R | ac213ffacd4841f0c02d8df64fa96d62a0758890 | [] | no_license | jbryer/DATA606Spring2021 | 6139f786bc27c847621ca2b9bcae5347877b8a87 | 94a5c58d481484cd3440c5e3a40beea17291fd7f | refs/heads/master | 2023-06-08T14:06:32.861816 | 2021-07-01T17:23:24 | 2021-07-01T17:23:24 | 312,899,219 | 2 | 7 | null | null | null | null | UTF-8 | R | false | false | 454 | r | Order_of_Predictors.R | lm1 <- lm(poverty ~ female_house + white, data=poverty)
# Demonstrates that anova() reports Type I (sequential) sums of squares,
# so each predictor's SS depends on the order it enters the model.
# (lm1, fit just above, is poverty ~ female_house + white.)
summary(lm1)
anova(lm1)
# The sequential SS from both orderings sum to the same total explained SS.
# NOTE(review): 132.57 and 8.21 are presumably the SS values printed by
# anova(lm1) above -- confirm against the actual output.
132.57 + 8.21
# Same two predictors, entered in the opposite order.
lm2 <- lm(poverty ~ white + female_house, data=poverty)
summary(lm2)
anova(lm2)
# NOTE(review): presumably the SS printed by anova(lm2) -- confirm.
45.71 + 95.06
# Single-predictor models for comparison.
lm3 <- lm(poverty ~ female_house, data = poverty)
summary(lm3)
lm4 <- lm(poverty ~ white, data = poverty)
summary(lm4)
# Main effects plus interaction (white * female_house).
lm5 <- lm(poverty ~ white * female_house, data = poverty)
summary(lm5)
# NOTE(review): this overwrites lm5 with the interaction-only model
# (white:female_house); the previous full-interaction fit is lost.
lm5 <- lm(poverty ~ white : female_house, data = poverty)
summary(lm5)
|
4736f964d6a2ba29cc62d70e1761fd1d5b4b8c06 | 13adac8ae3ee91265aa08327cb8e9d4a0bd877d0 | /results/rt_graphs.R | a68f23ffca1a783f9fa58d4c8c16bdfb60280eb0 | [] | no_license | mmcauliffe/mcauliffe-phd | 6b29be70bedf4cdd21741983bdc796d255989c3f | 99f0ecfdcf102826bf44e07a9e47aa2289b0a04d | refs/heads/master | 2020-05-20T06:01:12.507561 | 2015-11-25T01:44:14 | 2015-11-25T01:44:14 | 20,273,235 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,403 | r | rt_graphs.R |
### EXPERIMENT 1 RESULTS
### EXPOSURE
for.plot <- subset(expose.word, Experiment == 'exp2')
for.plot$TrialCat <- "1-50"
for.plot[for.plot$Trial < 101 & for.plot$Trial > 50,]$TrialCat <- "51-100"
for.plot[for.plot$Trial < 151 & for.plot$Trial > 100,]$TrialCat <- "101-150"
for.plot[for.plot$Trial > 150,]$TrialCat <- "151-200"
for.plot$TrialCat <- factor(for.plot$TrialCat, levels = c("1-50", "51-100", "101-150", "151-200"), ordered = T)
# RT
plotData <- summarySEwithin(data = for.plot, measurevar = 'RT', betweenvars = c('itemtype2'), withinvars = c('TrialCat'), idvar = 'Subject')
exp1.expose.rt <- ggplot(plotData, aes(x = TrialCat,y = RT, colour = itemtype2, shape = itemtype2, group = itemtype2))
exp1.expose.rt <- exp1.expose.rt + geom_point(size = 1.7) + geom_line() + geom_errorbar(aes(ymin = RT - ci, ymax = RT + ci), linetype = 'solid', size = 0.1)
exp1.expose.rt <- exp1.expose.rt + ylab('Reaction time (ms)') + xlab('Exposure trial block')
exp1.expose.rt <- exp1.expose.rt + theme_bw()
exp1.expose.rt <- exp1.expose.rt + theme(text = element_text(size = 10),
legend.title = element_text(size = 8),
legend.text = element_text(size = 8))
exp1.expose.rt <- exp1.expose.rt +scale_shape_manual(name='Trial Type',
values = c(21, 22, 23),
labels = c('Filler', '/s/', '/ʃ/'))
exp1.expose.rt <- exp1.expose.rt + scale_colour_manual(name='Trial Type',
values = c('#000000', "#0072B2", "#D55E00"),
labels = c('Filler', '/s/', '/ʃ/'))
cairo_pdf('../thesis/graphs/exp1_exprt.pdf', width = 6.69, height = 3.15)
exp1.expose.rt
dev.off()
### CATEGORIZATION
# Categorization-phase RTs per continuum step, by attention condition, with
# the no-exposure control group overlaid in both exposure-type facets.
plotData <- summarySEwithin(data = subset(categ, Experiment == 'exp2'), measurevar = 'RT', betweenvars = c('Attention', 'ExposureType'), withinvars = c('Step'), idvar = 'Subject')
contPlotData <- summarySEwithin(data = cont, measurevar = 'RT', withinvars = c('Step'), idvar = 'Subject')
# Duplicate the 6 control rows so the control line appears in both facets.
contPlotData <- rbind(contPlotData,contPlotData)
contPlotData <- cbind(contPlotData,data.frame(Attention = rep('control', 12),
                                              ExposureType = c(rep('final', 6), rep('initial', 6))))
#contPlotData$Experiment <- 'control'
plotData <- rbind(plotData,contPlotData)
# Shift the step codes so the x axis runs 1..6 (see scale_x_continuous).
# NOTE(review): assumes steps are coded -2.5..2.5 -- confirm upstream.
plotData$Step <- as.numeric(as.character(plotData$Step)) + 3.5
### MAIN PLOT
exp1.rt.results <- ggplot(plotData, aes(x = Step,y = RT, colour = Attention, shape = Attention, group = Attention))
exp1.rt.results <- exp1.rt.results + geom_point(size = 1.7) + geom_line() + geom_errorbar(aes(ymin = RT - ci, ymax = RT + ci), linetype = 'solid', size = 0.1)
exp1.rt.results <- exp1.rt.results + facet_grid(~ExposureType, labeller = if_labeller)
# BUG FIX: this figure plots reaction time (measurevar = 'RT'), not the
# identification curve; the y label was copied from the proportion-/s/ plot.
exp1.rt.results <- exp1.rt.results + ylab('Reaction time (ms)') + xlab('Continua step')
exp1.rt.results <- exp1.rt.results + theme_bw()
exp1.rt.results <- exp1.rt.results + theme(text = element_text(size = 10),
                                           legend.title = element_text(size = 8),
                                           legend.text = element_text(size = 8),
                                           #legend.justification = c(0, 0),
                                           #legend.position = c(0, 0),
                                           legend.background = element_blank())
exp1.rt.results <- exp1.rt.results + scale_x_continuous(breaks = 1:6)
exp1.rt.results <- exp1.rt.results +scale_shape_manual(values = c(21, 22, 23),
                                                       labels = c('No attention', 'Attention', 'Control'))
exp1.rt.results <- exp1.rt.results +scale_colour_manual(values = c("#0072B2", "#D55E00", "#000000"),
                                                        labels = c('No attention', 'Attention', 'Control'))
exp1.rt.results
ggsave('../thesis/graphs/exp1_rt_categresults.pdf', width = 170, height = 80, units = 'mm', dpi = 600)
### EXPERIMENT 2
### EXPOSURE
# Exposure-phase RTs (experiment coded 'exp1' in the data) binned into
# 50-trial blocks, faceted by trial type, lines split by exposure position.
for.plot <- subset(expose.word, Experiment == 'exp1')
for.plot$TrialCat <- "1-50"
for.plot[for.plot$Trial < 101 & for.plot$Trial > 50,]$TrialCat <- "51-100"
for.plot[for.plot$Trial < 151 & for.plot$Trial > 100,]$TrialCat <- "101-150"
for.plot[for.plot$Trial > 150,]$TrialCat <- "151-200"
# NOTE(review): unlike the experiment 1 plot, this factor is not ordered,
# so block levels sort alphabetically -- confirm the intended axis order.
for.plot$TrialCat <- factor(for.plot$TrialCat)
# RT
plotData <- summarySEwithin(data = for.plot, measurevar = 'RT', betweenvars = c('ExposureType'), withinvars = c('TrialCat', 'itemtype2'), idvar = 'Subject')
exp2.expose.rt <- ggplot(plotData,aes(x = TrialCat, y = RT, colour = ExposureType, shape = ExposureType, group = ExposureType))
exp2.expose.rt <- exp2.expose.rt + geom_point(size = 1.7) + geom_line() + geom_errorbar(aes(ymin = RT - ci, ymax = RT + ci), linetype = 'solid', size = 0.1)
exp2.expose.rt <- exp2.expose.rt + facet_grid(~itemtype2, labeller = if_labeller)
exp2.expose.rt <- exp2.expose.rt + ylab('Reaction time (ms)') + xlab('Exposure trial block')
exp2.expose.rt <- exp2.expose.rt + theme_bw()
exp2.expose.rt <- exp2.expose.rt + theme(text = element_text(size = 10),
                                         legend.title = element_text(size = 8),
                                         legend.text = element_text(size = 8),
                                         legend.justification = c(0, 1),
                                         legend.position = c(0, 1))
# Fixed y range keeps panels comparable across experiments.
exp2.expose.rt <- exp2.expose.rt + scale_y_continuous(limits = c(900,1300))
exp2.expose.rt <- exp2.expose.rt + scale_shape_manual(name = 'Exposure Type',
                                                      values = c(21, 22),
                                                      labels = c('Word-initial', 'Word-final'))
exp2.expose.rt <- exp2.expose.rt +scale_colour_manual(name = 'Exposure Type',
                                                      values = c("#0072B2", "#D55E00"),
                                                      labels = c('Word-initial', 'Word-final'))
cairo_pdf('../thesis/graphs/exp2_exprt.pdf', width = 6.69, height = 3.15)
exp2.expose.rt
dev.off()
### CATEGORIZATION
# Categorization-phase RTs per continuum step for experiment 2, with the
# no-exposure control overlaid in both exposure-type facets.
plotData <- summarySEwithin(data = subset(categ, Experiment == 'exp1'), measurevar = 'RT', betweenvars = c('Attention', 'ExposureType'), withinvars = c('Step'), idvar = 'Subject')
contPlotData <- summarySEwithin(data = cont, measurevar = 'RT', withinvars = c('Step'), idvar = 'Subject')
# Duplicate the 6 control rows so the control line appears in both facets.
contPlotData <- rbind(contPlotData,contPlotData)
contPlotData <- cbind(contPlotData,data.frame(Attention = rep('control', 12),
                                              ExposureType = c(rep('final', 6), rep('initial', 6))))
#contPlotData$Experiment <- 'control'
plotData <- rbind(plotData,contPlotData)
#contPlotData$Step <- as.numeric(as.character(contPlotData$Step)) + 3.5
# Shift the step codes so the x axis runs 1..6 (see scale_x_continuous).
plotData$Step <- as.numeric(as.character(plotData$Step)) + 3.5
### MAIN PLOT
exp2.results <- ggplot(plotData, aes(x = Step, y = RT, colour = Attention, shape = Attention, group = Attention))
exp2.results <- exp2.results + geom_point(size = 1.7) + geom_line() + geom_errorbar(aes(ymin = RT - ci, ymax = RT + ci), linetype = 'solid', size = 0.1)
exp2.results <- exp2.results + facet_grid(~ExposureType, labeller = if_labeller)
# BUG FIX: this figure plots reaction time (measurevar = 'RT'), not the
# identification curve; the y label was copied from the proportion-/s/ plot.
exp2.results <- exp2.results + ylab('Reaction time (ms)') + xlab('Continua step')
exp2.results <- exp2.results + theme_bw()
exp2.results <- exp2.results + theme(text = element_text(size = 10),
                                     legend.title = element_text(size = 8),
                                     legend.text = element_text(size = 8),
                                     #legend.justification = c(0, 0),
                                     #legend.position = c(0, 0)
                                     legend.background = element_blank())
exp2.results <- exp2.results + scale_x_continuous(breaks = 1:6)
exp2.results <- exp2.results +scale_shape_manual(values = c(21, 22, 23),
                                                 labels = c('No attention', 'Attention', 'Control'))
exp2.results <- exp2.results +scale_colour_manual(values = c("#0072B2", "#D55E00", "#000000"),
                                                  labels = c('No attention', 'Attention', 'Control'))
exp2.results
ggsave('../thesis/graphs/exp2_rt_categresults.pdf', width = 170, height = 80, units = 'mm', dpi = 600)
### EXPERIMENT 3
### EXPOSURE
# Exposure-phase RTs for experiment 3 (100 trials, binned into 25-trial
# blocks), faceted by trial type, lines split by attention condition.
for.plot <- expose3
# Recode trial Type into the same itemtype2 scheme used by the other plots.
for.plot$itemtype2 <- 'Filler'
for.plot[for.plot$Type == 'S-final',]$itemtype2 <- 'S'
for.plot[for.plot$Type == 'SH-final',]$itemtype2 <- 'SH'
for.plot$TrialCat <- "1-25"
for.plot[for.plot$Trial < 51 & for.plot$Trial > 25,]$TrialCat <- "26-50"
for.plot[for.plot$Trial < 76 & for.plot$Trial > 50,]$TrialCat <- "51-75"
# NOTE(review): this block covers trials 76-100, so the "75-100" label
# looks like a typo for "76-100" (display text only; binning is correct).
for.plot[for.plot$Trial > 75,]$TrialCat <- "75-100"
for.plot$TrialCat <- factor(for.plot$TrialCat)
# RT
plotData <- summarySEwithin(data = for.plot, measurevar = 'RT', betweenvars = c('Attention'), withinvars = c('TrialCat', 'itemtype2'), idvar = 'Subject')
exp3.expose.rt <- ggplot(plotData, aes(x = TrialCat, y = RT, colour = Attention, shape = Attention, group = Attention))
exp3.expose.rt <- exp3.expose.rt + geom_point(size = 1.7) + geom_line() + geom_errorbar(aes(ymin = RT - ci, ymax = RT + ci), linetype = 'solid', size = 0.1)
exp3.expose.rt <- exp3.expose.rt + facet_grid(~itemtype2, labeller = if_labeller)
exp3.expose.rt <- exp3.expose.rt + ylab('Reaction time (ms)') +xlab('Exposure trial block')
exp3.expose.rt <- exp3.expose.rt + theme_bw()
exp3.expose.rt <- exp3.expose.rt + theme(text = element_text(size = 10),
                                         legend.title = element_text(size = 8),
                                         legend.text = element_text(size = 8),
                                         legend.justification = c(1,1),
                                         legend.position = c(1,1),
                                         legend.background = element_blank())
exp3.expose.rt <- exp3.expose.rt +scale_shape_manual(values = c(21, 22),labels = c('No attention', 'Attention'))
exp3.expose.rt <- exp3.expose.rt +scale_colour_manual(values = c("#0072B2", "#D55E00"),
                                                      labels = c('No attention', 'Attention'))
# NOTE(review): uses Cairo::CairoPDF while the other figures use the base
# grDevices cairo_pdf -- presumably interchangeable here; confirm and unify.
CairoPDF('../thesis/graphs/exp3_exprt.pdf', width = 6.69, height = 3.15)
exp3.expose.rt
dev.off()
### CATEGORIZATION
# Categorization-phase RTs per continuum step for experiment 3, with the
# no-exposure control overlaid in both predictiveness facets.
plotData <- summarySEwithin(data = categ3, measurevar = 'RT', betweenvars = c('Attention', 'ExposureType'), withinvars = c('Step'), idvar='Subject')
contPlotData <- summarySEwithin(data = cont, measurevar = 'RT', withinvars = c('Step'), idvar = 'Subject')
# Duplicate the 6 control rows so the control line appears in both facets.
contPlotData <- rbind(contPlotData, contPlotData)
contPlotData <- cbind(contPlotData, data.frame(Attention=rep('control', 12),
                                               ExposureType=c(rep('predictive', 6), rep('unpredictive', 6))))
plotData <- rbind(plotData,contPlotData)
# Shift the step codes so the x axis runs 1..6 (see scale_x_continuous).
plotData$Step <- as.numeric(as.character(plotData$Step)) + 3.5
### MAIN PLOT
exp3.results <- ggplot(plotData,aes(x = Step, y = RT, colour = Attention, shape = Attention, group = Attention))
exp3.results <- exp3.results + geom_point(size=1.7)+ geom_line() + geom_errorbar(aes(ymin = RT - ci, ymax = RT + ci), linetype = 'solid', size = 0.1)
exp3.results <- exp3.results + facet_grid(~ExposureType, labeller = if_labeller)
# BUG FIX: this figure plots reaction time (measurevar = 'RT'), not the
# identification curve; the y label was copied from the proportion-/s/ plot.
exp3.results <- exp3.results + ylab('Reaction time (ms)') + xlab('Continua step')
exp3.results <- exp3.results + scale_x_continuous(breaks = 1:6)
exp3.results <- exp3.results + theme_bw()
exp3.results <- exp3.results + theme(text = element_text(size = 10),
                                     legend.title = element_text(size = 8),
                                     legend.text = element_text(size = 8)#,
                                     #legend.justification = c(0, 0),
                                     #legend.position = c(0, 0)
                                     )
exp3.results <- exp3.results + scale_shape_manual(values = c(21, 22, 23),
                                                  labels = c('No attention', 'Attention', 'Control'))
exp3.results <- exp3.results + scale_colour_manual(values = c("#0072B2", "#D55E00", "#000000"),
                                                   labels = c('No attention', 'Attention', 'Control'))
exp3.results
ggsave('../thesis/graphs/exp3_rt_categresults.pdf',width=170,height=80,units='mm',dpi=600)
### EXPERIMENT 5
# Categorization RTs per continuum step by exposure predictiveness, with a
# single (non-faceted) panel; larger point/text sizes for presentation use.
plotData <- summarySEwithin(data = categ5, measurevar = 'RT', betweenvars = c('ExposureType'), withinvars = c('Step'), idvar = 'Subject')
contPlotData <- summarySEwithin(data=cont, measurevar = 'RT', withinvars = c('Step'), idvar='Subject')
contPlotData <- cbind(contPlotData,data.frame(ExposureType=rep('control', 6)))
plotData <- rbind(plotData, contPlotData)
# Shift the step codes so the x axis runs 1..6 (see scale_x_continuous).
plotData$Step <- as.numeric(as.character(plotData$Step)) + 3.5
# Fix the legend/line order: control, unpredictive, predictive.
plotData$ExposureType <- factor(plotData$ExposureType, levels = c('control','unpredictive','predictive'))
### MAIN PLOT
exp5.results <- ggplot(plotData,aes(x = Step, y = RT, colour = ExposureType, shape = ExposureType, group = ExposureType))
exp5.results <- exp5.results + geom_point(size = 2.8) + geom_line(size=1.4) + geom_errorbar(aes(ymin = RT - ci, ymax = RT + ci), linetype = 'solid', size = 0.4)
# BUG FIX: this figure plots reaction time (measurevar = 'RT'), not the
# identification curve; the y label was copied from the proportion-/s/ plot.
exp5.results <- exp5.results + ylab('Reaction time (ms)') + xlab('Continua step')
exp5.results <- exp5.results + theme_bw()
exp5.results <- exp5.results + theme(text=element_text(size = 22),
                                     legend.title=element_text(size = 18),
                                     legend.text=element_text(size = 18),
                                     #legend.justification = c(0, 0),
                                     #legend.position = c(0, 0),
                                     legend.background = element_blank())
exp5.results <- exp5.results + scale_x_continuous(breaks = 1:6)
exp5.results <- exp5.results + scale_shape_manual(values = c(21, 22, 23),
                                                  labels = c('Control','Unpredictive', 'Predictive' ))
exp5.results <- exp5.results + scale_colour_manual(values = c( "#000000","#0072B2", "#D55E00"),
                                                   labels = c('Control','Unpredictive', 'Predictive'))
exp5.results
ggsave('../thesis/graphs/exp5_rt_categresults.pdf', width = 10, height = 6, units = 'in', dpi = 600)
541330bc1708ba56c8ddfa9ef88bf44f73a413e5 | 43baa73789bcda4ddfedf65142e6cb0bc7c3149a | /R/StatCharrms-internal.R | ca9e3e3fa73d2a544704abfb200e99e75e851fdb | [] | no_license | histopathology/StatCharrms | 575e1d4ca2efdd1d78b84821431151a730a3ad77 | e8892cb101bb4b8464da20c22830648a4c5e025b | refs/heads/master | 2020-06-04T22:43:18.381077 | 2019-02-11T15:33:15 | 2019-02-11T15:33:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 44,227 | r | StatCharrms-internal.R | .changeStatusValues <-
function(){
# Recodes the status column of the time-to-event data held in the shared
# state environment `.time2EventEnv`: rows whose status equals the
# user-selected event value become 1, and (when a censor value was chosen)
# rows matching the censor value become 0.
# Returns TRUE on success; returns FALSE (after a popup message) when the
# data contain no events, in which case the analysis is aborted.
#Changes the Censored Value to 0
#Changes the Event Value to 1
#' @export
# Snapshot of the data so matching below is done against the original
# status codes, even after UseData has been partially recoded.
TempData<-.time2EventEnv$UseData
#Change Event status to 1
.time2EventEnv$UseData[which(TempData[ ,.time2EventEnv$StatusVar]==.time2EventEnv$StatusEventVal),.time2EventEnv$StatusVar]<-1
# No rows matched the event value: nothing to analyze (the assignment
# above was a no-op in that case).
if (length(which(TempData[ ,.time2EventEnv$StatusVar]==.time2EventEnv$StatusEventVal)) == 0){
popMessage('There are no event. Analysis aborted.')
return(FALSE)
}
# 'Not Used' is the GUI sentinel meaning no censor value was selected.
if (identical(.time2EventEnv$StatusCenVal,'Not Used') == FALSE){
#Change Censored status to 0
.time2EventEnv$UseData[which(TempData[ ,.time2EventEnv$StatusVar]==.time2EventEnv$StatusCenVal),.time2EventEnv$StatusVar]<-0
}
return(TRUE)
}
.getTimeData <-
function(Data,TimeVar,Format,TimeInt,ReplicateVar,TreatmentVar,EndPointVar){
#' @export
# Averages time over multiple possible endpoints: converts the raw time
# column to a numeric time, bins it into intervals of width `TimeInt`,
# then assembles one table holding the averaged value of every endpoint,
# joined on the internal key 'UniqueID.SC'.
#
# Args:
#   Data         - input data frame.
#   TimeVar      - name of the time column in `Data`.
#   Format       - date format string passed to date2Numeric.
#   TimeInt      - interval width for binning (coerced to numeric).
#   ReplicateVar - replicate identifier column name.
#   TreatmentVar - treatment column name.
#   EndPointVar  - character vector of one or more endpoint column names.
#
# Returns: data frame with the averaged time and one column per endpoint.
  Data[[TimeVar]] <- date2Numeric(Data[[TimeVar]], Format)
  colnames(Data)[colnames(Data) == TimeVar] <- 'Numeric_Time'
  interval <- as.numeric(TimeInt)
  # Seed the result with the first endpoint's time-averaged table.
  merged <- tranformTime(Data, 'Numeric_Time', interval,
                         ReplicateVar, TreatmentVar, EndPointVar[1])
  if (length(EndPointVar) > 1) {
    # The first call also carries columns for the remaining endpoints;
    # drop them so each endpoint's values come from its own call below.
    extras <- which(is.element(colnames(merged), EndPointVar[-1]))
    merged <- merged[-extras]
    for (endpoint in EndPointVar[-1]) {
      oneEndpoint <- tranformTime(Data, 'Numeric_Time', interval,
                                  ReplicateVar, TreatmentVar, endpoint)
      oneEndpoint <- oneEndpoint[ , c('UniqueID.SC', endpoint)]
      merged <- merge(merged, oneEndpoint, by = 'UniqueID.SC')
    }
  }
  # The join key is internal bookkeeping; remove it before returning.
  merged <- merged[ , -which(colnames(merged) == 'UniqueID.SC')]
  return(merged)
}
.lengthNa <-
function(Vec){
#' @export
# Length of `Vec` ignoring NA entries -- length() with an na.rm-like
# behavior. NULL and empty input give 0.
# Counting the non-NA elements directly avoids copying/subsetting the
# vector as the previous implementation did.
return(sum(!is.na(Vec)))
}
.makePlot <-
function(Data,Response,Results,EndPoints,TreatmentVar,Scale,PlotType,ByVar){
#' @export
# Draws one diagnostic plot for a single endpoint on the active device.
#
# Args:
#   Data         - data frame to plot.
#   Response     - endpoint (column) name to plot.
#   Results      - per-endpoint results list; Results[[Response]]$
#                  TransformationUsed names the transform applied.
#   EndPoints    - endpoint name vector (accepted but not used here).
#   TreatmentVar - grouping/treatment column name.
#   Scale        - TRUE to plot on the transformed scale, FALSE for raw.
#   PlotType     - 'Box', 'Quantile-Quantile', 'Violin', 'Interaction',
#                  or '' (falls through to a violin plot).
#   ByVar        - x-axis variable for the interaction plot.
#This function will make a plot
#uses
DataUse<-Data
NameResponse<-Response
TrasnStr<-''
#Convert the response and names to indicate the transformation used
if (Scale==TRUE){ #Add Transform used to Title
DataUse<-responseTransform(Data,NameResponse,Results[[NameResponse]]$TransformationUsed) #Transform Data
#Filter out non-numbers
# Drop rows whose transformed value is NA or non-finite (e.g. log of 0).
if (length(which(is.na(DataUse$TransformedResponse)))>0){
DataUse<-DataUse[-which(is.na(DataUse$TransformedResponse)), ]
}
if (length(which(is.finite(DataUse$TransformedResponse) == FALSE))>0){
DataUse<-DataUse[-which(is.finite(DataUse$TransformedResponse) == FALSE), ]
}
TrasnStr<-paste(Results[[NameResponse]]$TransformationUsed,'Transformed')
Response<-'TransformedResponse'
if (Results[[NameResponse]]$TransformationUsed=='None'){ #If no transform was used tell user of that
# Turns the title text "None Transformed" into "Not Transformed" and
# reverts to the raw response/data.
TrasnStr<-gsub('None','Not',TrasnStr)
Response<-NameResponse
DataUse<-Data
}
}
#Box-Plot
# Lattice plots must be print()ed explicitly to render inside a function.
if (PlotType=='Box'){
print(bwplot(DataUse[ ,Response]~DataUse[ ,TreatmentVar],
main=paste(NameResponse,'for each',TreatmentVar,'\n',TrasnStr),
xlab=TreatmentVar,ylab=paste(NameResponse,TrasnStr), horizontal = FALSE))
}
#Quantile-Quantile-Plot
# Q-Q plot of the one-way ANOVA residuals, not of the raw response.
if (PlotType=='Quantile-Quantile'){
Model<-aov(DataUse[ ,Response]~DataUse[ ,TreatmentVar])
qqnorm(Model$residuals,main=paste('Normal Q-Q Plot for\n',NameResponse,TrasnStr,'by',TreatmentVar))
qqline(Model$residuals)
}
#Violin-Plot
if (PlotType=='Violin'){
print(bwplot(DataUse[ ,Response]~DataUse[ ,TreatmentVar],
main=paste(NameResponse,'for each',TreatmentVar,'\n',TrasnStr),
xlab=TreatmentVar,ylab=paste(NameResponse,TrasnStr), horizontal = FALSE,panel=panel.violin))
}
# Interaction plot of Response over ByVar, one trace per treatment level.
if (PlotType=='Interaction'){
print(interaction.plot(DataUse[[ByVar]],DataUse[[TreatmentVar]],DataUse[[Response]],col=1:nlevels(DataUse[[TreatmentVar]])
,ylab=Response,xlab=ByVar,trace.label=TreatmentVar,
main=paste('Interaction plot for\n',Response,'by',ByVar)))
}
# Fallback when no plot type was supplied: draw a violin plot.
if (PlotType==''){
print(bwplot(DataUse[ ,Response]~DataUse[ ,TreatmentVar],
main=paste(NameResponse,'for each',TreatmentVar,'\n',TrasnStr),
xlab=TreatmentVar,ylab=paste(NameResponse,TrasnStr), horizontal = FALSE,panel=panel.violin))
}
}
.Random.seed <-
c(403L, 282L, 1162133435L, -678811484L, -1593745541L, 2106825706L,
-1678737836L, 1633203179L, -178222632L, -280844829L, -504858409L,
-1019680754L, -205248251L, 622865580L, -636933109L, 2070308299L,
1785834012L, -1889448849L, 857695142L, 1922890729L, 2074082809L,
-1918552742L, 1782288988L, -1877740848L, 1812705903L, 1568310097L,
958635801L, -2104666161L, -1890052541L, 1225410922L, -457955346L,
1261700976L, -1050527673L, 788643394L, 1743462805L, -1983764406L,
-739351685L, 1082545014L, 1886878320L, 1664695152L, -244735773L,
1120199621L, -1813426959L, 655847875L, -1594734210L, 146214903L,
-1536937992L, -487966224L, -811928753L, -372861730L, -1442372922L,
820545090L, -1827433373L, -377223378L, -1782364834L, -1895365642L,
490729850L, 1021693743L, -2058185580L, 2012767611L, 415656057L,
1256776554L, 1523553563L, 174562481L, 384260713L, -1279217361L,
140342715L, -432283109L, 752717893L, 190418328L, 638220211L,
-1014217614L, -241239724L, 558900487L, -806440591L, 591004430L,
1123659807L, 1566725783L, 1668197378L, 1782292927L, -230253327L,
-921656255L, 250215752L, -346350154L, -1446599642L, -1545942360L,
-467111404L, -260071525L, -904308751L, 715110066L, 27820359L,
-1505676965L, -669211734L, 55877703L, 1516406641L, -179362094L,
1182757848L, -87993505L, -443573777L, -848529479L, 1410639063L,
1733733990L, 1552427635L, -1410558840L, -923231239L, 1778263216L,
-88621814L, 944181719L, -908402477L, -1964230458L, -68260451L,
370210474L, -203330636L, -110527764L, -306830062L, -1844164942L,
356934239L, 1153862192L, 1827130701L, -1023924432L, -1325208562L,
446256585L, -1511579674L, -401276224L, 558732826L, 1211097982L,
-7108707L, 1081505079L, -556570576L, -2102306880L, 515600534L,
1783476971L, 461368234L, -2124695521L, 634193785L, 419000795L,
1704301114L, 260683547L, -2135494210L, 1612963945L, -1957241876L,
1637200155L, 615364106L, 205113521L, 2039953261L, 680458464L,
-1320332590L, -160041677L, -1269798435L, -1482175343L, 497382740L,
-1808146897L, -1629870346L, -636607368L, -115989528L, -205759044L,
143021243L, -1064240833L, -1790107806L, -764385255L, -1868417218L,
-547430889L, 1806279311L, -309645209L, -2048280616L, -1945328917L,
444953471L, 1113475168L, 75009294L, -2054457965L, -1582116747L,
-2063498591L, -2138450706L, 1528430716L, 1205642502L, -510803022L,
2071053186L, 1754168550L, -1294388622L, 1259845265L, 1589038911L,
-368699262L, 980256190L, 1019682878L, -1862270137L, -2008816426L,
908225093L, -679139679L, -1566519870L, -405135640L, 1170431759L,
-5759967L, -1695764139L, 1972591832L, 1438000531L, -172793616L,
2029385177L, -334679731L, -367761608L, -94542792L, 1686288779L,
-188586744L, -1016190536L, 1669778645L, -1247747524L, 1627351245L,
1701915586L, 838870517L, 2012471539L, -75795345L, -1439816594L,
1650694195L, 1869676921L, 94190937L, 177487894L, 1032132993L,
1580753355L, 740132587L, 8806165L, -1151339185L, -720198889L,
-802543905L, -1729029599L, 1761439035L, 964042058L, -501370125L,
57028940L, -2068693360L, -1375760215L, 127909631L, -1249828033L,
1787329797L, -800727495L, 1888439524L, 2056668880L, 1433258465L,
457148438L, -1629544342L, -1297854474L, 563316568L, 239141524L,
-1572792673L, -266314278L, -569817898L, 1254417333L, -627113968L,
-481618736L, -111297827L, 433030640L, -1347890334L, 2125356571L,
-724137378L, -433662223L, 873046402L, 4593955L, 385290537L, -940303747L,
-1461034719L, 230269722L, 871943627L, -1454548662L, 960165776L,
776835566L, -910488301L, -1820463273L, -1152556172L, 879300460L,
-548604069L, 798144010L, 1279980320L, 1443718913L, 686929769L,
1967196537L, -757044886L, -1549739762L, -444428159L, -17785943L,
-1715443148L, -1706817268L, 920743226L, 1050827671L, -124362190L,
-906927247L, 1287781056L, 1620192221L, -1016343542L, 909085360L,
1702526930L, -1271785758L, 1855162874L, -1830366261L, -522463503L,
76392688L, -370317003L, 674173338L, -1944545668L, -909920018L,
-2072559843L, 1797311981L, -1473929550L, -1585023036L, -809761489L,
12896123L, 920408505L, 825209791L, 842790912L, 1128087117L, 1744152299L,
518021485L, 2011345563L, -892558946L, -410874927L, 526823333L,
1487481681L, -228036726L, 573159059L, 277857711L, 1443510418L,
687474517L, 1057324266L, -343032461L, 866879474L, 1501606780L,
836851613L, 1832820660L, -569919054L, 1516696156L, -1385677050L,
-620363606L, -2057771040L, 189301237L, -992924602L, 1547346055L,
-540550677L, 1828396432L, -924280122L, -2025963647L, 711128340L,
-1957122083L, -1993766836L, 770455017L, 1617469818L, -1850216766L,
-1380110491L, -62875635L, -581066695L, 1551102353L, 822965885L,
1581911369L, -228424782L, -836867838L, -1892161174L, 1654924970L,
1486728224L, 1039118640L, -677174811L, 37891313L, 2092240392L,
179439152L, -2024395008L, -1153309733L, -324112116L, -2022836353L,
1364775643L, -661267391L, -644077858L, -1628475148L, 2135351374L,
-404300907L, 431966341L, 744795696L, 1160708350L, 251166947L,
-454879234L, -2075099609L, 211576601L, -1252845013L, -1953153030L,
1248804188L, -365254523L, 1630711509L, 877097527L, 287760291L,
896461951L, 218314457L, 449438339L, 1646027099L, 1982114252L,
-933323933L, 872527703L, 2093672667L, 1654133965L, -923107805L,
-1100371240L, -1953760114L, 1785795084L, -1262053850L, -1579718771L,
-1438282129L, 954870104L, -1500869748L, -1439490733L, 201434211L,
-1755473355L, 1932004561L, 1938640571L, -100049067L, 1848026076L,
1580613060L, -1603748553L, -610213474L, 1977747527L, -1791216647L,
439994909L, 641337947L, -1279890861L, -296067617L, 351155024L,
-964902773L, 1464907900L, 1768671306L, 1742961614L, -1727145793L,
-850690234L, 160665382L, 1007106842L, -782747785L, 190310585L,
-444739791L, 2043437113L, 792706977L, 517835517L, 180239403L,
-1686921815L, -1689686983L, 863732342L, 2120693036L, 63199838L,
1368103336L, 218290319L, 709094221L, -1092194413L, 920267795L,
-1103155669L, -1828411155L, -1747377191L, -1598919629L, 811144341L,
1304133046L, 669019091L, -618440663L, -1273053303L, -980496530L,
1548122683L, -1500935511L, -66279750L, 316845629L, 1251668550L,
1600426526L, -2089687935L, -221541194L, 2115350639L, 1506837741L,
760105634L, 901610792L, -1386855948L, 979962081L, 1058327003L,
-650351128L, 1473705109L, 1368671555L, -1332189733L, 722393049L,
-1447376846L, 64180222L, 624755165L, -557804635L, -815264047L,
-540439224L, -1452352822L, 815693272L, 921211028L, -1287929916L,
1894787878L, 947347578L, 1970380440L, 481000236L, 1994182966L,
-1167752614L, 413235821L, 1698396772L, 1215614980L, 57094304L,
-464510674L, -141570436L, -1184479999L, 1677473790L, 40473374L,
1426159891L, 1151137317L, -2069548746L, -1027241591L, -138969472L,
1723628378L, 1449241510L, -1215816388L, 1880264454L, 1204072705L,
523639348L, -92355678L, 1220049893L, 725755810L, -1969891730L,
-549969326L, 727192462L, -2028955162L, -1904002133L, -701258952L,
-458885622L, -558661528L, -695606958L, -329369790L, 1687931532L,
-1687109393L, 249949317L, -815100187L, -1359190104L, -997574851L,
529021189L, -1249962315L, -1287707913L, -110312119L, 858713982L,
652777897L, 764632784L, -849385676L, 1261239308L, 1166261027L,
112459976L, -165547535L, -1648356183L, -2100203412L, 25990888L,
1885877522L, -1295954470L, -444924266L, -1724970947L, -1383339594L,
808943297L, -1668288298L, 1718417723L, 1939282489L, -408633664L,
-1831894543L, 1857974256L, -1374536488L, -1625056786L, 1130806768L,
-2019333642L, 518155237L, -1542294598L, -432978505L, 59095710L,
1361588652L, -1540049188L, 1375936391L, -2035081738L, -1835437866L,
-1031847736L, -370913946L, 1457756373L, -1735710327L, 1099161836L,
-986657773L, -1058272751L, -1524375190L, 56699864L, -1182278994L,
-1711032888L, 493356832L, -1062210400L, 537949249L, -1696757075L,
302869586L, 1190779852L, 1998899032L, -1193151450L, 1500622802L,
843758549L, 1766678578L, -532991053L, -1198614715L, 1029174974L,
394221393L, 1290101002L, -2062287277L, 851220537L, -999014578L,
1023575866L, -1190408884L, 1973582043L, -2007698456L, 1340786922L,
-1106398623L, -2065254085L, -103828140L, -1833693719L, -150982777L,
438212440L, 450090037L, 1654107702L, -222631722L, -530281056L,
-614587639L, -607890640L, 1079415192L, 147247995L, 165499753L,
-1649964388L, 1603449989L, 1154637966L, 678878811L, 1083684733L,
416073391L, 383395771L, 300900717L)
.saveGraph <-
function(Dir,FileName,Response){ #One Response
#' @export
# Saves every requested diagnostic plot for a single endpoint (`Response`)
# into one PDF under `Dir`, using the analysis state held in the shared
# environment `.stdEndEnv` (plot data, results, plot types, variables).
#
# Args:
#   Dir      - output directory; created if it does not already exist.
#   FileName - prefix for the generated PDF file name.
#   Response - name of the endpoint whose graphs are saved.
#
# FIX: dir.create(showWarnings = FALSE) replaces the options(warn=-1)/
# options(warn=0) sandwich, which suppressed ALL warnings globally and
# unconditionally reset the user's warn option to 0. This also matches
# the sibling .saveGraphs.
dir.create(Dir, showWarnings = FALSE)
# FIX: file.path() is portable; the hard-coded '\\' separator produced
# broken file names on non-Windows platforms.
FileNameG <- file.path(Dir, FileName)
#Start PDF Output
pdf(paste(FileNameG,Response,'-Graphs.pdf',sep=''))
# One page per (scale, plot type) combination: raw data first, then the
# transformed scale used by the analysis.
for (Scale in c(FALSE,TRUE)){
for (PlotType in .stdEndEnv$PlotTypeList){ #PlotTypeList is a global from the GUI
.makePlot(.stdEndEnv$PlotData,Response,.stdEndEnv$Results,.stdEndEnv$EndPointVar,
.stdEndEnv$TreatmentVar,Scale,PlotType,.stdEndEnv$ByVar)
}
}
#Ends PDF output; return focus to the previously active device
dev.off()
dev.set(dev.prev())
}
.saveGraphs <-
function(Dir,FileName){ #All Graphs
#' @export
# Saves the diagnostic plots for EVERY endpoint in the analysis, one PDF
# per endpoint (grouped by treatment), plus -- when a time variable is in
# use -- a second PDF per endpoint grouped by time. Reads all analysis
# state from the shared environment `.stdEndEnv`.
#
# Args:
#   Dir      - output directory; created if it does not already exist.
#   FileName - prefix for the generated PDF file names.
#This function saves graphs specificity from the GUI for the One-Way Anova Part
#It uses the global names for the data set and values
#Save graphs as PDF
dir.create(Dir , showWarnings = FALSE)
# When a time variable is used, rebuild PlotData with time binned/averaged
# and expose the averaged time as a factor column named 'Time'.
if (identical(.stdEndEnv$TimeVar,'Not Used')==FALSE){
.stdEndEnv$PlotData<-.getTimeData(.stdEndEnv$DataSub,.stdEndEnv$TimeVar,.stdEndEnv$Format,.stdEndEnv$TimeIntGraph,.stdEndEnv$ReplicateVar,.stdEndEnv$TreatmentVar,.stdEndEnv$EndPointVar)
colnames(.stdEndEnv$PlotData)[which(colnames(.stdEndEnv$PlotData)=='Averaged_Numeric_Time')]<-'Time'
.stdEndEnv$PlotData$Time<-as.factor(.stdEndEnv$PlotData$Time)
}
# NOTE(review): hard-coded '\\' separator is Windows-only; the sibling
# .saveGraph builds its path the same way -- consider file.path().
FileNameG<-paste(Dir,'\\',FileName,sep='')
# One PDF per endpoint, grouped by treatment; pages iterate over
# (raw/transformed scale) x (requested plot types).
for (Response in .stdEndEnv$EndPointVar){
#Start PDF Output
pdf(paste(FileNameG,'-',Response,'-',.stdEndEnv$TreatmentVar,'-Graphs.pdf',sep=''))
for (Scale in c(FALSE,TRUE)){
for (PlotType in .stdEndEnv$PlotTypeList){ #PlotTypeList.sa is a global from the
.makePlot(.stdEndEnv$PlotData,Response,.stdEndEnv$Results,.stdEndEnv$EndPointVar,
.stdEndEnv$TreatmentVar,Scale,PlotType,'Time')
}
}
#Ends PDF output
dev.off()
dev.set(dev.prev())
}
#if time is used
# Second pass: same plots but grouped by 'Time', with the treatment
# variable supplied as the interaction-plot trace variable.
if (identical(.stdEndEnv$TimeVar,'Not Used')==FALSE){
for (Response in .stdEndEnv$EndPointVar){
#Start PDF Output
pdf(paste(FileNameG,'-',Response,'-Time','-Graphs.pdf',sep=''))
for (Scale in c(FALSE,TRUE)){
for (PlotType in .stdEndEnv$PlotTypeList){ #PlotTypeList.sa is a global from the
.makePlot(.stdEndEnv$PlotData,Response,.stdEndEnv$Results,.stdEndEnv$EndPointVar,
'Time',Scale,PlotType,.stdEndEnv$TreatmentVar)
}
}
#Ends PDF output
dev.off()
dev.set(dev.prev())
}
}
}
.saveGraphs.te <-
function(Dir,FileName){ #One Response
# Saves the raw-data survival-style plot for the time-to-effect module to
# a single PDF in `Dir`, using the fitted object (Results$FitS) and data
# held in the shared environment `.time2EventEnv`.
#
# Args:
#   Dir      - output directory; created if it does not already exist.
#   FileName - prefix for the generated PDF file name.
#' @export
# FIX: dir.create(showWarnings = FALSE) replaces the options(warn=-1)/
# options(warn=0) sandwich, which suppressed ALL warnings globally and
# unconditionally reset the user's warn option to 0.
dir.create(Dir, showWarnings = FALSE)
# FIX: file.path() is portable; the hard-coded '\\' separator produced
# broken file names on non-Windows platforms.
FileNameG <- file.path(Dir, FileName)
#Start PDF Output
pdf(paste(FileNameG,'-Graphs.pdf',sep=''))
#Make the plot
# One colour per treatment level; solid lines for the first half of the
# levels and dashed for the second half, so up to 12 levels stay distinct.
Colors <- c("Black", "red", "blue", "orange","purple","green")
Lines <- c(rep(1,ceiling(nlevels(as.factor(.time2EventEnv$UseData[ ,.time2EventEnv$TreatmentVar]))/2)),
rep(2,floor(nlevels(as.factor(.time2EventEnv$UseData[ ,.time2EventEnv$TreatmentVar]))/2)))
# NOTE(review): the brief pause before plotting looks like a GUI/device
# timing workaround -- confirm it is still needed.
Sys.sleep(.1)
plot(.time2EventEnv$Results$FitS, conf.int = FALSE, main='Plot of Raw Data By Treatment',xlab=.time2EventEnv$TimeVar,ylab='Percent In Original Status',
col = Colors,lty=Lines,lwd=1.5)
legend('bottomleft',levels(as.factor(.time2EventEnv$UseData[ ,.time2EventEnv$TreatmentVar])),lty = Lines,
col =Colors)
#Ends PDF output; return focus to the previously active device
dev.off()
dev.set(dev.prev())
}
.saveResults <-
function(Results,Dir,FileName){
#' @export
# Writes the full set of analysis tables for one endpoint to an HTML
# report in `Dir` using R2HTML. Only the tables present (non-NULL) in
# `Results` are written, each under a centered bold heading, and the
# report is stamped with the transformation and test type used.
#
# Args:
#   Results  - list with the result tables and metadata (Response,
#              TransformationUsed, TestType) from the standard analysis.
#   Dir      - output directory; created if it does not already exist.
#   FileName - prefix for the generated HTML file names.
#
# FIX: dir.create(showWarnings = FALSE) replaces the options(warn=-1)/
# options(warn=0) sandwich, which suppressed ALL warnings globally and
# unconditionally reset the user's warn option to 0.
dir.create(Dir, showWarnings = FALSE)
#Extract Results
Response<-Results$Response
SummaryTable<-Results$SummaryTable
WilksResults<-Results$WilksResults
LeveneResults<-Results$LeveneResults
AnovaResults<-Results$AnovaResults
OneWayDunnetResults<-Results$OneWayDunnetResults
JonckheereTerpstraResults<-Results$JonckheereTerpstraResults
MonocityTable<-Results$MonocityTable
DunnsTable<-Results$DunnsTable
WilliamsTableUp<-Results$WilliamsTableUp #2017-10-17
WilliamsTableDown<-Results$WilliamsTableDown #2017-10-17
Transform<-Results$TransformationUsed
TestType<-Results$TestType
# NOTE: default paste() separator, so the name contains spaces by design.
FileNameUse<-paste(FileName,Response,'-')
#Start HTML OutPut
HTMLStart(outdir=Dir, filename= FileNameUse,
extension="html", echo=FALSE, HTMLframe=TRUE)
Title<-paste('<center>Results from ',Response,'</center>',sep='')
HTML.title(Title, HR=1,CSSstyle='')
#HTML for Data Summary Table
if (is.null(SummaryTable)==FALSE){
HTML('<center><b>Data Summary Table for Untransformed/Unweighted Data </b></center>')
HTML(SummaryTable,row.name=FALSE,innerBorder = 1,CSSstyle='')}
#HTML for Monotonicity Test
if (is.null(MonocityTable )==FALSE){
HTML('<center><b>Test for Monotonicity </b></center>')
HTML(MonocityTable ,row.name=FALSE,innerBorder = 1,CSSstyle='')}
#HTML for Jonckheere-Terpstra Test
if (is.null(JonckheereTerpstraResults )==FALSE){
HTML('<center><b>Jonckheere-Terpstra Test</b></center>')
HTML(JonckheereTerpstraResults ,row.name=FALSE,innerBorder = 1,CSSstyle='')}
#HTML for Shapiro Wilks Test Table
if (is.null(WilksResults)==FALSE){
HTML('<center><b>Shapiro Wilks Test for Normality</b></center>')
HTML(WilksResults,row.name=FALSE,innerBorder = 1,CSSstyle='')}
#HTML for Levene's Test Table
if (is.null(LeveneResults)==FALSE){
HTML("<center><b>Levene's test for equality of variances</b></center>")
HTML(LeveneResults,row.name=FALSE,innerBorder = 1,CSSstyle='')}
#HTML for Anova
if (is.null(AnovaResults)==FALSE){
HTML('<center><b>AnovaTable</b></center>')
HTML(AnovaResults,row.name=TRUE,innerBorder = 1,HR=1)}
#HTML for Dunnets Test
if (is.null(OneWayDunnetResults)==FALSE){
HTML('<center><b>Dunnets Test</b></center>')
HTML(OneWayDunnetResults,row.name=FALSE,innerBorder = 1,CSSstyle='')}
#HTML for Dunn Test
if (is.null(DunnsTable)==FALSE){
HTML('<center><b>Dunns Test</b></center>')
HTML(DunnsTable,row.name=FALSE,innerBorder = 1,CSSstyle='')}
#HTML for Williams Test 2017-10-18
if (is.null(WilliamsTableUp)==FALSE){
HTML('<center><b>Williams Test for Increasing Trend</b></center>')
HTML(WilliamsTableUp,row.name=FALSE,innerBorder = 1,CSSstyle='')}
#HTML for Williams Test 2017-10-18
if (is.null(WilliamsTableDown)==FALSE){
HTML('<center><b>Williams Test for Decreasing Trend</b></center>')
HTML(WilliamsTableDown,row.name=FALSE,innerBorder = 1,CSSstyle='')}
#Stamp the output with the transformation and test type used
.stdEndEnv$Message<-.stampOutput(Transform,TestType)
HTML(.stdEndEnv$Message)
HTMLStop() #End HTML OutPut
#Deletes Junk files (R2HTML scaffolding; the frame files are kept)
# FIX: file.path() instead of the hard-coded '\\' separator (portability).
unlink(file.path(Dir, paste(FileNameUse,'.html',sep='')))
unlink(file.path(Dir, paste(FileNameUse,'_menu.html',sep='')))
Sys.sleep(.01)
}
.saveResults.te <-
function(Results,Dir,FileName){
  #' @export
  #Saves the results of a time-to-event analysis as an HTML report in Dir.
  #Results:  list produced by analyseTime2Effect (EffectsTable, MedianTable, ...).
  #Dir:      output directory; created if it does not already exist.
  #FileName: base name used for the HTML report.
  #Suppress the warning dir.create() raises when Dir already exists
  options(warn=-1)
  dir.create(Dir)
  options(warn=0)
  #Extract Results
  FileNameUse<-paste(FileName)
  #Start HTML OutPut
  HTMLStart(outdir=Dir, filename= FileNameUse,
  extension="html", echo=FALSE, HTMLframe=TRUE)
  Title<-'<center>Results</center>'
  HTML.title(Title, HR=1,CSSstyle='')
  #Effects Table: move the row names into an explicit 'Comparison' column
  EffectsTable<-as.data.frame(Results$EffectsTable)
  EffectsTable<-cbind(rownames(EffectsTable), EffectsTable)
  colnames(EffectsTable)[1]<-'Comparison'
  #HTML for the data used in the analysis
  if (is.null(.time2EventEnv$UseData)==FALSE){
  HTML('<center><b>Data Used in Analysis </b></center>')
  HTML(.time2EventEnv$UseData,row.name=FALSE,innerBorder = 1,CSSstyle='')}
  #HTML for Main Effects Results
  if (is.null(EffectsTable)==FALSE){
  HTML('<center><b>Main Effects Table</b></center>')
  HTML(EffectsTable ,row.name=FALSE,innerBorder = 1,CSSstyle='')}
  #HTML for the median time-to-effect table; NA entries are shown as '-'
  MedianTable<-as.data.frame(Results$MedianTable)
  if (is.null(MedianTable)==FALSE){
  if (sum(is.na(MedianTable)) > 0){
  NAs<-which(is.na(MedianTable)==TRUE,arr.ind = TRUE)
  MedianTable[NAs]<-'-'
  }
  HTML('<center><b>Median Time to Effect with 95% CI</b></center>')
  HTML(MedianTable ,row.name=FALSE,innerBorder = 1,CSSstyle='',nsmall=1)
  }
  #Stamp the output with the user's analysis settings
  .stampOutput.te()
  HTMLStop() #End HTML OutPut
  #Delete the auxiliary frame files created by HTMLStart()
  unlink(paste(Dir,'\\',FileNameUse,'.html',sep=''))
  unlink(paste(Dir,'\\',FileNameUse,'_menu.html',sep=''))
  Sys.sleep(.01)
}
.stampOutput <-
function(Transform,TestType){
  #' @export
  #Builds (and returns) the HTML footer that records the user's analysis
  #settings. Assumes it is called from the standard-analysis saving routine,
  #so every setting other than Transform and TestType is read from .stdEndEnv.
  #Record the bare file name (last path component) back into the environment
  .stdEndEnv$TempList<-strsplit(.stdEndEnv$FileName,'\\\\')
  .stdEndEnv$File<-.stdEndEnv$TempList[[1]][length(.stdEndEnv$TempList[[1]])]
  #Small helper: wrap a value in bold tags
  bold<-function(value) paste0('<b>',value,'</b>')
  msgTest<-paste0("<br>Use Test Type of: ",bold(TestType),"<br>Transformation of: ",bold(Transform))
  msgGeneration<-paste0("<br>Using Generation variable: ",bold(.stdEndEnv$GenerationVar)," and Generation value: ",bold(.stdEndEnv$GenerationVal))
  msgDesign<-paste0("<br>Treatment Variable: ",bold(.stdEndEnv$TreatmentVar),"<br>Replicate Variable: ",bold(.stdEndEnv$ReplicateVar))
  msgGender<-paste0("<br>Using Gender variable: ",bold(.stdEndEnv$GenderVar)," and Gender value: ",bold(.stdEndEnv$GenderVal))
  msgAge<-paste0("<br>Using Age variable: ",bold(.stdEndEnv$AgeVar)," and Age value: ",bold(.stdEndEnv$AgeVal))
  msgTime<-paste0("<br>Using Time variable: ",bold(.stdEndEnv$TimeVar)," and Time Increment: ",bold(.stdEndEnv$TimeInt))
  msgWeights<-paste0("<br>Using as weights: ",bold(.stdEndEnv$WeightList))
  msgAlpha<-paste0("<br>Using Test Direction: ",bold(.stdEndEnv$TestDirection),"<br>Using Alpha Level: ",bold(.stdEndEnv$AlphaLevel))
  msgCaveat<-"<br> Alpha Level only applies to calculations for Confidence Intervals and the Jonckheere-Terpstra trend test"
  Message<-paste0(msgTest,msgGeneration,msgDesign,msgGender,msgAge,msgTime,msgWeights,msgAlpha,msgCaveat,'</b><br>')
  return(Message)
}
.stampOutput.te <-
function(){
  #' @export
  #Writes (and returns) the HTML footer recording the time-to-event settings.
  #Assumes it is called from the time-to-event saving routine, so all values
  #are read from the .time2EventEnv environment.
  #Small helper: wrap a value in bold tags
  bold<-function(value) paste0('<b>',value,'</b>')
  msgDesign<-paste0("<br>Treatment Variable: ",bold(.time2EventEnv$TreatmentVar),"<br>Replicate Variable: ",bold(.time2EventEnv$ReplicateVar))
  msgTime<-paste0("<br>Time Variable: ",bold(.time2EventEnv$TimeVar))
  msgStatus<-paste0("<br>Status Variable: ",bold(.time2EventEnv$StatusVar))
  msgVals<-paste0("<br>Event Status: ",bold(.time2EventEnv$StatusEventVal),"<br>Censored Status: ",bold(.time2EventEnv$StatusCenVal))
  Message<-paste0(msgDesign,msgTime,msgStatus,msgVals,'</b><br>')
  HTML(Message)
  return(Message)
}
.TestEndPoints <-
function(){
  #' @export
  #Validates every selected endpoint in .stdEndEnv$EndPointVar and removes any
  #endpoint that cannot be analysed: non-numeric columns, constant vectors,
  #endpoints with data for only one treatment, and the treatment variable
  #itself. popMessage() reports the reason for each removal.
  #Fixes vs. the previous version: the duplicated 'MeansTest<-MeansTest<-'
  #assignment is removed, the type check now runs BEFORE the variance checks
  #(var() errors on factor/character columns), and the "consent vector"
  #message typo is corrected to "constant vector".
  oldw <- getOption("warn")
  options(warn = -1)
  #Restore the warning level even if a check errors out
  on.exit(options(warn = oldw), add = TRUE)
  for (Response in .stdEndEnv$EndPointVar){
    RemoveMessage <- NULL
    Remove <- FALSE
    RespCol <- .stdEndEnv$UseData[ ,Response]
    #Type check first: var()/tapply() below require a numeric column
    if (is.factor(RespCol) || is.numeric(RespCol) == FALSE){
      RemoveMessage <- paste(RemoveMessage, Response, 'is not a number: removing it from the analysis.\n')
      Remove <- TRUE
    }else{
      #Checks to see if more than 1 treatment has a response
      MeansTest <- tapply(RespCol, .stdEndEnv$UseData[ ,.stdEndEnv$TreatmentVar], mean, na.rm = TRUE)
      vMeans <- var(MeansTest, na.rm = TRUE)
      if (is.na(vMeans) || vMeans == 0){
        RemoveMessage <- paste(RemoveMessage, Response, 'has a value for only one treatment: removing it from the analysis.\n')
        Remove <- TRUE
      }
      #Checks to see if the vector is a constant vector
      vResp <- var(RespCol, na.rm = TRUE)
      if (is.na(vResp) || vResp == 0){
        RemoveMessage <- paste(RemoveMessage, Response, 'is a constant vector: removing it from the analysis.\n')
        Remove <- TRUE
      }
    }
    if (Response == .stdEndEnv$TreatmentVar){
      RemoveMessage <- paste(RemoveMessage, Response, 'can not be used as a treatment and a response: removing it from the analysis.\n')
      Remove <- TRUE
    }
    if (Remove == TRUE){
      .stdEndEnv$EndPointVar <- .stdEndEnv$EndPointVar[-which(.stdEndEnv$EndPointVar == Response)]
      popMessage(RemoveMessage)
    }
  }
}
.updateEnviroment <-
function(){
  #' @export
  #Updates the .stdEndEnv environment from the current GUI selections,
  #converts the treatment/replicate columns to factors, applies the
  #generation/gender/age subsets and time exclusions, and validates the
  #alpha level. Returns TRUE when the analysis can proceed, FALSE otherwise.
  #Fix vs. the previous version: the duplicated ReplicateVar assignment
  #is removed.
  .stdEndEnv$UseData<-.stdEndEnv$MainData
  #Gather values from the GUI
  .stdEndEnv$TreatmentVar<-svalue(.stdEndEnv$TreatmentVarCbx)
  .stdEndEnv$ReplicateVar<-svalue(.stdEndEnv$ReplicateVarCbx)
  .stdEndEnv$GenerationVar<-svalue(.stdEndEnv$GenerationVarCbx)
  .stdEndEnv$GenerationVal<-svalue(.stdEndEnv$GenerationValCbx)
  .stdEndEnv$AgeVar<-svalue(.stdEndEnv$AgeVarCbx)
  .stdEndEnv$AgeVal<-svalue(.stdEndEnv$AgeValCbx)
  .stdEndEnv$GenderVar<-svalue(.stdEndEnv$GenderVarCbx)
  .stdEndEnv$GenderVal<-svalue(.stdEndEnv$GenderValCbx)
  .stdEndEnv$WeightVar<-svalue(.stdEndEnv$WeightVarCbx)
  .stdEndEnv$TestDirection<-svalue(.stdEndEnv$TestDirectionCbx)
  .stdEndEnv$AlphaLevel<-as.numeric(svalue(.stdEndEnv$AlphaLvCbx))
  .stdEndEnv$TimeVar<-svalue(.stdEndEnv$TimeVarCbx)
  .stdEndEnv$Format<-FindFormat(svalue(.stdEndEnv$TimeValCbx),.stdEndEnv$CurrentDate)
  .stdEndEnv$TimeInt<-as.numeric(svalue(.stdEndEnv$TimeIntCbx))
  .stdEndEnv$TimeIntGraph<-as.numeric(svalue(.stdEndEnv$TimeIntGraphCbx))
  #Convert to factors
  if (identical(.stdEndEnv$TreatmentVar, 'Not Used')==FALSE){
  .stdEndEnv$UseData[ ,.stdEndEnv$TreatmentVar]<-as.factor(.stdEndEnv$UseData[ ,.stdEndEnv$TreatmentVar])
  }
  #Convert the replicate column BEFORE subsetting so the factor keeps the
  #full set of replicate levels (the level count is used below)
  if (identical(.stdEndEnv$ReplicateVar, 'Not Used')==FALSE){
  .stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar]<-as.factor(.stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar])
  }
  #Apply Subsets
  if(identical(.stdEndEnv$GenerationVar,'Not Used')==FALSE){
  .stdEndEnv$UseData<-subset(.stdEndEnv$UseData,.stdEndEnv$UseData[ ,.stdEndEnv$GenerationVar] == .stdEndEnv$GenerationVal)
  }
  if(identical(.stdEndEnv$GenderVar,'Not Used')==FALSE){
  .stdEndEnv$UseData<-subset(.stdEndEnv$UseData,.stdEndEnv$UseData[ ,.stdEndEnv$GenderVar] == .stdEndEnv$GenderVal)
  }
  if(identical(.stdEndEnv$AgeVar,'Not Used')==FALSE){
  .stdEndEnv$UseData<-subset(.stdEndEnv$UseData,.stdEndEnv$UseData[ ,.stdEndEnv$AgeVar] == .stdEndEnv$AgeVal)
  }
  if (identical(.stdEndEnv$ReplicateVar, 'Not Used')==FALSE){
  .stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar]<-as.factor(.stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar])
  #If there is 1 unit per replicate, drop the replicate structure
  if (nlevels(.stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar]) == dim(.stdEndEnv$UseData)[1]){
  .stdEndEnv$ReplicateVar<-'Not Used'
  }
  }
  #Exclude times 2018-3-20: keep only rows whose time is not excluded
  if (length(.stdEndEnv$TimeExcludeVal)>0 & identical(.stdEndEnv$TimeExcludeVal, 'Not Used')==FALSE){
  .stdEndEnv$UseData<-.stdEndEnv$UseData[is.element(.stdEndEnv$UseData[ ,.stdEndEnv$TimeVar],.stdEndEnv$TimeExcludeVal)==FALSE, ]
  }
  .stdEndEnv$DataSub<-.stdEndEnv$UseData #DataSub is used to dynamically change the time interval for graphing
  #Test to see if there is still data
  if (dim(.stdEndEnv$UseData)[1]==0){
  popMessage('The Selection of Age, Generation, or Gender has caused
  the data to become an empty set. Please Reselect one of the above groups')
  return(FALSE)
  }
  if (is.na(.stdEndEnv$AlphaLevel)==TRUE){
  popMessage('Please select a number between 0 and 1 for the Alpha Level ')
  return(FALSE)
  }
  if (.stdEndEnv$AlphaLevel < 0 || .stdEndEnv$AlphaLevel > 1){
  popMessage('Please select a number between 0 and 1 for the Alpha Level ')
  return(FALSE)
  }
  return(TRUE)
}
.updateT2E <-
function(){
  #' @export
  #Updates the .time2EventEnv environment from the GUI selections for the
  #time-to-event analysis and checks whether the analysis can be run.
  #Returns the result of .changeStatusValues() on success, FALSE otherwise.
  #Fixes vs. the previous version: the censored-status check tested
  #StatusEventVal twice (now tests StatusCenVal), the generation subset was
  #guarded by GenderVar (now GenerationVar), the bare 'UseData' assignment
  #lacked its '.time2EventEnv$' prefix, and the "statues" typo is corrected.
  .time2EventEnv$UseData<-.time2EventEnv$MainData
  #Gather values from the GUI
  .time2EventEnv$TreatmentVar<-svalue(.time2EventEnv$TreatmentVarCbx)
  .time2EventEnv$ReplicateVar<-svalue(.time2EventEnv$ReplicateVarCbx)
  .time2EventEnv$GenerationVar<-svalue(.time2EventEnv$GenerationVarCbx)
  .time2EventEnv$GenerationVal<-svalue(.time2EventEnv$GenerationValCbx)
  .time2EventEnv$GenderVar<-svalue(.time2EventEnv$GenderVarCbx)
  .time2EventEnv$GenderVal<-svalue(.time2EventEnv$GenderValCbx)
  .time2EventEnv$TimeVar<-svalue(.time2EventEnv$TimeVarCbx)
  .time2EventEnv$Format<-FindFormat(svalue(.time2EventEnv$TimeValCbx),.time2EventEnv$CurrentDate)
  .time2EventEnv$StatusVar<-svalue(.time2EventEnv$StatusVarCbx)
  .time2EventEnv$StatusEventVal<-svalue(.time2EventEnv$StatusEventValCbx)
  .time2EventEnv$StatusCenVal<-svalue(.time2EventEnv$StatusCenValCbx)
  .time2EventEnv$CanRun<-FALSE
  #A treatment variable is required; convert it to a factor
  if (identical(.time2EventEnv$TreatmentVar, 'Not Used')==FALSE){
  .time2EventEnv$UseData[ ,.time2EventEnv$TreatmentVar]<-as.factor(.time2EventEnv$UseData[ ,.time2EventEnv$TreatmentVar])
  }else{
  popMessage("A treatment variable needs to be defined.")
  return(FALSE)
  }
  if (identical(.time2EventEnv$StatusVar, 'Not Used')==TRUE){
  popMessage("A status variable needs to be defined.")
  return(FALSE)
  }
  if (identical(.time2EventEnv$StatusEventVal, 'Not Used')==TRUE){
  popMessage("A value representing events needs to be defined")
  return(FALSE)
  }
  if (identical(.time2EventEnv$StatusCenVal, 'Not Used')==TRUE){
  popMessage("A value representing censored events needs to be defined")
  return(FALSE)
  }
  if (identical(.time2EventEnv$TimeVar, 'Not Used')==TRUE){
  popMessage("A time variable needs to be defined.")
  return(FALSE)
  }
  if (identical(.time2EventEnv$ReplicateVar, 'Not Used')==FALSE){
  .time2EventEnv$UseData[ ,.time2EventEnv$ReplicateVar]<-as.factor(.time2EventEnv$UseData[ ,.time2EventEnv$ReplicateVar])
  #If there is 1 unit per replicate, drop the replicate structure
  if (nlevels(.time2EventEnv$UseData[ ,.time2EventEnv$ReplicateVar]) == dim(.time2EventEnv$UseData)[1]){
  .time2EventEnv$ReplicateVar<-'Not Used'
  }
  }
  #Apply Subsets
  if(identical(.time2EventEnv$GenerationVar,'Not Used')==FALSE){
  .time2EventEnv$UseData<-subset(.time2EventEnv$UseData,.time2EventEnv$UseData[ ,.time2EventEnv$GenerationVar] == .time2EventEnv$GenerationVal)
  }
  if(identical(.time2EventEnv$GenderVar,'Not Used')==FALSE){
  .time2EventEnv$UseData<-subset(.time2EventEnv$UseData,.time2EventEnv$UseData[ ,.time2EventEnv$GenderVar] == .time2EventEnv$GenderVal)
  }
  #Test to see if there is still data
  if (dim(.time2EventEnv$UseData)[1]==0){
  popMessage('The Selection of Generation, or Gender has caused
  the data to become an empty set. Please Reselect one of the above groups')
  return(FALSE)
  }
  #Convert the time column to a numeric time if it has not been done yet
  if (is.null(.time2EventEnv$TimeTemp)==TRUE){
  try(.time2EventEnv$TimeTemp<-date2Numeric(.time2EventEnv$UseData[ ,.time2EventEnv$TimeVar],.time2EventEnv$Format))
  #NOTE(review): CanRun is FALSE at this point, so UseTime is always FALSE
  #whenever this branch runs -- confirm whether checkTime() is expected to
  #set .time2EventEnv$CanRun before this conjunction is evaluated.
  UseTime<-checkTime(.time2EventEnv$TimeTemp) & .time2EventEnv$CanRun #Update CanRun
  if (UseTime == FALSE){
  return(FALSE)
  }else{
  .time2EventEnv$UseData[ ,.time2EventEnv$TimeVar]<-.time2EventEnv$TimeTemp
  }
  }
  #Change Status Vars
  Out<-.changeStatusValues()
  return(Out)
}
.writeExamples <-
function(Folder){
  #' @export
  #Writes the packaged example data sets to Folder and runs the four example
  #analyses (histology/RSCABS, length-weight, fecundity, time-to-event),
  #saving each analysis' results and graphs in its own Results directory.
  #Fixes vs. the previous version: the garbled
  #'.stdEndEnv$WeightsVar.stdEndEnv$WeightsVar' arguments (which evaluated to
  #NULL) are replaced by '.stdEndEnv$WeightsVar'; the fecundity section now
  #creates FDir instead of re-creating LWDir; the generation subsets are
  #guarded by GenerationVar; and the final rm() clears .time2EventEnv
  #instead of .stdEndEnv.
  popMessage('This may take a moment')
  dir.create(Folder, showWarnings = FALSE)
  dir.create(paste(Folder,'\\Histology Example',sep=''), showWarnings = FALSE)
  dir.create(paste(Folder,'\\Fecundity Example',sep=''), showWarnings = FALSE)
  dir.create(paste(Folder,'\\Length - Weight Example',sep=''), showWarnings = FALSE)
  dir.create(paste(Folder,'\\Time to Event Example',sep=''), showWarnings = FALSE)
  Sys.sleep(.5)
  fecundityData<-StatCharrms::fecundityData
  lengthWeightData<-StatCharrms::lengthWeightData
  eventTimeData<-StatCharrms::eventTimeData
  exampleHistData<-RSCABS::exampleHistData
  #Write the raw example data files
  write.table(exampleHistData,paste(Folder,'\\Histology Example','\\Histology Example Data.csv',sep=''),
  row.names=FALSE,sep=',' )
  write.table(fecundityData,paste(Folder,'\\Fecundity Example','\\Fecundity Example Data.csv',sep=''),
  row.names=FALSE,sep=',' )
  write.table(lengthWeightData,paste(Folder,'\\Length - Weight Example','\\Length - Weight Data.csv',sep=''),
  row.names=FALSE,sep=',' )
  write.table(eventTimeData,paste(Folder,'\\Time to Event Example','\\Time to Event Example Data.csv',sep=''),
  row.names=FALSE,sep=',' )
  ##################################################################################
  #RSCABS
  #Take the subset corresponding to F2 males of 16 weeks of age
  exampleHistData.sub<-exampleHistData[which(exampleHistData$Generation=='F2' &
  exampleHistData$Genotypic_Sex=='Male' & exampleHistData$Age=='16_wk' ), ]
  #Run RSCABS
  exampleResults<-runRSCABS(exampleHistData.sub,'Treatment','Replicate',test.type='RS')
  #Create Dir
  HistoDir<-paste(Folder,'\\Histology Example\\Results',sep='')
  dir.create(HistoDir, showWarnings = FALSE)
  write.table(exampleResults,paste(HistoDir,'\\Histology Example Results.csv',sep=''),row.names=FALSE,sep=',')
  #Find endpoints that can be graphed: strip the trailing score character
  #from each effect name and keep the unique stems
  Names<-strsplit(as.character(exampleResults$Effect),split='')
  Names<-lapply(Names,function(X){paste0(X[-length(X)],collapse = '')})
  Names<-unique(Names)
  Files<-lapply(Names,function(X){paste(HistoDir,'\\',X,sep='')})
  #Graph all endpoints; keep going when one endpoint cannot be plotted
  CantPrint<-''
  for (i in seq_along(Files)){
  Msg<-try(plotRSCABS(exampleHistData,Names[[i]],'Treatment','Percent',
  'Remove',NULL,'png',File=Files[[i]]))
  if (inherits(Msg,'try-error')){
  CantPrint<-paste(CantPrint,Names[[i]],sep=' \n ')
  print(CantPrint)
  dev.off()
  }
  }
  ##################################################################################
  #Length Weight
  LWDir=paste(Folder,'\\Length - Weight Example\\Results',sep='')
  dir.create(LWDir, showWarnings = FALSE)
  FileName<-'Length-Weight Example Results'
  #Initialize the settings the standard analysis reads from .stdEndEnv
  .stdEndEnv$TimeVar<-'Not Used'
  .stdEndEnv$TimeInt<-21 #Time interval used
  .stdEndEnv$TimeIntGraph<-7 #Time interval used for graphing
  .stdEndEnv$TimeExcludeVal<-{}
  .stdEndEnv$GenerationVar<-'Generation'
  .stdEndEnv$GenerationVal<-'F1' #Can be a Character array
  .stdEndEnv$GenderVar<-'SEX'
  .stdEndEnv$GenderVal<-'M' #Can be a Character array
  .stdEndEnv$AgeVar<-'Age'
  .stdEndEnv$AgeVal<-'16 week'
  .stdEndEnv$ReplicateVar<-'Replicate'
  .stdEndEnv$TreatmentVar<-'Treatment'
  .stdEndEnv$Format<-"%m/%d/%Y"
  .stdEndEnv$Results<-list()
  .stdEndEnv$WeightsVar<-'Not Used'
  .stdEndEnv$TestDirection<-'Both'
  .stdEndEnv$AlphaLevel<-0.05
  .stdEndEnv$UseData<-lengthWeightData
  #Manually subset the data
  if (identical(.stdEndEnv$TreatmentVar, 'Not Used')==FALSE){
  .stdEndEnv$UseData[ ,.stdEndEnv$TreatmentVar]<-as.factor(.stdEndEnv$UseData[ ,.stdEndEnv$TreatmentVar])
  }
  if (identical(.stdEndEnv$ReplicateVar, 'Not Used')==FALSE){
  .stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar]<-as.factor(.stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar])
  }
  #Apply Subsets
  if(identical(.stdEndEnv$GenerationVar,'Not Used')==FALSE){
  .stdEndEnv$UseData<-subset(.stdEndEnv$UseData,.stdEndEnv$UseData[ ,.stdEndEnv$GenerationVar] == .stdEndEnv$GenerationVal)
  }
  if(identical(.stdEndEnv$GenderVar,'Not Used')==FALSE){
  .stdEndEnv$UseData<-subset(.stdEndEnv$UseData,.stdEndEnv$UseData[ ,.stdEndEnv$GenderVar] == .stdEndEnv$GenderVal)
  }
  if(identical(.stdEndEnv$AgeVar,'Not Used')==FALSE){
  .stdEndEnv$UseData<-subset(.stdEndEnv$UseData,.stdEndEnv$UseData[ ,.stdEndEnv$AgeVar] == .stdEndEnv$AgeVal)
  }
  if (identical(.stdEndEnv$ReplicateVar, 'Not Used')==FALSE){
  .stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar]<-as.factor(.stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar])
  #If there is 1 unit per replicate, drop the replicate structure
  if (nlevels(.stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar]) == dim(.stdEndEnv$UseData)[1]){
  .stdEndEnv$ReplicateVar<-'Not Used'
  }
  }
  .stdEndEnv$Results[['LENGTH']]<-autoStdAnylsis(.stdEndEnv$UseData,'LENGTH',.stdEndEnv$TreatmentVar,
  'None',.stdEndEnv$WeightsVar,.stdEndEnv$TimeVar,.stdEndEnv$TestDirection,.stdEndEnv$ReplicateVar,
  .stdEndEnv$AlphaLevel)
  .stdEndEnv$Results[['WEIGHT']]<-autoStdAnylsis(.stdEndEnv$UseData,'WEIGHT',.stdEndEnv$TreatmentVar,
  'Log',.stdEndEnv$WeightsVar,.stdEndEnv$TimeVar,.stdEndEnv$TestDirection,.stdEndEnv$ReplicateVar,
  .stdEndEnv$AlphaLevel)
  .stdEndEnv$Results[['WEIGHT']]$TestType<-'Auto'
  .stdEndEnv$Results[['LENGTH']]$TestType<-'Auto'
  #Save Results
  .stdEndEnv$FileName<-'Example'
  .saveResults(.stdEndEnv$Results[['WEIGHT']],LWDir,FileName)
  .saveResults(.stdEndEnv$Results[['LENGTH']],LWDir,FileName)
  #plots
  .stdEndEnv$PlotTypeList<-c('Box','Quantile-Quantile','Violin')
  .stdEndEnv$EndPointVar<-c('LENGTH','WEIGHT')
  .stdEndEnv$PlotData<-.stdEndEnv$UseData
  .saveGraphs(LWDir,FileName)
  #Remove variables so the next example starts from a clean environment
  rm(list=ls(.stdEndEnv) ,envir =.stdEndEnv)
  ##################################################################################
  #fecundity
  FDir=paste(Folder,'\\Fecundity Example\\Results',sep='')
  dir.create(FDir, showWarnings = FALSE)
  FileName<-'Fecundity Example Results'
  .stdEndEnv$TimeVar<-'Date'
  .stdEndEnv$TimeInt<-21 #Time interval used
  .stdEndEnv$TimeIntGraph<-7 #Time interval used for graphing
  .stdEndEnv$TimeExcludeVal<-{}
  .stdEndEnv$GenerationVar<-'Generation'
  .stdEndEnv$GenerationVal<-'F1' #Can be a Character array
  .stdEndEnv$GenderVar<-'Not Used'
  .stdEndEnv$GenderVal<-'Not Used' #Can be a Character array
  .stdEndEnv$AgeVar<-'Not Used'
  .stdEndEnv$AgeVal<-'Not Used'
  .stdEndEnv$ReplicateVar<-'Rep'
  .stdEndEnv$TreatmentVar<-'Treatment'
  .stdEndEnv$Format<-"%m/%d/%Y"
  .stdEndEnv$Results<-list()
  .stdEndEnv$WeightsVar<-'Not Used'
  .stdEndEnv$TestDirection<-'Both'
  .stdEndEnv$AlphaLevel<-0.05
  .stdEndEnv$UseData<-fecundityData
  #Manually subset the data
  if (identical(.stdEndEnv$TreatmentVar, 'Not Used')==FALSE){
  .stdEndEnv$UseData[ ,.stdEndEnv$TreatmentVar]<-as.factor(.stdEndEnv$UseData[ ,.stdEndEnv$TreatmentVar])
  }
  if (identical(.stdEndEnv$ReplicateVar, 'Not Used')==FALSE){
  .stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar]<-as.factor(.stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar])
  }
  #Apply Subsets
  if(identical(.stdEndEnv$GenerationVar,'Not Used')==FALSE){
  .stdEndEnv$UseData<-subset(.stdEndEnv$UseData,.stdEndEnv$UseData[ ,.stdEndEnv$GenerationVar] == .stdEndEnv$GenerationVal)
  }
  if(identical(.stdEndEnv$GenderVar,'Not Used')==FALSE){
  .stdEndEnv$UseData<-subset(.stdEndEnv$UseData,.stdEndEnv$UseData[ ,.stdEndEnv$GenderVar] == .stdEndEnv$GenderVal)
  }
  if(identical(.stdEndEnv$AgeVar,'Not Used')==FALSE){
  .stdEndEnv$UseData<-subset(.stdEndEnv$UseData,.stdEndEnv$UseData[ ,.stdEndEnv$AgeVar] == .stdEndEnv$AgeVal)
  }
  if (identical(.stdEndEnv$ReplicateVar, 'Not Used')==FALSE){
  .stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar]<-as.factor(.stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar])
  #If there is 1 unit per replicate, drop the replicate structure
  if (nlevels(.stdEndEnv$UseData[ ,.stdEndEnv$ReplicateVar]) == dim(.stdEndEnv$UseData)[1]){
  .stdEndEnv$ReplicateVar<-'Not Used'
  }
  }
  .stdEndEnv$DataSub<-.stdEndEnv$UseData #DataSub is used to dynamically change the time interval for graphing
  .stdEndEnv$Results[['Fecundity']]<-autoStdAnylsis(.stdEndEnv$UseData,'Fecundity',.stdEndEnv$TreatmentVar,
  'Square_Root',.stdEndEnv$WeightsVar,.stdEndEnv$TimeVar,.stdEndEnv$TestDirection,.stdEndEnv$ReplicateVar,
  .stdEndEnv$AlphaLevel)
  .stdEndEnv$Results[['Fecundity']]$TestType<-'Auto'
  .stdEndEnv$FileName<-'Example'
  .saveResults(.stdEndEnv$Results[['Fecundity']],FDir,FileName)
  #plots
  .stdEndEnv$PlotTypeList<-c('Box','Quantile-Quantile','Violin','Interaction')
  .stdEndEnv$EndPointVar<-c('Fecundity')
  .stdEndEnv$PlotData<-.getTimeData(.stdEndEnv$DataSub,'Date',.stdEndEnv$Format,.stdEndEnv$TimeIntGraph,
  .stdEndEnv$ReplicateVar,.stdEndEnv$TreatmentVar,.stdEndEnv$EndPointVar)
  colnames(.stdEndEnv$PlotData)[which(colnames(.stdEndEnv$PlotData)=='Averaged_Numeric_Time')]<-'Time'
  .stdEndEnv$PlotData$Time<-as.factor(.stdEndEnv$PlotData$Time)
  .stdEndEnv$TimeVar<-'Date'
  .saveGraphs(FDir,FileName)
  rm(list=ls(.stdEndEnv) ,envir =.stdEndEnv)
  ##################################################################################
  #Time to Effect
  Dir=paste(Folder,'\\Time to Event Example\\Results',sep='')
  FileName<-'Time to Effect Sample Results'
  dir.create(Dir, showWarnings = FALSE)
  #Set the globals the time-to-event functions rely on
  .time2EventEnv$TreatmentVar<-'Trt'
  .time2EventEnv$ReplicateVar<-'Rep'
  .time2EventEnv$TimeVar<-'Time'
  .time2EventEnv$StatusVar<-'Status'
  .time2EventEnv$StatusEventVal<-'1'
  .time2EventEnv$StatusCenVal<-'0'
  .time2EventEnv$CanRun<-FALSE
  .time2EventEnv$TimeTemp<-{}
  .time2EventEnv$Added.StatusVal<-0 #Used to control the control boxes
  .time2EventEnv$GenderVar<-'Not Used'
  .time2EventEnv$GenerationVar<-'Not Used'
  .time2EventEnv$UseData<-eventTimeData
  .time2EventEnv$Results<-analyseTime2Effect(.time2EventEnv$UseData,.time2EventEnv$StatusVar,.time2EventEnv$TimeVar,
  .time2EventEnv$TreatmentVar,.time2EventEnv$ReplicateVar)
  #Save Results
  .saveResults.te(.time2EventEnv$Results,Dir,FileName)
  .saveGraphs.te(Dir,FileName)
  #Remove the time-to-event globals
  rm(list=ls(.time2EventEnv) ,envir =.time2EventEnv)
}
selectPara<-function (VarName, LabelName = NULL, Enviro, What = NULL, Mult = FALSE, 
    Display = NULL) 
{
    # Opens a small gWidgets selection window and assigns the user's choice
    # to the variable named VarName inside the environment named Enviro.
    # VarName:   name of the target variable; its "Var"/"Val" suffix drives
    #            how the list of choices is built when What is NULL.
    # LabelName: optional name of a label widget (in Enviro) to update with
    #            the chosen value.
    # What:      explicit vector of choices; when NULL the choices are
    #            derived from the data in Enviro's MainData.
    # Mult:      allow multiple selections in the table widget.
    # Display:   text used in the window title; derived from VarName if NULL.
    if (is.null(What) == TRUE) {
        # Split VarName into characters and inspect its last three letters
        Word <- strsplit(VarName, split = "")[[1]]
        Type <- paste0(Word[{
            length(Word) - 2
        }:length(Word)], collapse = "")
        if (identical(Type, "Var") == TRUE) {
            # "...Var": choices are the column names of MainData
            Varaibles <- c("Not Used", colnames(get("MainData", 
                envir = get(Enviro))))
            if (is.null(Display) == TRUE) {
                Display <- paste(paste0(Word[1:{
                  length(Word) - 3
                }], collapse = ""), "Variable")
            }
        }
        if (identical(Type, "Val") == TRUE) {
            # "...Val": choices are the levels of the column selected by the
            # companion "...Var" variable (built by swapping the final 'l'
            # for an 'r')
            Word[length(Word)] <- "r"
            From <- paste0(Word, collapse = "")
            Choices <- levels(as.factor(get("MainData", envir = get(Enviro))[, 
                get(From, get(Enviro))]))
            Varaibles <- c("Not Used", Choices)
            if (is.null(Display) == TRUE) {
                Display <- paste(paste0(Word[1:{
                  length(Word) - 3
                }], collapse = ""), "Value")
            }
        }
    }
    else {
        Varaibles <- c("Not Used", What)
    }
    # Build the selection window: a table of choices plus a Select button
    SelectWindow <- gwindow(paste("Please select the", Display), 
        visible = FALSE)
    group <- ggroup(horizontal = FALSE, container = SelectWindow, 
        spacing = 20)
    SubSetSelect <- gtable(Varaibles, container = group, expand = TRUE, 
        multiple = Mult)
    SelectButton <- gbutton("Select", container = group, handler = function(h, 
        ...) {
        # Store the highlighted row(s) into Enviro under VarName
        assign(VarName, SubSetSelect[svalue(SubSetSelect, index = TRUE), 
            ], envir = get(Enviro))
        if (Mult == TRUE) {
            # Multiple selections: show a placeholder in the label widget
            if (is.null(LabelName) == FALSE) {
                try(temp <- get(LabelName, envir = get(Enviro)))
                try(svalue(temp) <- "Multiple Values")
                LabelName <- NULL
            }
        }
        if (is.null(LabelName) == FALSE) {
            # Single selection: echo the chosen value in the label widget;
            # wrapped in try() so a missing widget does not abort the handler
            try(tempVar <- get(VarName, envir = get(Enviro)), 
                silent = TRUE)
            try(tempWig <- get(LabelName, envir = get(Enviro)), 
                silent = TRUE)
            try(svalue(tempWig) <- tempVar)
        }
        dispose(SelectWindow)
    })
    # If the window is closed without selecting, fall back to "Not Used"
    addHandlerUnrealize(SelectWindow, handler = function(h, ...) {
        assign(VarName, "Not Used", envir = get(Enviro))
    })
    visible(SelectWindow) <- TRUE
    return()
}
|
981410379305a738acfdf258e8c83f15d544c51c | aa5903e21db7f9f9720fcad70039c83111cae6c1 | /gis-visualization/winter-2019/R/leaflet-example/app.R | 960e4a577104cba8442a84213e5d1ef751c342c6 | [] | no_license | chloemhall/workshop-scripts | 0e35081c490daf5f8849e611283c6985c6007c83 | f4ab527469c1026362cf094ab696897d290b63d0 | refs/heads/master | 2023-03-17T19:28:51.805613 | 2019-11-19T20:37:29 | 2019-11-19T20:37:29 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,854 | r | app.R | #
# This is a Shiny web application. You can run the application by clicking
# the 'Run App' button above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(leaflet)
library(spData)
library(dplyr)
# Define UI for application that filters map points based on year and minimum population
# UI: a year slider and a minimum-population input that together filter the
# agglomerations shown on the map and listed in the table.
ui <- fluidPage(
    # Application title
    titlePanel("World Population Over Time"),
    # Sidebar with a slider input for year, numeric input for population
    sidebarLayout(
        sidebarPanel(
            # Years run 1950-2030 in 5-year steps; sep = "" keeps the year
            # labels free of thousands separators
            sliderInput("year",
                        "Year",
                        min = 1950,
                        max = 2030,
                        step = 5,
                        sep = "",
                        value = 1950),
            # Population threshold, expressed in millions
            numericInput("pop_min",
                         "Minimum Population (in millions)",
                         min = 1,
                         max = 20,
                         value = 10)
        ),
        # Show the map and table
        mainPanel(
            # plotOutput("distPlot"),
            leafletOutput("map"),
            dataTableOutput("table")
        )
    )
)
# Define server logic required to draw a map and table
# Server: filters the data once (shared reactive) and renders both the
# leaflet map and the data table from the same filtered subset. This
# replaces the previous version, which duplicated the filter() call in
# each renderer.
server <- function(input, output) {
    # Re-evaluates whenever the year slider or population threshold changes
    pop_by_year <- reactive({
        filter(urban_agglomerations,
               year == input$year,
               population_millions > input$pop_min)
    })
    output$map <- renderLeaflet({
        leaflet(data = pop_by_year()) %>%
            addTiles() %>%
            addMarkers()
    })
    output$table <- renderDataTable({
        pop_by_year()
    })
}
# Run the application (the stray trailing "|" residue has been removed;
# it made this line a syntax error)
shinyApp(ui = ui, server = server)
"mfa" <- function (X, option = c("lambda1", "inertia", "uniform", "internal"), 
    scannf = TRUE, nf = 3) 
{
    # Multiple Factorial Analysis of a 'ktab' object: each sub-table is
    # re-weighted according to 'option' and the weighted tables are analysed
    # together in a single duality-diagram decomposition (as.dudi).
    # (The metadata residue fused onto the original header line has been
    # removed; the numeric logic is unchanged.)
    if (!inherits(X, "ktab")) 
        stop("object 'ktab' expected")
    # 'internal' weights require X$tabw; fall back to uniform when absent
    if (option[1] == "internal") {
        if (is.null(X$tabw)) {
            warning("Internal weights not found: uniform weights are used")
            option <- "uniform"
        }
    }
    lw <- X$lw
    cw <- X$cw
    # Separate analyses of each sub-table supply per-block eigenvalues/ranks
    sepan <- sepan(X, nf = 4)
    nbloc <- length(sepan$blo)
    indicablo <- factor(rep(1:nbloc, sepan$blo))
    rank.fac <- factor(rep(1:nbloc, sepan$rank))
    ncw <- NULL
    tab.names <- names(X)[1:nbloc]
    auxinames <- ktab.util.names(X)
    option <- match.arg(option)
    # Per-block column weights: 1/lambda1, 1/total inertia, uniform, or the
    # weights stored in the ktab itself
    if (option == "lambda1") {
        for (i in 1:nbloc) {
            ncw <- c(ncw, rep(1/sepan$Eig[rank.fac == i][1], 
                sepan$blo[i]))
        }
    }
    else if (option == "inertia") {
        for (i in 1:nbloc) {
            ncw <- c(ncw, rep(1/sum(sepan$Eig[rank.fac == i]), 
                sepan$blo[i]))
        }
    }
    else if (option == "uniform") 
        ncw <- rep(1, sum(sepan$blo))
    else if (option == "internal") 
        ncw <- rep(X$tabw, sepan$blo)
    ncw <- cw * ncw
    # Bind all sub-tables side by side and run the weighted analysis
    tab <- X[[1]]
    for (i in 2:nbloc) {
        tab <- cbind.data.frame(tab, X[[i]])
    }
    names(tab) <- auxinames$col
    anaco <- as.dudi(tab, col.w = ncw, row.w = lw, nf = nf, scannf = scannf, 
        call = match.call(), type = "mfa")
    nf <- anaco$nf
    # Assemble the result object from the global analysis
    afm <- list()
    afm$tab.names <- names(X)[1:nbloc]
    afm$blo <- X$blo
    afm$TL <- X$TL
    afm$TC <- X$TC
    afm$T4 <- X$T4
    afm$tab <- anaco$tab
    afm$eig <- anaco$eig
    afm$rank <- anaco$rank
    afm$li <- anaco$li
    afm$l1 <- anaco$l1
    afm$nf <- anaco$nf
    afm$lw <- anaco$lw
    afm$cw <- anaco$cw
    afm$co <- anaco$co
    afm$c1 <- anaco$c1
    # Projected inertia of a weighted sub-table xk onto the normed scores z
    projiner <- function(xk, qk, d, z) {
        w7 <- t(as.matrix(xk) * d) %*% as.matrix(z)
        iner <- apply(w7 * w7 * qk, 2, sum)
        return(iner)
    }
    # $link: inertia of each sub-table projected on the global axes
    link <- matrix(0, nbloc, nf)
    for (k in 1:nbloc) {
        xk <- X[[k]]
        q <- ncw[indicablo == k]
        link[k, ] <- projiner(xk, q, lw, anaco$l1)
    }
    link <- as.data.frame(link)
    names(link) <- paste("Comp", 1:nf, sep = "")
    row.names(link) <- tab.names
    afm$link <- link
    # $T4comp: projection of (up to) the first 4 separate-analysis components
    # of each block onto the global axes; signs are flipped so the diagonal
    # entries are non-negative
    w <- matrix(0, nbloc * 4, nf)
    i1 <- 0
    i2 <- 0
    matl1 <- as.matrix(afm$l1)
    for (k in 1:nbloc) {
        i1 <- i2 + 1
        i2 <- i2 + 4
        tab <- as.matrix(sepan$L1[sepan$TL[, 1] == levels(sepan$TL[,1])[k], ])
        if (ncol(tab) > 4) 
            tab <- tab[, 1:4]
        if (ncol(tab) < 4) 
            tab <- cbind(tab, matrix(0, nrow(tab), 4 - ncol(tab)))
        tab <- t(tab * lw) %*% matl1
        for (i in 1:min(nf, 4)) {
            if (tab[i, i] < 0) {
                for (j in 1:nf) tab[i, j] <- -tab[i, j]
            }
        }
        w[i1:i2, ] <- tab
    }
    w <- data.frame(w)
    names(w) <- paste("Comp", 1:nf, sep = "")
    row.names(w) <- auxinames$tab
    afm$T4comp <- w
    # $lisup: row coordinates computed from each sub-table separately
    w <- matrix(0, nrow(sepan$TL), ncol = nf)
    i1 <- 0
    i2 <- 0
    for (k in 1:nbloc) {
        i1 <- i2 + 1
        i2 <- i2 + length(lw)
        qk <- ncw[indicablo == k]
        xk <- as.matrix(X[[k]])
        w[i1:i2, ] <- (xk %*% (qk * t(xk))) %*% (matl1 * lw)
    }
    w <- data.frame(w)
    row.names(w) <- auxinames$row
    names(w) <- paste("Fac", 1:nf, sep = "")
    afm$lisup <- w
    afm$tabw <- X$tabw
    afm$call <- match.call()
    class(afm) <- c("mfa", "list")
    return(afm)
}
"plot.mfa" <- function (x, xax = 1, yax = 2, option.plot = 1:4, ...) {
    # Plot method for 'mfa' objects. option.plot selects which panels are
    # drawn (one panel per element):
    #   1 = rows projected on axes (xax, yax), grouped by sub-table
    #   2 = column coordinates as arrows
    #   3 = separate-analysis components on the correlation circle
    #   4 = sub-table/total "link" inertias
    #   5 = eigenvalue bar chart
    if (!inherits(x, "mfa")) 
        stop("Object of type 'mfa' expected")
    nf <- x$nf
    if (xax > nf) 
        stop("Non convenient xax")
    if (yax > nf) 
        stop("Non convenient yax")
    # Save the graphical parameters that are modified and restore them on exit
    opar <- par(mar = par("mar"), mfrow = par("mfrow"), xpd = par("xpd"))
    on.exit(par(opar))
    # One sub-plot per requested panel
    mfrow <- n2mfrow(length(option.plot))
    par(mfrow = mfrow)
    for (j in option.plot) {
        if (j == 1) {
            # Row projections from each sub-table, classed by the table factor
            coolig <- x$lisup[, c(xax, yax)]
            s.class(coolig, fac = as.factor(x$TL[, 2]), 
                label = row.names(x$li), cellipse = 0, sub = "Row projection", 
                csub = 1.5)
            add.scatter.eig(x$eig, x$nf, xax, yax, posi = "topleft", 
                ratio = 1/5)
        }
        if (j == 2) {
            # Column coordinates drawn as arrows
            coocol <- x$co[, c(xax, yax)]
            s.arrow(coocol, sub = "Col projection", csub = 1.5)
            add.scatter.eig(x$eig, x$nf, xax, yax, posi = "topleft", 
                ratio = 1/5)
        }
        if (j == 3) {
            # First separate-analysis component of each block on the
            # correlation circle
            s.corcircle(x$T4comp[x$T4[, 2] == levels(x$T4[,2])[1], ], 
                fullcircle = FALSE, sub = "Component projection", possub = "topright", 
                csub = 1.5)
            add.scatter.eig(x$eig, x$nf, xax, yax, posi = "bottomleft", 
                ratio = 1/5)
        }
        if (j == 4) {
            # Inertia of each sub-table on the selected axes
            plot(x$link[, c(xax, yax)])
            scatterutil.grid(0)
            title(main = "Link")
            par(xpd = TRUE)
            scatterutil.eti(x$link[, xax], x$link[, yax], 
                label = row.names(x$link), clabel = 1)
        }
        if (j == 5) {
            # Eigenvalue screeplot, highlighting the kept axes
            scatterutil.eigen(x$eig, wsel = 1:x$nf, sub = "Eigen values", 
                csub = 2, possub = "topright")
        }
    }
}
"print.mfa" <- function (x, ...) {
    # Print method for 'mfa' objects: call, number of kept axes, and summary
    # tables of the stored vectors and data frames.
    # Fix vs. the previous version: the final branch used 'length(mfa)',
    # which references the mfa *function* and errored whenever the object
    # carried more than 19 components; it now uses length(names(x)).
    if (!inherits(x, "mfa")) 
        stop("non convenient data")
    cat("Multiple Factorial Analysis\n")
    cat(paste("list of class", class(x)))
    cat("\n$call: ")
    print(x$call)
    cat("$nf:", x$nf, "axis-components saved\n\n")
    # Vector-valued components
    sumry <- array("", c(6, 4), list(1:6, c("vector", "length", 
        "mode", "content")))
    sumry[1, ] <- c("$tab.names", length(x$tab.names), mode(x$tab.names), 
        "tab names")
    sumry[2, ] <- c("$blo", length(x$blo), mode(x$blo), "column number")
    sumry[3, ] <- c("$rank", length(x$rank), mode(x$rank), 
        "tab rank")
    sumry[4, ] <- c("$eig", length(x$eig), mode(x$eig), "eigen values")
    sumry[5, ] <- c("$lw", length(x$lw), mode(x$lw), "row weights")
    sumry[6, ] <- c("$tabw", length(x$tabw), mode(x$tabw), 
        "array weights")
    print(sumry, quote = FALSE)
    cat("\n")
    # Data-frame-valued components
    sumry <- array("", c(11, 4), list(1:11, c("data.frame", "nrow", 
        "ncol", "content")))
    sumry[1, ] <- c("$tab", nrow(x$tab), ncol(x$tab), "modified array")
    sumry[2, ] <- c("$li", nrow(x$li), ncol(x$li), "row coordinates")
    sumry[3, ] <- c("$l1", nrow(x$l1), ncol(x$l1), "row normed scores")
    sumry[4, ] <- c("$co", nrow(x$co), ncol(x$co), "column coordinates")
    sumry[5, ] <- c("$c1", nrow(x$c1), ncol(x$c1), "column normed scores")
    sumry[6, ] <- c("$lisup", nrow(x$lisup), ncol(x$lisup), 
        "row coordinates from each table")
    sumry[7, ] <- c("$TL", nrow(x$TL), ncol(x$TL), "factors for li l1")
    sumry[8, ] <- c("$TC", nrow(x$TC), ncol(x$TC), "factors for co c1")
    sumry[9, ] <- c("$T4", nrow(x$T4), ncol(x$T4), "factors for T4comp")
    sumry[10, ] <- c("$T4comp", nrow(x$T4comp), ncol(x$T4comp), 
        "component projection")
    sumry[11, ] <- c("$link", nrow(x$link), ncol(x$link), 
        "link array-total")
    print(sumry, quote = FALSE)
    # Any components beyond the 19 standard ones are listed by name
    cat("other elements: ")
    if (length(names(x)) > 19) 
        cat(names(x)[20:length(names(x))], "\n")
    else cat("NULL\n")
}
"summary.mfa" <- function (object, ...) {
if (!inherits(object, "mfa"))
stop("non convenient data")
cat("Multiple Factorial Analysis\n")
cat("rows:", nrow(object$tab), "columns:", ncol(object$tab))
l0 <- length(object$eig)
cat("\n\n$eig:", l0, "eigen values\n")
cat(signif(object$eig, 4)[1:(min(5, l0))])
if (l0 > 5)
cat(" ...\n")
else cat("\n")
}
|
49bb555956e362007aaf7fa563b6fe200372fb49 | c48e5f42f154a44c5d08142fea2ff34ea8380535 | /inst/bin/gwasio | 35e7f11a79e7584a7801a1fb46d962dba0b5e00c | [] | no_license | aaronwolen/gwasio | 1da1b1ce80c9186131e7e6f923450f73490197c3 | 7e2a3b31e2b19fbf8361af783b070cb4759093ea | refs/heads/master | 2021-03-30T16:00:26.357106 | 2019-12-08T23:16:45 | 2019-12-08T23:16:45 | 67,529,775 | 0 | 1 | null | 2018-01-02T15:05:54 | 2016-09-06T17:11:34 | R | UTF-8 | R | false | false | 684 | gwasio | #!/usr/bin/env Rscript
local({
  # Command-line front end for gwasio: parse the arguments, read a GWAS
  # summary-statistics file with gwasio::read_gwas() and write it back out
  # as CSV with data.table::fwrite().
  #
  # usage: gwasio input [-o output]
  p <- commandArgs(TRUE)
  if (length(p) == 0L || any(c('-h', '--help') %in% p)) {
    message('usage: gwasio input [-o output]
    -h, --help to print help messages
    -o output filename for the converted CSV')
    q('no')
  }
  library(gwasio)
  # Optional "-o" flag: everything after it is the output path; otherwise
  # derive the output name from the input by swapping the extension for
  # ".csv".
  o <- match('-o', p)
  if (is.na(o)) {
    output <- paste0(tools::file_path_sans_ext(p[1]), ".csv")
  } else {
    output <- tail(p, length(p) - o)
    p <- head(p, o - 1L)
  }
  if (length(p) == 0L) stop('No input file provided')
  # BUG FIX: the original message lacked a separating space and printed
  # e.g. "foo.txtdoes not exist".
  if (!file.exists(p[1])) stop(p[1], ' does not exist')
  gwas <- gwasio::read_gwas(p[1])
  # BUG FIX: data.table::fwrite() takes 'file', not 'path'; 'path' raised
  # an "unused argument" error.
  data.table::fwrite(gwas, file = output, na = "NA", quote = FALSE)
})
| |
4b7041df9d0cef0206cf890c6f4921c27a9c68f9 | 056054deb7225c29388ffb7b4011e67ded8e5d89 | /man/vbglmss.fit.logistic.X.Rd | b7b38b7cb03973e392c5f0a59d7df559ada9561e | [] | no_license | antiphon/vbss | b47c7057c4d3585e085b3dd90ac7078ebc50f2ca | 5e99e04c711c2a873fd2d63679bfc7a66919b4cb | refs/heads/master | 2020-05-31T17:23:11.097733 | 2013-09-25T06:21:34 | 2013-09-25T06:21:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 458 | rd | vbglmss.fit.logistic.X.Rd | \name{vbglmss.fit.logistic.X}
\alias{vbglmss.fit.logistic.X}
\title{Fit logistic regression with only SS coefficients.}
\usage{
vbglmss.fit.logistic.X(y, X, prior, cat2, eps = 0.01,
...)
}
\arguments{
\item{y}{binary response.}
\item{X}{the SS covariates.}
\item{prior}{a list of priors}
\item{cat2}{printer}
\item{eps}{the approximation loop convergence threshold}
}
\description{
Fit logistic regression with only SS coefficients.
}
|
f02c55e6ef1d849e57bf49012379c0c7c94538f9 | ca7ffd847a0af6c75e18995984eadf54c51e8e3a | /R/translator.R | 7641ac67d516543696cb7740878a1000a047fc48 | [
"MIT"
] | permissive | ClaraRC/shiny.i18n | 06de7539eecd6ee7b342a406471ce734eac657a3 | 992df3a89c469c281e1b91546f20e146fff2cbd8 | refs/heads/master | 2020-03-27T23:30:00.408717 | 2018-08-03T13:47:16 | 2018-08-03T13:47:16 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,252 | r | translator.R | #' Translator options
# Default (empty) set of culture-specific formatting options for Translator
# objects. They are merged with (and overridden by) any matching fields read
# from the translation configuration by the '.read_json' / '.read_csv'
# methods defined on the Translator reference class below.
.translator_options <- list(
  cultural_bignumer_mark = NULL,
  cultural_punctuation_mark = NULL,
  cultural_date_format = NULL
)
#' Translator class
#'
#' @field languages character vector with all languages
#' @field options list with options from configuration file
#' @field translations data.frame with translations
#' @field translation_language character current translation language
#' @field mode determines whether data was read from "csv" or "json" files.
#'
#' @return Translator object (for all possible methods look at Methods section)
#'
#' @import jsonlite
#' @export Translator
#' @exportClass Translator
#'
#' @examples
#' \dontrun{
#' i18n <- Translator(translation_json_path = "data/translation.json")
#' i18n$set_translation_language("it")
#' i18n$t("This text will be translated to italian")
#' }
#' @name translator
#' @rdname translator
# Reference-class generator for Translator objects; the fields are described
# in the roxygen block above and the behaviour lives in the methods attached
# separately via Translator$methods().
Translator <- setRefClass(
  "Translator",
  fields = list(
    languages = "character",            # all languages available in the translation source
    translation_language = "character", # currently selected target language (character(0) = key language)
    options = "list",                   # culture-specific formatting options
    translations = "data.frame",        # lookup table: key-language rows x target-language columns
    mode = "character"                  # "csv" or "json", depending on the input format
  )
)
Translator$methods(
  initialize = function(translation_csvs_path = NULL,
                        translation_json_path = NULL,
                        translation_csv_config = NULL) {
      # Exactly one of the two translation sources must be supplied; the
      # matching private reader then populates the 'languages',
      # 'translations', 'options' and 'mode' fields.
      options <<- .translator_options
      if (!is.null(translation_csvs_path) && !is.null(translation_json_path))
        stop(paste("Arguments 'translation_csvs_path' and",
                   "'translation_json_path' are mutually exclusive."))
      else if (!is.null(translation_csvs_path))
        .read_csv(translation_csvs_path, translation_csv_config)
      else if (!is.null(translation_json_path))
        .read_json(translation_json_path)
      else
        stop("You must provide either translation json or csv files.")
      # character(0) means "no target language selected": translate() is
      # then a pass-through.
      translation_language <<- character(0)
    },
  .read_json = function(translation_file, key_translation) {
    # Load all translations from a single JSON file. The first language
    # listed in the file acts as the key language (row index of the
    # 'translations' table).
    # NOTE(review): the 'key_translation' argument is never used - it is
    # overwritten below before being read.
    mode <<- "json"
    # TODO validate format of a json translation_file
    # Update the list of options, or take a default from config.
    json_data <- jsonlite::fromJSON(translation_file)
    common_fields <- intersect(names(json_data), names(options))
    options <<- modifyList(options, json_data[common_fields])
    languages <<- as.vector(json_data$languages)
    key_translation <- languages[1]
    # To make sure that key translation is always first in vector
    languages <<- unique(c(key_translation, languages))
    translations <<- column_to_row(json_data$translation, key_translation)
  },
  .read_csv = function(translation_path,
                       translation_csv_config) {
    # Load translations from a directory of csv files plus an optional
    # configuration file holding culture-specific options.
    mode <<- "csv"
    local_config <- load_local_config(translation_csv_config)
    options <<- modifyList(options, local_config)
    tmp_translation <- read_and_merge_csvs(translation_path)
    languages <<- as.vector(colnames(tmp_translation))
    key_translation <- languages[1]
    translations <<- column_to_row(tmp_translation, key_translation)
  },
  translate = function(keyword) {
    "Translates 'keyword' to language specified by 'set_translation_language'"
    # No target language set: keywords pass through unchanged.
    if (identical(translation_language, character(0)))
      return(keyword)
    tr <- as.character(translations[keyword, translation_language])
    # Unknown keywords fall back to the keyword itself, with a warning.
    if (is.na(tr)){
      warning(sprintf("'%s' translation does not exist.", keyword))
      tr <- keyword
    }
    tr
  },
  t = function(keyword) {
    "Wrapper method. Look at 'translate'"
    translate(keyword)
  },
  set_translation_language = function(transl_language) {
    "Specify language of translation. It must exist in 'languages' field."
    if (!(transl_language %in% languages))
      stop(sprintf("'%s' not in Translator object languages",
                   transl_language))
    key_translation <- languages[1]
    # Selecting the key language itself is stored as character(0)
    # ("no translation needed").
    if (transl_language == key_translation)
      translation_language <<- character(0)
    else
      translation_language <<- transl_language
  },
  parse_date = function(date) {
    "Parse date to format described in 'cultural_date_format' field in config."
    format(as.Date(date), format = options$cultural_date_format)
  },
  parse_number = function(number) {
    "Parse numbers (to be implemented)."
    # TODO numbers parsing
    warning("This is not implemented yet. Sorry!")
    number
  }
)
|
bdcf854799e6451b7d72ff7e70c5e2d6c9da7e58 | caa6fc35f1803dfc319f41ed738d6fbd4156d951 | /060-retirement-simulation/server.r | 5c761ca04c69757f3470191f1baafb2650db2873 | [] | no_license | dougsanders79/shiny-examples | 8dea4534ec88d03e391f15aadbc072653fe8b710 | 8b1c940e2a38671204e7aa0ab219d0a0246d1166 | refs/heads/master | 2021-01-14T12:45:46.101981 | 2015-11-16T04:43:18 | 2015-11-16T04:43:18 | 45,952,337 | 0 | 0 | null | 2015-11-11T01:32:06 | 2015-11-11T01:32:06 | null | UTF-8 | R | false | false | 7,887 | r | server.r | library(shiny)
paramNames <- c("start_capital", "annual_mean_return", "annual_ret_std_dev",
"annual_inflation", "annual_inf_std_dev", "monthly_withdrawals", "n_obs",
"n_sim", "monthly_additions_br", "annual_mean_return_br", "n_obs_br")
# simulate_nav <- function(start_capital = 2000000, annual_mean_return = 5.0,
# annual_ret_std_dev = 7.0, annual_inflation = 2.5,
# annual_inf_std_dev = 1.5, monthly_withdrawals = 1000,
# n_obs = 20, n_sim = 200, monthly_additions_br = 1000,
# annual_mean_return_br = 5.0, n_obs_br = 20
# )
# Define server logic required to generate and plot a random distribution
#
# Idea and original code by Pierre Chretien
# Small updates by Michael Kapler
#
shinyServer(function(input, output, session) {

  # Collect the simulation inputs for one scenario panel ("a" or "b").
  # Reading "<prefix>_recalc" first takes a reactive dependency on the
  # panel's "Recalculate" button, so the caller re-runs when it is pressed.
  getParams <- function(prefix) {
    input[[paste0(prefix, "_recalc")]]

    params <- lapply(paramNames, function(p) {
      input[[paste0(prefix, "_", p)]]
    })
    names(params) <- paramNames
    params
  }

  # Function that generates scenarios and computes NAV. The expression
  # is wrapped in a call to reactive to indicate that:
  #
  # 1) It is "reactive" and therefore should be automatically
  #    re-executed when inputs change
  #
  navA <- reactive(do.call(simulate_nav, getParams("a")))
  # navA <- simulate_nav()
  navB <- reactive(do.call(simulate_nav, getParams("b")))

  # Expression that plot NAV paths. The expression
  # is wrapped in a call to renderPlot to indicate that:
  #
  # 1) It is "reactive" and therefore should be automatically
  #    re-executed when inputs change
  # 2) Its output type is a plot
  output$a_distPlot <- renderPlot({
    plot_nav(navA()[[1]])
  })
  output$b_distPlot <- renderPlot({
    plot_nav(navB()[[1]])
  })

  # Tables of the simulated annualized real returns before ("br") and
  # after ("ar") retirement: elements 2 and 3 of the simulate_nav() result.
  output$a_retirement_table_br <- DT::renderDataTable({ navA()[[2]] })
  output$a_retirement_table_ar <- DT::renderDataTable({ navA()[[3]] })
  output$b_retirement_table_br <- DT::renderDataTable({ navB()[[2]] })
  output$b_retirement_table_ar <- DT::renderDataTable({ navB()[[3]] })
})
# Simulate the evolution of a retirement portfolio on a monthly grid.
#
# Two phases are simulated:
#   1. an accumulation ("before retirement", *_br) phase of 'n_obs_br' years
#      with monthly contributions of 'monthly_additions_br', and
#   2. a withdrawal phase of 'n_obs' years with monthly withdrawals of
#      'monthly_withdrawals'.
# Monthly investment returns and inflation are drawn i.i.d. normal from the
# annual assumptions (percentages on the 0-100 scale); all capital paths are
# inflation-adjusted.
#
# Returns a list of three elements:
#   [[1]] matrix of simulated capital paths (rows = months incl. one leading
#         row of starting capital, cols = simulations, in millions; NA once
#         a path has run out of money),
#   [[2]] data.frame of annualized real returns (%) during accumulation,
#   [[3]] data.frame of annualized real returns (%) during retirement.
simulate_nav <- function(start_capital = 2000000, annual_mean_return = 5.0,
                         annual_ret_std_dev = 7.0, annual_inflation = 2.5,
                         annual_inf_std_dev = 1.5, monthly_withdrawals = 1000,
                         n_obs = 10, n_sim = 10, monthly_additions_br = 1000,
                         annual_mean_return_br = 5.0, n_obs_br = 5
                         ) {
  #-------------------------------------
  # Inputs
  #-------------------------------------
  start.capital <- start_capital

  # Investment and inflation assumptions, converted to decimals.
  annual.mean.return <- annual_mean_return / 100
  annual.mean.return.br <- annual_mean_return_br / 100
  annual.ret.std.dev <- annual_ret_std_dev / 100
  annual.inflation <- annual_inflation / 100
  annual.inf.std.dev <- annual_inf_std_dev / 100

  # Monthly cash flows.
  monthly.withdrawals <- monthly_withdrawals
  monthly.additions.br <- monthly_additions_br

  # Horizons in months and number of simulated paths.
  n.obs <- 12 * n_obs
  n.obs.br <- 12 * n_obs_br
  n.sim <- n_sim

  #-------------------------------------
  # Simulation
  #-------------------------------------
  # Monthly investment and inflation assumptions.
  monthly.mean.return <- annual.mean.return / 12
  monthly.mean.return.br <- annual.mean.return.br / 12
  monthly.ret.std.dev <- annual.ret.std.dev / sqrt(12)
  monthly.inflation <- annual.inflation / 12
  monthly.inf.std.dev <- annual.inf.std.dev / sqrt(12)

  # Draw i.i.d. normal monthly returns and inflation for every
  # month x simulation cell of both phases.
  monthly.invest.returns.br <- matrix(
    rnorm(n.obs.br * n.sim, mean = monthly.mean.return.br, sd = monthly.ret.std.dev),
    n.obs.br, n.sim)
  monthly.invest.returns <- matrix(
    rnorm(n.obs * n.sim, mean = monthly.mean.return, sd = monthly.ret.std.dev),
    n.obs, n.sim)
  monthly.inflation.returns <- matrix(
    rnorm(n.obs * n.sim, mean = monthly.inflation, sd = monthly.inf.std.dev),
    n.obs, n.sim)
  monthly.inflation.returns.br <- matrix(
    rnorm(n.obs.br * n.sim, mean = monthly.inflation, sd = monthly.inf.std.dev),
    n.obs.br, n.sim)

  # Compound a matrix of monthly returns into annualized percentage returns
  # (one row per year, one column per simulation).
  # BUG FIX: the original loop bound '1: rowsM/ 12' parsed as '(1:rowsM)/12'
  # because ':' binds tighter than '/', producing fractional loop indices
  # and corrupted results; seq_len(rowsM / 12) iterates once per year as
  # intended (and is safe when the matrix has zero rows).
  annualize_returns <- function(M) {
    n.years <- nrow(M) / 12
    M12 <- matrix(0, n.years, n.sim)
    for (i in seq_len(n.sim)) {
      for (k in seq_len(n.years)) {
        value <- prod(1 + M[(k - 1) * 12 + (1:12), i])
        M12[k, i] <- round((value - 1) * 100, digits = 1)
      }
    }
    M12
  }

  # Real (inflation-adjusted) annualized returns for both phases.
  monthly.invest.returns.br12 <- annualize_returns(monthly.invest.returns.br - monthly.inflation.returns.br)
  monthly.invest.returns.12 <- annualize_returns(monthly.invest.returns - monthly.inflation.returns)

  # Capital path: one extra leading row holds the starting capital.
  nav <- matrix(start.capital, n.obs.br + n.obs + 1, n.sim)

  # Accumulation phase: apply real return, then add the monthly contribution.
  if (n.obs.br > 0) {
    for (j in seq_len(n.obs.br)) {
      nav[j + 1, ] <- nav[j, ] * (1 + monthly.invest.returns.br[j, ] - monthly.inflation.returns.br[j, ]) + monthly.additions.br
    }
  }
  # Withdrawal phase: apply real return, then subtract the withdrawal.
  for (j in seq_len(n.obs)) {
    nav[j + 1 + n.obs.br, ] <- nav[j + n.obs.br, ] * (1 + monthly.invest.returns[j, ] - monthly.inflation.returns[j, ]) - monthly.withdrawals
  }

  # Once the nav goes below zero the scenario has run out of money.
  nav[nav < 0] <- NA

  # Express capital in millions.
  nav <- nav / 1000000

  # Wrap a returns matrix in a data.frame with "sim <i>" column names.
  make_df <- function(M) {
    DF <- data.frame(M)
    colnames(DF) <- paste0("sim ", seq_len(ncol(DF)))
    DF
  }

  list(nav,
       make_df(monthly.invest.returns.br12),
       make_df(monthly.invest.returns.12))
}
# Three-panel summary of the simulated capital paths: (1) every scenario
# trajectory over time, (2) the share of scenarios that can still pay the
# withdrawals, (3) the distribution of final capital among the surviving
# scenarios. Called for its plotting side effects only.
plot_nav <- function(nav) {

  layout(matrix(c(1,2,1,3),2,2))
  palette(c("black", "grey50", "grey30", "grey70", "#d9230f"))

  # create matrix at annual slices only
  rows <- seq(from = 1, to = (nrow(nav))-1 , by = 12)
  nav <- nav[rows ,]

  # plot all scenarios
  matplot(nav,
          type = 'l', lwd = 0.5, lty = 1, col = 1:5,
          xlab = 'Years', ylab = 'Millions',
          main = 'Projected Value of Initial Capital')

  # plot % of scenarios that are still paying
  # (an NA nav entry marks a scenario that has run out of money)
  p.alive = 1 - rowSums(is.na(nav)) / ncol(nav)

  plot(100 * p.alive, las = 1, xlab = 'Years', ylab = 'Percentage Paying',
       main = 'Percentage of Paying Scenarios', ylim=c(0,100))
  grid()

  last.period = nrow(nav)

  # plot distribution of final wealth
  final.nav = nav[last.period, ]
  final.nav = final.nav[!is.na(final.nav)]

  # Nothing to plot when every scenario has failed.
  if(length(final.nav) == 0) return()

  plot(density(final.nav, from=0, to=max(final.nav)), las = 1, xlab = 'Final Capital',
       main = paste0('Distribution of Final Capital\n', 100 * p.alive[last.period], '% are still paying'))
  grid()
}
|
6abd061e5034974efe523d047834f507ecd4fa76 | 765c4ced3ddb8eb8d86fdce28f2abe9017b9deec | /biomod2_brisca_version/biomod2/R/FilteringTransformation.R | a5563c088ecb9d53d59c04a673b38008527e8db9 | [] | no_license | DamienGeorges/briscahub | 1e83b60edc24b3b5de927cbcfe96f1964270bd08 | b3443c80a086fe062ad8a0b1e714ca4431ca4099 | refs/heads/master | 2020-05-21T22:34:00.090074 | 2017-07-07T16:06:02 | 2017-07-07T16:06:02 | 42,178,363 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 3,205 | r | FilteringTransformation.R | # `FilteringTransformation` <-
# function(ProbData, CutOffdata)
# {
# if(is.data.frame(ProbData)) {
# N <- dim(ProbData)[2]
# i <- 1
# while(i <= N) {
# if(sum(ProbData[,i])!=0) ProbData[ProbData[,i] < CutOffdata[i, 1],i] <- 0
# i <- i + 1
# }
# }
# else if(sum(ProbData) != 0) ProbData[ProbData < CutOffdata] <- 0
#
# return(ProbData)
# }
#
# FilteringTransformation_v2 <-
# function(ProbData, CutOff){
# ProbData[ProbData < CutOff] <- 0
# return(ProbData)
# }
setGeneric("FilteringTransformation",
function(data, threshold){
standardGeneric("FilteringTransformation")
})
setMethod('FilteringTransformation', signature(data='data.frame'),
          function(data, threshold)
          {
            # One threshold per column: the double transpose recycles
            # 'threshold' column-wise, so each column is compared against
            # its own cut-off and sub-threshold values are zeroed.
            data <- data.matrix(data)
            data[t(t(data)<threshold)] <-0
            ## check if some thresolds are NAs
            # Columns with an undefined (NA) threshold are blanked entirely.
            if(any(is.na(threshold))){
              data[,is.na(threshold)] <- NA
            }
            # A single-column result collapses to a plain vector.
            if(ncol(data)==1) data <- data[,1]
            return(data)
          })
setMethod('FilteringTransformation', signature(data = 'matrix'),
          function(data, threshold) {
            # Matrices are handled by coercion to data.frame, which
            # dispatches to the data.frame method above.
            FilteringTransformation(as.data.frame(data), threshold)
          })
setMethod('FilteringTransformation', signature(data = 'numeric'),
          function(data, threshold) {
            # Numeric vectors become a one-column data.frame; the
            # data.frame method returns the filtered column as a vector.
            FilteringTransformation(as.data.frame(data), threshold)
          })
setMethod('FilteringTransformation', signature(data='array'),
          function(data, threshold)
          {
            # 'threshold' must match the non-first dimensions of 'data'
            # (optionally carrying a leading dimension of its own).
            if(length(dim(data)) == length(dim(threshold))){
              if(sum( dim(data)[-1] != dim(threshold)[-1] ) > 0 ){
                stop("data and threshold dimentions mismatch")
              }
            } else{
              if(sum( dim(data)[-1] != dim(threshold) ) > 0 ){
                stop("data and threshold dimentions mismatch")
              }
            }
            # Sweep the thresholds over all non-first margins: keep values
            # strictly above the threshold, zero the rest, and propagate
            # NA cells unchanged.
            return(sweep(data,2:length(dim(data)),threshold,
                         function(x,y) {
                           if(!is.na(x)){
                             return(ifelse(x>y,x,0))
                           } else {
                             return(rep(NA,length(x)) )}
                         }))
          })
setMethod('FilteringTransformation', signature(data='RasterLayer'),
          function(data, threshold)
          {
            # Reclassify cells below the threshold to 0; an NA threshold
            # yields a fully masked (all-NA) layer.
            if(!is.na(threshold)){
              return(reclassify(data,c(-Inf,threshold,0)))
            } else{ ## return a empty map (NA everywhere)
              return(reclassify(data,c(-Inf,Inf,NA)))
            }
          })
setMethod('FilteringTransformation', signature(data='RasterStack'),
          function(data, threshold)
          {
            # A scalar threshold is recycled to one value per layer; each
            # layer is then filtered independently via the RasterLayer
            # method, and the original layer names are preserved.
            if(length(threshold) == 1){
              threshold <- rep(threshold, raster::nlayers(data))
            }
            StkTmp <- raster::stack()
            for(i in 1:raster::nlayers(data)){
              StkTmp <- raster::addLayer(StkTmp, FilteringTransformation(raster::subset(data,i,drop=TRUE), threshold[i]))
            }
            names(StkTmp) <- names(data)
            return(StkTmp)
          })
setMethod('FilteringTransformation', signature(data='RasterBrick'),
          function(data, threshold)
          {
            # Bricks are converted to stacks (RAT=FALSE keeps raw cell
            # values) and delegated to the RasterStack method.
            data <- raster::stack(data, RAT=FALSE)
            return(FilteringTransformation(data, threshold))
          })
fd8a93bc339d6ef89a69ccb6e849e2cfc1085eed | 4564742526c7278b8317133ae42a4fe93184e130 | /vignettes/old/01_2_location_elp.R | 2cc467ab28c04b5309a40839559ca234068cc038 | [] | no_license | livioivil/eegusta | 0bfd0880cc75198b03e4853584d8b0117ad5957c | 70f6693bbf998b6fa3325f3b43a5177514c0b539 | refs/heads/master | 2023-06-22T00:10:37.768054 | 2023-06-19T14:20:11 | 2023-06-19T14:20:11 | 255,547,295 | 1 | 3 | null | 2022-05-12T20:32:47 | 2020-04-14T08:03:00 | HTML | UTF-8 | R | false | false | 624 | r | 01_2_location_elp.R | rm(list=ls())
setwd("~/hdeeg")
library(eeguana)
library(eegusta)
files=dir("./data_bdf",full.names = TRUE,pattern = "elp$")
# for(i in 1:length(files)){
# print(i)
# res=read_elp(files[i])
# }
locs=lapply(files,read_elp)
LOC=locs[[1]]
for(i in 2:length(locs)){
LOC[,5:7]=LOC[,5:7]+locs[[i]][,5:7]
}
LOC[,5:7]=LOC[,5:7]/length(locs)
D=c()
for(i in 1:length(locs)){
D[i]=sum((LOC[,5:7]-locs[[i]][,5:7])^2)
}
plot(D)
plot(sort(D))
summary(D)
dists=rowSums(locs[[1]][,5:7]-LOC[,5:7])^2
summary(dists)
plot(dists)
locs[[1]][which.max(dists),]
LOC[which.max(dists),]
save(LOC, file="locations_eeguana.Rdata")
|
cd5c89496369d83f039dffdeab9f11614d6c7721 | 1926f167463787636fa4c6350600bcd6ba172a6a | /man/theme_light.Rd | 5415e290e2252bb5e702223c11cc0e5ef16d6f54 | [] | no_license | nietzsche1993/ggplot2 | f82bec898ce5fc8fd225ac7f61b93577b4a2cfc9 | 188c8e02b2a101091af1511b03cb30255ddd9c69 | refs/heads/master | 2020-12-31T02:01:53.263340 | 2014-03-17T21:50:21 | 2014-03-17T21:50:21 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 771 | rd | theme_light.Rd | % Generated by roxygen2 (4.0.0): do not edit by hand
\name{theme_light}
\alias{theme_light}
\title{A theme with white background and light grey lines}
\usage{
theme_light(base_size = 12, base_family = "")
}
\arguments{
\item{base_size}{base font size}
\item{base_family}{base font family}
}
\description{
A theme with white background and light grey lines
}
\examples{
p <- ggplot(mtcars) + geom_point(aes(x = wt, y = mpg))
p
# 'classic' dark-on-light theme
p + theme_bw()
# this theme
p + theme_light()
# variation with dark box and axes legends
p + theme_linedraw()
}
\seealso{
Other themes: \code{\link{theme_bw}};
\code{\link{theme_classic}}; \code{\link{theme_gray}},
\code{\link{theme_grey}}; \code{\link{theme_linedraw}};
\code{\link{theme_minimal}}
}
|
28b153b5901329afbeb391db76661629f17768c4 | 160d17e91f168f19d41fdbda99e58ed42467e2f5 | /Package/R/sfc.editEqus.R | a3915a5c87d167605a577121ededefad91e4d6ac | [
"MIT"
] | permissive | andersonjames492/PKSFC | fa981c0d4bd3d4ef4d96b4297b8fe9f31f956532 | c32ef687dddd1f3b4c8d9d2f66bd00ca1aa95395 | refs/heads/master | 2023-03-20T18:45:34.882228 | 2020-03-15T14:37:10 | 2020-03-15T14:37:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 877 | r | sfc.editEqus.R | #' Edit equations.
#'
#' Edit a series of equations in an sfc object.
#'
#' @param model an sfc object.
#' @param list a list containing lists of the indexs of equations to be changes,
#' the equations to be changed, the initial conditions, the lags and the descriptions of the
#' equations to be edited.
#' @return an sfc object.
#'
#' @export
#' @author Antoine Godin
sfc.editEqus<-function(model=stop("Need a model"),list=stop("Need a list f variables")){
for(i in 1:length(list)){
ind=list[[i]]$ind
var=list[[i]]$var
eq=list[[i]]$eq
desc=list[[i]]$desc
if(is.null(ind)&&is.null(var)){stop("each element of list has to contain an element var or an element ind")}
if(is.null(ind)){ind=NA}
if(is.null(var)){var=NA}
if(is.null(eq)){eq=NA}
if(is.null(desc)){desc=NA}
model<-sfc.editEqu(model,ind,var,eq,desc)
}
return(model)
}
|
2b2e937a724b798c5ee6a73dcf934b58354ffcb9 | 37e0837026bb71e1ffcc7596c1a112e01aa67e84 | /SMRD_3.4.4/doc/echapter14.R | fde1014149c7f5c61827df56bdab278001162578 | [] | no_license | erhard1/stat687 | 1726c873524d099b76c612e9a4064319bff7223a | c908e84139e5f3e80c059490897cf4a0c34da7f3 | refs/heads/master | 2022-01-25T18:25:01.740916 | 2018-08-23T23:34:35 | 2018-08-23T23:34:35 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 12,990 | r | echapter14.R | ## ---- echo = FALSE-------------------------------------------------------
SMRD:::vinny()
library(SMRD)
## ------------------------------------------------------------------------
# Build a SMRD life-data object from the bearing-cage field data: column 1
# holds the (possibly censored) lifetimes, column 2 the censoring
# indicator, and column 3 the case weights (units represented per row).
BearingCage.ld <- frame.to.ld(bearingcage,
                              response.column = 1,
                              censor.column = 2,
                              case.weight.column = 3)
# Interactive table view of the life-data object.
DT::datatable(BearingCage.ld)
## ------------------------------------------------------------------------
bcage.prior.weibull.spec1 <-
specify.simple.prior(p = .01,
qdist = "loguniform",
qlower = 100,
qupper = 5000,
sigma.dist = "lognormal",
sigma.lower = 0.2,
sigma.upper = 0.5,
distribution = "Weibull")
## ------------------------------------------------------------------------
bcage.prior.weibull.spec2 <-
specify.simple.prior(p = .01,
qdist = "loguniform",
qlower = 1000,
qupper = 1400,
sigma.dist = "lognormal",
sigma.lower = 1.5,
sigma.upper = 2.5,
distribution = "Weibull")
## ------------------------------------------------------------------------
bcage.prior.weibull.spec3 <-
specify.simple.prior(p = .01,
qdist = "lognormal",
qlower = 1000,
qupper = 1400,
sigma.dist = "lognormal",
sigma.lower = 1.5,
sigma.upper = 2.5,
distribution = "Weibull")
## ------------------------------------------------------------------------
bcage.prior.lognormal.spec1 <-
specify.simple.prior( p = .04,
qdist = "loguniform",
qlower = 100,
qupper = 5000,
sigma.dist = "lognormal",
sigma.lower = 0.2,
sigma.upper = 5,
distribution = "Lognormal")
## ------------------------------------------------------------------------
bcage.prior.lognormal.spec2 <-
specify.simple.prior(p = .01,
qdist = "loguniform",
qlower = 1000,
qupper = 1400,
sigma.dist = "lognormal",
sigma.lower = 1,
sigma.upper = 1.5,
distribution = "Lognormal")
## ------------------------------------------------------------------------
bcage.prior.lognormal.spec3 <-
specify.simple.prior(p = .01,
qdist = "loguniform",
qlower = 1000,
qupper = 1400,
sigma.dist = "lognormal",
sigma.lower = 1.,
sigma.upper = 1.5,
distribution = "Lognormal")
## ------------------------------------------------------------------------
prior2.bcage <-
make.prior(spec = bcage.prior.lognormal.spec1,
number.in.prior = 3000)
prior.and.post2.bcage <-
get.big.posterior(bcage.prior.lognormal.spec1,
BearingCage.ld)
prior.and.post2.bcage$post[1:10,]
prior.and.post3.bcage <-
make.small.posterior.object(prior.and.post2.bcage)
## ------------------------------------------------------------------------
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Marginals only",
marginal.on.sigma = T,
marginal.on.pos = F,
type.position = "Parameter",
newdata = "mu",
include.likelihood = T)
#quantle marginal
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Marginals only",
marginal.on.sigma = F,
marginal.on.pos = T,
type.position = "Quantile",
newdata = .1,
include.likelihood = T)
#sigma marginal
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Marginals only",
marginal.on.sigma = T,
marginal.on.pos = F,
type.position = "Quantile",
newdata = .1,
include.likelihood = T)
#prob
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Marginals only",
marginal.on.sigma = F,
marginal.on.pos = T,
type.position = "Failure probability",
newdata = 1000,
include.likelihood = T)
#Joint only axes.range.default.post = T
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Joint only",
axes.range.default.post = T,
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Failure probability",
newdata = 1000,
include.likelihood = T)
#Joint only
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "prior",
task = "Joint only",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Parameter",
newdata = "mu",
include.likelihood = T)
#Joint only
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "prior",
task = "Joint only",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Quantile",
newdata = .1,
include.likelihood = T)
#Joint only axes.range.default.post = F
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Joint only",
axes.range.default.post = F,
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Failure probability",
newdata = 1000,
include.likelihood = F)
#Joint only axes.range.default.post = F
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "prior",
task = "Joint only",
axes.range.default.post = F,
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Failure probability",
newdata = 1000,
include.likelihood = F)
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "prior",
task = "Joint and Marginal",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Parameter",
newdata = "mu")
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Joint only",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Parameter",
newdata = "mu",
include.likelihood = T)
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "prior",
task = "Joint only",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Parameter",
newdata = "mu",
include.likelihood = T)
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "prior",
task = "Joint and Marginal",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Quantile",
newdata = .1)
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "prior",
task = "Joint and Marginal",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Failure probability",
newdata = 1000)
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "prior",
task = "Joint and Marginal",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Failure probability",
newdata = 6000)
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Joint only",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Parameter",
newdata = "mu",
include.likelihood = T)
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Joint and Marginal",
marginal.on.sigma = F,
marginal.on.pos = T,
type.position = "Parameter",
newdata = "mu")
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Joint and Marginal",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Quantile",
newdata = .1)
summarize.posterior.or.prior(prior.and.post2.bcage,post.or.prior = "post",
task = "Joint and Marginal",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Failure probability",
newdata = 1000)
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "prior",
task = "Joint and Marginal",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Failure probability",
newdata = 1000)
summarize.posterior.or.prior(prior.and.post2.bcage,
post.or.prior = "post",
task = "Joint and Marginal",
marginal.on.sigma = F,
marginal.on.pos = F,
type.position = "Failure probability",
newdata = 6000)
prior.and.post3.bcage <- make.small.posterior.object(prior.and.post2.bcage)
## ------------------------------------------------------------------------
# NOTE(review): these calls appear to plot Bayesian predictions for a
# future sample of 'nsamsize' units over the given (log) time range in
# hours, based on the posterior draws above - confirm against the SMRD
# documentation for plot.prediction.order().
SMRD:::plot.prediction.order(x = 1,
                             nsamsize = 3,
                             prior.and.post2.bcage,
                             time.range = log(c(50,10000)),
                             xlab = "Hours")
## ------------------------------------------------------------------------
# Same prediction plot for a larger future sample (50 units) and a wider
# time range.
SMRD:::plot.prediction.order(x = 1,
                             nsamsize = 50,
                             prior.and.post2.bcage,
                             time.range = log(c(10,10000)),
                             xlab = "Hours")
|
d2a15427944341f41e635cdc01ca406d52d2f3d6 | c403ee9c4caaf1fc4e9aefde7262bc8896f2ab59 | /three_sons.R | 151988cfdac636488ab0e308738aaa4ef7c0105c | [] | no_license | gdemin/Experiments-Snippets | 09b45e0c4410b256b77347eb9eaa6cccf19fca9b | d0085fa44ad30b1350585e78e5046d7557e35ed1 | refs/heads/master | 2021-01-18T22:38:51.937423 | 2016-04-20T21:47:12 | 2016-04-20T21:47:12 | 26,373,292 | 1 | 0 | null | null | null | null | WINDOWS-1251 | R | false | false | 3,433 | r | three_sons.R | # Mathematician with three sons.
# http://orabote.net/feedback/show/id/342281
# The sons' ages puzzle:
# Two mathematicians, both below retirement age, meet after a long break.
# A fragment of their dialogue:
# - Do you have children?
# - Three sons.
# - How old are they?
# - Multiply their ages and you get exactly your own age.
# - (After some thought.) That is not enough information.
# - Add their ages and you get today's day of the month.
# - (Again after some thought.) I still cannot tell.
# - By the way, the middle son likes to dance.
# - Got it.
# Can you determine the age of each son?
library(dplyr)
library(magrittr)
# All combinations of the sons' ages; no age may exceed 44
a = expand.grid(x=1:44,y=1:44,z=1:44) %>%
  filter(y>=x & z>=y) # drop combinations that differ only by permutation
b = a %>% mutate(p = x*y*z, s=x+y+z) %>%
  filter(p>17 & p<60) %>% # the mathematician's age is below 60
  group_by(p) %>% mutate(count_p = n()) %>%
  filter(count_p>1) %>% # the product alone did not identify the ages
  filter(s<32) %>% # the date is a day of the month, i.e. below 32
  group_by(p, s) %>% mutate(count_ps = n()) %>%
  filter(count_ps>1) %>% # the sum still did not identify the ages
  filter(y>x & z>y) %>% # the sons have distinct ages (a "middle" son who dances exists)
  group_by(p, s) %>% mutate(count_ps = n()) %>% filter(count_ps==1) # after this clue the answer is unique
b # 1,5,8
# The same puzzle found elsewhere on the internet states the date explicitly: it is the 14th.
# All combinations of the sons' ages; no age may exceed 44
a = expand.grid(x=1:44,y=1:44,z=1:44) %>%
  filter(y>=x & z>=y) # drop combinations that differ only by permutation
b = a %>% mutate(p = x*y*z, s=x+y+z) %>%
  filter(p>17 & p<60) %>% # the mathematician's age is below 60
  group_by(p) %>% mutate(count_p = n()) %>%
  filter(count_p>1) %>% # the product alone did not identify the ages
  filter(s==14) %>% # the date is the 14th
  group_by(p, s) %>% mutate(count_ps = n()) %>%
  filter(count_ps>1) %>% # the sum still did not identify the ages
  filter(y>x & z>y) %>% # the sons have distinct ages
  group_by(p, s) %>% mutate(count_ps = n()) %>% filter(count_ps==1) # after this clue the answer is unique
b # 1,5,8
dd89ff9da7591bafd546c2ca398e2bba4ad60cc7 | 0a1d55418d5185ed4d61ef17785a58addf88c56a | /R/api_get.R | 75555498711296d7f4b6872dcb49c7419dd81a57 | [] | no_license | rupertoverall/COVIDminerAPI | 707fe24698ec4b5131874bdb888cbdce62fc2363 | 1b0522f169c1d52c8178abcebdf0463440c55df1 | refs/heads/master | 2023-03-13T17:19:57.145036 | 2021-03-06T10:06:28 | 2021-03-06T10:06:28 | 344,797,447 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,312 | r | api_get.R | #' Get interactions from the COVIDminer database.
#'
#' This function takes a vector of COVIDminer database IDs and allows networks
#' containing these entities to be retrieved.
#'
#' @param query A character vector of text descriptors of database IDs. See
#' Details.
#' @param filters A character vector of filters. Default is \code{all}, which
#' will not apply any filters. A good option for general use is
#' \code{default}, which will only retrieve SARS-COV-2 primary literature.
#' @param level An integer describing the network level that is to be returned.
#' Either 0 (only the entities specified in \code{query}) or 1 (the query entities and their first neighbours; the default).
#' @param unmapped Logical. Should unmapped entities be used?
#' @param meta Logical. If \code{TRUE}, duplicate interactions will be merged
#' into a single directed edge. The results column \code{edge_weight} contains
#' the number of edges that were merged. Default is to show each interaction
#' separately (so that literature source information is retained).
#' @param sort Logical. Should the result be sorted. Currently not implemented.
#' @param timeout An integer specifying the timeout in seconds for the http
#' query.
#' @param show.url Logical. Should the URL be displayed in the console (useful
#' for debugging).
#'
#' @return A nested \code{list} object with top level elements; query (details
#' of the query as interpreted by the server), results (a \code{data.frame}
#' with results for each query element).
#'
#' @examples
#'
#' search.response <- COVIDminerAPI::api_search(query = c("bdnf", "autophagy"))
#' best.matches <- search.response$results$`best-match`
#'
#' get.response <- COVIDminerAPI::api_get(query = best.matches, filters = "default")
#' edge.list <- get.response$results[, c("source", "relation", "target")]
#'
#' @importFrom httr modify_url
#' @importFrom jsonlite fromJSON
#' @importFrom RCurl getURL
#' @importFrom stats setNames
#' @importFrom utils URLencode setTxtProgressBar txtProgressBar
#' @export
api_get = function(
                   query,
                   filters = "all",
                   level = 1,
                   unmapped = TRUE,
                   meta = FALSE,
                   sort = FALSE,
                   timeout = 100,
                   show.url = FALSE
                   ){
  # These options cannot be user set (or they would break the code).
  api.url = "https://rupertoverall.net/covidminer/api/get"
  return = "json"
  # Collapse query terms into a comma-separated string for the URL.
  queries = sapply(query, utils::URLencode, reserved = TRUE)
  # Check for clean input; every problem is reported as a warning and a
  # NULL result is returned without contacting the server.
  clean = TRUE
  # BUG FIX: the original tested 'query[1] == ""' directly, which crashed
  # with "argument is of length zero" for character(0) input (and with
  # "missing value where TRUE/FALSE needed" for NA) instead of warning and
  # returning NULL.
  if(length(query) == 0 || is.na(query[1]) || query[1] == ""){
    clean = FALSE
    warning("The 'query' parameter cannot be empty.")
  }
  if(!as.numeric(level) %in% c(0, 1)){
    clean = FALSE
    warning("The 'level' parameter must be either 1, 0.")
  }
  if(!as.numeric(meta) %in% c(0, 1)){
    clean = FALSE
    warning("The 'meta' parameter must be either 1, 0, TRUE or FALSE.")
  }
  if(!as.numeric(unmapped) %in% c(0, 1)){
    clean = FALSE
    warning("The 'unmapped' parameter must be either 1, 0, TRUE or FALSE.")
  }
  if(!as.numeric(sort) %in% c(0)){
    #clean = FALSE
    #warning("The 'sort' parameter must be either 1, 0, TRUE or FALSE.")
    warning("The 'sort' parameter is not yet implemented")
  }
  # Abort if input not clean
  if(!clean){
    #
    return(NULL)
  }else{
    # Construct the API call and retrieve results.
    json = tryCatch({
      url = httr::modify_url(
        paste0(api.url),
        query = list(
          ids = I(paste(queries, collapse = ",")),
          filters = paste(filters, collapse = ","),
          level = as.numeric(level),
          unmapped = as.numeric(unmapped),
          meta = meta,
          return = return,
          sort = as.numeric(sort)
        )
      )
      if(show.url) message(url)
      chars = unlist(strsplit(RCurl::getURL(url), ""))
      cleantxt = paste(chars[!grepl("[\u01-\u08\u7F-\u9F]", chars)], collapse = "") # Remove control characters if they slip through.
      jsonlite::fromJSON(cleantxt)
    },
    error = function(e){
      warning("Could not connect to the COVIDminer API.")
      return(NULL)
    })
    if(length(json$results) == 1){
      message(paste(" Retrieved", length(json$results), "statement."))
    }else{
      message(paste(" Retrieved", length(json$results), "statements."))
    }
    # Flatten the per-statement result records into a data.frame and coerce
    # the edge weights back to numeric.
    json$results = as.data.frame(do.call("rbind", lapply(json$results, unlist)))
    json$results$edge_weight = as.numeric(json$results$edge_weight)
    return(json)
  }
}
|
613626665cd9169d09df3ff89a4273ec071543e2 | 0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb | /cran/paws.cost.management/man/costandusagereportservice_put_report_definition.Rd | 0f84baedeca4cce3ac4557a43eb92bb3b4541bc8 | [
"Apache-2.0"
] | permissive | paws-r/paws | 196d42a2b9aca0e551a51ea5e6f34daca739591b | a689da2aee079391e100060524f6b973130f4e40 | refs/heads/main | 2023-08-18T00:33:48.538539 | 2023-08-09T09:31:24 | 2023-08-09T09:31:24 | 154,419,943 | 293 | 45 | NOASSERTION | 2023-09-14T15:31:32 | 2018-10-24T01:28:47 | R | UTF-8 | R | false | true | 767 | rd | costandusagereportservice_put_report_definition.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/costandusagereportservice_operations.R
\name{costandusagereportservice_put_report_definition}
\alias{costandusagereportservice_put_report_definition}
\title{Creates a new report using the description that you provide}
\usage{
costandusagereportservice_put_report_definition(ReportDefinition)
}
\arguments{
\item{ReportDefinition}{[required] Represents the output of the PutReportDefinition operation. The content
consists of the detailed metadata and data file information.}
}
\description{
Creates a new report using the description that you provide.
See \url{https://www.paws-r-sdk.com/docs/costandusagereportservice_put_report_definition/} for full documentation.
}
\keyword{internal}
|
d4946b5b738a56556076f18e84702259e8d558a2 | 360df3c6d013b7a9423b65d1fac0172bbbcf73ca | /FDA_Pesticide_Glossary/diazosulfonate.R | aefe5fc5d721cae334f1be79a3b018208b593470 | [
"MIT"
] | permissive | andrewdefries/andrewdefries.github.io | 026aad7bd35d29d60d9746039dd7a516ad6c215f | d84f2c21f06c40b7ec49512a4fb13b4246f92209 | refs/heads/master | 2016-09-06T01:44:48.290950 | 2015-05-01T17:19:42 | 2015-05-01T17:19:42 | 17,783,203 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 240 | r | diazosulfonate.R | library("knitr")
library("rgl")
# Earlier multi-step pipeline kept for reference: knit the Rmd to markdown,
# convert the markdown to HTML, then produce a PDF with pandoc.
#knit("diazosulfonate.Rmd")
#markdownToHTML('diazosulfonate.md', 'diazosulfonate.html', options=c("use_xhml"))
#system("pandoc -s diazosulfonate.html -o diazosulfonate.pdf")
# Render the R Markdown source straight to HTML in one step.
knit2html('diazosulfonate.Rmd')
|
10c2a699b5b7eb005c1ae297cc56511ca403d18d | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/inaparc/examples/rsamp.Rd.R | 19a602b86414e37e67caf9b8deec75bf8226f7c1 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 243 | r | rsamp.Rd.R | library(inaparc)
### Name: rsamp
### Title: Initialization of cluster prototypes using simple random
###   sampling
### Aliases: rsamp
### Keywords: cluster
### ** Examples
# Use the iris measurements as the clustering input.
data(iris)
# Draw k = 5 prototype rows at random from the four numeric columns.
# rsamp() is provided by the 'inaparc' package.
res <- rsamp(x=iris[,1:4], k=5)
# 'v' holds the sampled prototype matrix (one row per cluster seed).
v <- res$v
print(v)
|
7b6997a0cbaeaac9a2c15369d1724a171727b477 | 9eaee48e819bded9e865b97d327df65563fe9616 | /cachematrix.R | de95677b2e3102d60a25ce03580d9e79ea343422 | [] | no_license | mmalhotra01/ProgrammingAssignment2 | c6a803312eeca76a618fd766553b81ad64a4500d | 62338d19af897c1ef6735789ee6f2107b5960705 | refs/heads/master | 2020-04-06T04:58:17.208179 | 2015-04-25T14:45:39 | 2015-04-25T14:45:39 | 34,423,612 | 0 | 0 | null | 2015-04-23T00:19:40 | 2015-04-23T00:19:40 | null | UTF-8 | R | false | false | 854 | r | cachematrix.R | ## Put comments here that give an overall description of what your
## functions do: together they cache the inverse of a matrix so that the
## (potentially expensive) inversion is performed at most once per matrix.
## makeCacheMatrix: builds the cache-aware matrix wrapper consumed by cacheSolve.
# this function works like a class, it creates a list of functions: set, get, setCache and getCache.
# Create a "cached matrix": a list of accessor closures around a matrix 'x'
# plus a lazily filled cache slot (typically holding the inverse).
#   $set(y)        replace the matrix and invalidate the cache
#   $get()         return the stored matrix
#   $setCache(var) store a value in the cache
#   $getCache()    return the cached value (NULL when unset)
makeCacheMatrix <- function(x = matrix()) {
    cached <- NULL
    replace_matrix <- function(value) {
        # A new matrix makes any previously cached result stale.
        x <<- value
        cached <<- NULL
    }
    read_matrix <- function() {
        x
    }
    write_cache <- function(var) {
        cached <<- var
    }
    read_cache <- function() {
        cached
    }
    list(
        set = replace_matrix,
        get = read_matrix,
        setCache = write_cache,
        getCache = read_cache
    )
}
## cacheSolve: return the inverse of the special "matrix" 'x', fetching it
## from the cache when available and computing (then caching) it otherwise.
# Return the inverse of a cached-matrix object created by makeCacheMatrix().
# The inverse is computed with solve() on first use and stored via
# x$setCache(); subsequent calls return the stored value directly.
#
# x   : a cached-matrix object (list exposing $get/$getCache/$setCache)
# ... : currently unused; kept for call compatibility
cacheSolve <- function(x, ...) {
    inverse <- x$getCache()
    if (is.null(inverse)) {
        # Cache miss: invert the stored matrix and remember the result.
        inverse <- solve(x$get())
        x$setCache(inverse)
    } else {
        message("Fetching Cached Data")
    }
    inverse
}
|
bbf0fe2627e46b410450b2f908708264a0b9df80 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/agridat/examples/zuidhof.broiler.Rd.R | a7e0b802ae275fadbd6d3f82b39d4a3990ff7a7d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,278 | r | zuidhof.broiler.Rd.R | library(agridat)
### Name: zuidhof.broiler
### Title: Daily weight, feed, egg measurements for a broiler
### Aliases: zuidhof.broiler
### Keywords: datasets
### ** Examples
data(zuidhof.broiler)
dat <- zuidhof.broiler
dat <- transform(dat, age=age/7) # Change days into weeks
# Reproducing figure 1 of Zuidhof et al.
# Plot using left axis
# Widen the right margin so the secondary axis label fits; 'op' keeps the
# previous graphics settings so they can be restored at the end.
op <- par(mar=c(5,4,4,4))
plot(bw~age, dat, xlab="Age (weeks)", ylab="Bodyweight (g)",
     main="zuidhof.broiler",
     xlim=c(20,32), ylim=c(0,4000), pch=20)
lines(targetbw~age, subset(dat, !is.na(targetbw)), col="black")
# Now plot using the right axis
# par(new=TRUE) overlays the next plot on the same device without clearing it.
par(new=TRUE)
plot(adfi~age, subset(dat, !is.na(adfi)),
     xlab="", ylab="", xlim=c(20,32), xaxt="n",yaxt="n",
     ylim=c(-50,175), type="s", lty=2)
axis(4, at=c(-50,-25,0,25,50,75,100,125,150,175), col="red", col.axis="red")
mtext("Weight (g)", side=4, line=2, col="red")
lines(adg~age, subset(dat, !is.na(adg)), col="red", type="s", lty=1, lwd=2)
abline(h=c(0,52), col="red")
# Vertical segments from the baseline mark the egg weight on each day.
with(dat, segments(age, 0, age, eggwt, col="red"))
legend(20, -40, c("Body weight", "Target BW", "Feed/day", "Gain/day", "Egg wt"),
       bty="n", cex=.5, ncol=5,
       col=c("black","black","red","red","red"),
       lty=c(-1,1,2,1,1), lwd=c(1,1,1,2,1), pch=c(20,-1,-1,-1,-1))
# Restore the graphics parameters saved above.
par(op)
|
be5dcad55abf290d6fe513bb64ac976228b9261c | cd3d6404093d457944dd56d6b5411da4262ac982 | /lucky_num.R | 21ec7f93fff838392af96c9775f48f7edb731a68 | [] | no_license | mascoma/make_graph | 5bf48ae33da1635085fb23522ca430759cb22201 | ff67b8cda9d3ccd5a2e9492a295c7bdcfb68d56d | refs/heads/master | 2021-01-10T07:21:29.777630 | 2016-03-19T05:55:11 | 2016-03-19T05:55:11 | 50,301,016 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,075 | r | lucky_num.R | library(dplyr)
library(reshape2)
library(ggplot2)
library(RCurl)
library(splitstackshape)
# Download the full history of Powerball winning numbers (whitespace-separated
# text; the first row is a header and is dropped below).
URL = "http://www.powerball.com/powerball/winnums-text.txt"
x = getURL(URL)
dat = read.delim(textConnection(x), header = F, sep="")
# Columns 2-8: the five white balls, the red (power) ball and the power play.
pn = dat[-1,2:8]
names(pn) = c("wb1", "wb2", "wb3", "wb4", "wb5", "rb", "pp")
# Column 1 is the draw date "mm/dd/yy"; split it into three columns.
v1 = dat[-1,1]
# NOTE(review): as.data.table() comes from data.table, pulled in via
# splitstackshape -- confirm it is attached in a fresh session.
v1 = cSplit(as.data.table(v1),splitCols="v1",sep = "/")
names(v1) = c("mm", "dd", "yy")
pn = cbind(v1, pn)
# Long format of the five white-ball columns; tally() counts how often each
# number was drawn (column 'n'), and 'weight' is that count as a percentage.
wb = melt(pn[,4:8])
wb_count = tally(group_by(wb, value), sort = T)
percent = (wb_count[,2]/sum(wb_count[,2]))*100
names(percent) = "weight"
wb_count = cbind(wb_count, percent, group = "wb")
wb_count = as.data.frame(wb_count)
# Same counting for the red ball.
rb = as.data.frame(pn[,9])
names(rb) = "value"
rb_count = tally(group_by(rb, value), sort = T)
percent2 = (rb_count[,2]/sum(rb_count[,2]))*100
names(percent2) = "weight"
rb_count = cbind(rb_count, percent2, group = "rb")
rb_count = as.data.frame(rb_count)
pb_count = rbind(wb_count, rb_count)
# Distance of each (value, n) point from the origin; computed but not used
# by the plot below.
dist = sqrt((pb_count[,1])^2 + (pb_count[,2])^2)
pb_count = cbind(pb_count, dist)
# Scatter of draw count (x) vs ball number (y), sized/coloured by the
# percentage weight, with a 2d-density overlay on a black background.
g = ggplot(pb_count, aes(x=n, y=value, size = weight)) +
  stat_density2d(alpha = 0.2) +
  geom_point(aes(x = n, y= value, color = weight,shape = group), alpha = 0.2) +
  #scale_colour_gradient(limits = c(1460, 39440),low = "firebrick4", high = "lightgoldenrodyellow")+
  geom_line(aes(x = n, y = value, color = weight), alpha = 0.2)+
  #scale_colour_brewer(palette = "Set1") +
  scale_shape_manual(values = c(15,18))+
  scale_colour_gradientn(colours=c("red","green","purple"))+
  theme(panel.background = element_rect(fill = "black"),
        axis.text.x = element_text(face = "bold", color ="gray18" , size = 5, angle = 45 ),
        axis.text.y = element_text(face = "bold", color = "gray18", size = 5, angle = 45 ),
        panel.grid.major.y = element_blank(),
        panel.grid.minor.y = element_blank(),
        panel.grid.major.x = element_blank(),
        panel.grid.minor.x = element_blank(),
        legend.position = "none") +xlab("") + ylab("")
g
# Write the figure to disk (hard-coded local path).
ggsave("/Users/Xin/Documents/data_visulization/lucky_num.png", plot = g, dpi = 600)
|
88346c1d4f2a344a1ff3f05261569e180a56454c | 9d10ba9e0b76fd8e78ecf30ad3da08080047d3b0 | /inst/scratch/census scratch work/census bivar sctatch.R | ae6d500d79e9087e6d3bdc35a759e83b6493eaec | [
"Apache-2.0",
"LicenseRef-scancode-us-govt-public-domain",
"CC0-1.0",
"LicenseRef-scancode-public-domain"
] | permissive | ishaandave/DemographySpawnR | 75dc67add6fac4b3fe3a26682a2cfae464d24e56 | 802abee7e4d5d4cb733dbc00898ca0a5ac442dfb | refs/heads/master | 2021-07-13T14:04:10.198404 | 2020-07-30T02:51:00 | 2020-07-30T02:51:00 | 191,783,050 | 0 | 0 | Apache-2.0 | 2019-06-13T14:54:13 | 2019-06-13T14:54:12 | null | UTF-8 | R | false | false | 2,065 | r | census bivar sctatch.R |
### Takes census data and allows you to map racial distributions across the US (according to HHS boundaries)
### Will allow user to make a map whatever race/ethnicity they want to input
## eventually want to make it so, so we can create racial distributions across the 10 HHS divisions and use that
## to sample into our new dataset
# NOTE(review): hard-coded local path; this script only runs on that machine.
census = data.frame(read.csv("C:/Users/yhd8/Desktop/Data/Census/cc-est2017-alldata.csv"))
# Keep a single year (YEAR code 10) and the all-ages rows (AGEGRP == 0).
census2 = census[census$YEAR == 10 & census$AGEGRP == 0, ]
#want only measurements in 2017 to see if anything works
# Recover the calendar year: code 10 + 2007 = 2017 (per the comment above).
census2$YEAR_new = census2$YEAR + 2007
names(census2)[names(census2) == "YEAR_new"] = "Year"
names(census2)[names(census2) == "STNAME"] = "state"
library(mapproj)
library(maps)
library(usmap)
library(dplyr)
library(ggplot2)
## create function to plot whatever race/ethnicity across the US
# Choropleth of the population share of a given race/ethnicity per HHS
# division. 'race' must match the census column prefix (the function looks up
# the <RACE>_FEMALE and <RACE>_MALE count columns).
# NOTE(review): relies on globals defined elsewhere in the session:
#   census2 (filtered census table), hhsRegions (state -> division lookup),
#   us_state_map (polygon data with a 'division' column) -- confirm before use.
raceDistribution = function (race) {
  # Column names holding the female/male counts for this race code.
  female = paste0(toupper(race), "_FEMALE")
  male = paste0(toupper(race), "_MALE")
  # Combined count; this modification is local to the function's copy of census2.
  census2[, paste0("TOT_", toupper(race))] = census2[, female] + census2[, male]
  # State-level totals: overall population and population of this race.
  totals = aggregate(census2[,c("TOT_POP", paste0("TOT_", toupper(race)))], by = list(census2$state), sum)
  names(totals)[1] = "state"
  totals$state = tolower(totals$state)
  names(totals) = c("state", "totpop", paste0("n", race))
  # Per-state proportion (computed but not used after the division rollup).
  totals$prop = totals[, paste0("n", race)] / totals$totpop
  # Attach each state's HHS division, then re-aggregate at division level.
  totalsAndHHS = merge(totals, hhsRegions, by.x = "state", by.y = "region")
  aggDivision = aggregate(totalsAndHHS[, c("totpop", paste0("n", race))], by = list(totalsAndHHS$division), sum)
  aggDivision$overallRate = aggDivision[, paste0("n", race)] / aggDivision$totpop
  names(aggDivision)[1] = "division"
  # Join the division rates onto the map polygons, preserving draw order.
  us_state_map.mod <- merge(x=us_state_map, y=aggDivision, all.x=TRUE, by.x="division", by.y="division")
  us_state_map.mod = arrange(us_state_map.mod, order);
  us_state_map.mod$division = as.factor(us_state_map.mod$division)
  #plot a map of each division
  map <- ggplot()
  map = map + geom_polygon(data=us_state_map.mod, aes(x=long, y=lat, group=group, fill=overallRate))
  map = map + scale_fill_gradient(low = "lightblue", high = "darkblue")
  # Return the ggplot object (rendered when printed by the caller).
  map
}
|
44b0592b30c80b773c24985373fce32c67d21bde | e9fe5defca0107b67b0f571d70d38ace249aef75 | /R/mainFUN.R | cc77a3a78b61686724721686b5b46b997bcfb33b | [] | no_license | inambioinfo/MSIQ | ea5235bdf2ab2d0e8e7d11f51ed61af03ef2364b | 36fc63ee0db1cba70f643c3d8b9b8a191d1eef43 | refs/heads/master | 2020-03-28T03:10:45.409208 | 2018-02-14T17:55:55 | 2018-02-14T17:55:55 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,684 | r | mainFUN.R | #' use MSIQ to quantify transcript expression from multiple samples
#'
#' @param D An integer specifying the number of RNA-seq samples supplied
#' @param gtf_path A character specifying the full path of the .gtf file
#' @param bam_path A character vector. Each element gives the full path of one .bam file.
#' @param A An integer specifying the first parameter used the Beta prior. Default is 5.
#' @param B An integer specifying the second parameter used the Beta prior. Default is 2.
#' @param ncores An integer denoting the number of cores used for parallel computation.
#' @return Save the estimation results to a text file transcript_summary.txt.
#' @export
#' @import parallel
#' @import gtools
#' @import rbamtools
#' @import rPython
#' @import GenomicFeatures
#' @import GenomicAlignments
msiq = function(D, gtf_path, bam_path, A = 5, B = 2, ncores){
  # supfolder = "~/Dropbox/MSIQ_package/MSIQ/R/"
  # python.load(paste(supfolder, "Gibbs_functions.py", sep=""))
  # Load the Gibbs-sampling helpers shipped inside the installed MSIQ
  # package and expose them through rPython.
  py_path = system.file("exec", "Gibbs_functions.py", package = "MSIQ")
  print(py_path)
  python.load(py_path)
  ###############################################
  ### get gene_models from annotation (gtf file)
  ###############################################
  # All intermediate and final files are written under this folder in the
  # current working directory.
  folder = "MSIQ_result/"
  dir.create(folder, showWarnings = TRUE, recursive = FALSE)
  gene_path = paste(folder, "gene_models.RData", sep = "")
  gene_models = gtf_to_gene_models(gtf_path, save_path = gene_path, ncores = ncores)
  # Re-load the saved gene models so the in-memory object matches the file.
  load(file=gene_path)
  ###############################################
  ### get statistics from bam file
  ###############################################
  # Index files are assumed to sit next to each .bam as <name>.bam.bai.
  bai_path = paste(bam_path, ".bai", sep = "")
  print("extarct basic statistics from bam files ...")
  part1 <- get_FPKM_and_num(gene_models, D, bam_path, bai_path, num_cores = ncores)
  FPKM_by_gene <- part1$FPKM_by_gene
  N_by_bam <- part1$N_by_bam
  save(FPKM_by_gene, file=paste(folder, "FPKM_by_gene.RData", sep=""))
  save(N_by_bam, file=paste(folder, "N_by_bam.RData", sep=""))
  frag_len = get_frag_len(gene_models, D, bam_path, strandmode = 0, folder, ncores = ncores)
  ###############################################
  ### process reads in bam files
  ###############################################
  print("extarct reads from bam files ...")
  # One sub-directory per RNA-seq sample ("rep1" ... "repD").
  for (i in 1:D){
    dir.create(paste(folder, "rep", i, sep = ""), showWarnings = TRUE, recursive = FALSE)
  }
  bin_by_gene <- get_bin(gene_models, D, bam_path, num_cores = ncores,
                         save_folder = folder, start_id = 1)
  ###############################################
  ### MSIQ estimation
  ###############################################
  print("start isoform quantification ...")
  # NOTE(review): num_by_gene, mu_D and sigma_D are presumably written into
  # 'folder' as side effects of the helper calls above -- confirm.
  load(paste(folder, "FPKM_by_gene.RData", sep=""))
  load(paste(folder, "num_by_gene.RData", sep=""))
  load(paste(folder, "N_by_bam.RData", sep=""))
  load(paste(folder, "mu_D.RData", sep=""))
  load(paste(folder, "sigma_D.RData", sep=""))
  # Pre-compute exon coordinates and lengths for every gene model.
  gene_exons_list <- lapply(1:nrow(gene_models),function(i){
    exon_starts <- as.numeric(strsplit(gene_models[i,"exon_starts"],split=",")[[1]])
    exon_ends <- as.numeric(strsplit(gene_models[i,"exon_ends"],split=",")[[1]])
    exon_Len <- exon_ends - exon_starts + 1
    list(exon_starts=exon_starts, exon_ends=exon_ends, exon_Len=exon_Len)
  })
  names(gene_exons_list) <- gene_models[,"geneName"]
  rep_thre = ceiling(D/2) # genes should have reads in at least rep_thre samples
  dir.create(paste(folder, "estimation", sep = ""), showWarnings = TRUE, recursive = FALSE)
  # Estimate each gene independently in parallel; results are written to
  # per-gene .RData files (the returned gene ids are not used afterwards).
  Result <- mclapply(1:nrow(gene_models), function(geneid){
    #Result <- mclapply(geneIDs, function(geneid){
    if(geneid %% 50 == 0) print(paste("gene", geneid))
    gene_name = gene_models[geneid, "geneName"]
    FPKM = FPKM_by_gene[[gene_name]]
    N_gene <- num_by_gene[[gene_name]]
    # try() so one failing gene does not abort the whole run; failures are
    # saved as "try-error" objects and filtered out below.
    Fresult <- try(estFun(geneid, gene_models, gene_exons_list,
                          gene_name, FPKM, N_gene, N_by_bam, D, mu_D, sigma_D,
                          A = A, B = B, rep_thre = rep_thre),
                   silent=TRUE)
    gene_res_name <- paste(folder, "estimation/", geneid, "est.RData", sep="")
    save(Fresult, file=gene_res_name)
    return(geneid)
  }, mc.cores = ncores)
  # Collect every per-gene result file and flatten it into one table.
  reslist <- list.files(paste(folder, "estimation/", sep=""))
  reslist <- paste(folder, "estimation/", reslist, sep = "")
  resdata <- lapply(1:length(reslist), function(i){
    # print(i)
    load(reslist[i])
    # Successful fits are lists; "try-error" results are dropped (NULL).
    if (class(Fresult) == "list"){
      mat = matrix(nrow = nrow(Fresult$iso_FPKM), ncol = 3 + 2*D)
      mat[,1] = rownames(Fresult$iso_FPKM)
      mat[,2] = Fresult$gene_name
      mat[,3] = Fresult$tau_result
      mat[,4:(3+D)] = Fresult$iso_FPKM
      # estE.D (one value per sample) is repeated on every transcript row.
      mat[, (4+D):(3+2*D)] = matrix(rep(Fresult$estE.D, nrow(Fresult$iso_FPKM)),
                                    nrow = nrow(Fresult$iso_FPKM), byrow = TRUE)
      #mat[1, 4+2*D] = Fresult$estGamma
      return(mat)
    }else{
      return(NULL)
    }
  })
  resdata = resdata[!sapply(resdata, is.null)]
  resdata = Reduce(rbind, resdata)
  colnames(resdata) = c("transcriptID", "geneID", "frac",
                        paste("FPKM_", 1:D, sep = ""),
                        paste("sample", 1:D, sep = ""))
  # "gamma")
  # Final per-transcript summary written next to the intermediates.
  write.table(resdata, paste(folder, "transcript_summary.txt", sep = ""),
              quote = FALSE, row.names = FALSE)
  # subdata = resdata[, c(2, (4+D):(3+2*D))]
  # subdata = subdata[complete.cases(subdata), ]
  # write.table(subdata, paste(folder, "sample_summary.txt", sep = ""),
  #             quote = FALSE, row.names = FALSE)
  # save(resdata, file = paste(folder, "resdata_test.RData", sep = ""))
  print("calculation finished!")
  return(0)
}
|
f16ca58d9d9388c691626a9ea9ae0bd95101e704 | 705255987191f8df33b8c2a007374f8492634d03 | /design/devel/varnames.R | 0fb3ec90a180d1ae294e76ecb63545a8130c2eb7 | [] | no_license | Roche/crmPack | be9fcd9d223194f8f0e211616c8b986c79245062 | 3d897fcbfa5c3bb8381da4e94eb5e4fbd7f573a4 | refs/heads/main | 2023-09-05T09:59:03.781661 | 2023-08-30T09:47:20 | 2023-08-30T09:47:20 | 140,841,087 | 24 | 9 | null | 2023-09-14T16:04:51 | 2018-07-13T11:51:52 | HTML | UTF-8 | R | false | false | 236 | r | varnames.R | alllines <- paste(readLines("varnames.txt"),
collapse = "\n"
)
# Echo the collapsed file contents (interactive inspection).
alllines
# Split the captured text on the literal token "visible"; the part after the
# first occurrence is the chunk of interest.
splitlines <- strsplit(alllines, split = "visible", fixed = TRUE)
# Pull out the text between the first pair of single quotes in that chunk.
gsub(
  pattern = ".*'(.+)'.*", replacement = "\\1",
  x = splitlines[[1]][2],
  perl = TRUE
)
|
71647e19db47fdd50cbef8bd2e1ee3e0d28aa629 | 4791761f3cb1fd85f280f1d2d5c815042f713cc3 | /tests/testthat/test-remote.R | 439af3b6c2153f944acb13cfef9cd09c833e926f | [
"MIT"
] | permissive | han-tun/orderly | c6de7267c426b1f96f7be86aaab232bcc71e4da2 | fae07efedfd9c8ab7f5347f5463ef2b4d7203ece | refs/heads/master | 2021-02-13T12:40:06.741484 | 2020-03-02T11:27:17 | 2020-03-02T11:27:17 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,583 | r | test-remote.R | context("remote")
# With no default remote configured, setting NULL is a no-op and both lookup
# paths must fail with the same informative error.
test_that("defaults: null", {
  path <- prepare_orderly_example("minimal")
  expect_null(orderly_default_remote_set(NULL, path))
  expect_error(orderly_default_remote_get(path, FALSE),
               "default remote has not been set yet")
  expect_error(get_remote(NULL, path),
               "default remote has not been set yet")
})
# get_remote() must reject both a non-string identifier and a name that does
# not correspond to any configured remote.
test_that("get_remote type failure", {
  dat <- prepare_orderly_remote_example()
  expect_error(get_remote(1, dat$config),
               "Unknown remote type")
  expect_error(get_remote("extra", dat$config),
               "Unknown remote 'extra'")
})
# Requesting an id the remote does not hold should report the valid versions
# (here the single id created by the fixture).
test_that("orderly_pull_archive with wrong version", {
  dat <- prepare_orderly_remote_example()
  expect_error(
    orderly_pull_archive("example", new_report_id(), root = dat$config,
                         remote = dat$remote),
    paste0("Version '.+?' not found at '.+?': valid versions are:.+",
           dat$id1))
})
# Pulling a report's dependencies downloads the upstream "example" report;
# pulling again after a fresh upstream run must add the new id as well.
test_that("pull dependencies", {
  dat <- prepare_orderly_remote_example()
  expect_message(
    orderly_pull_dependencies("depend", root = dat$config,
                              remote = dat$remote),
    "\\[ pull\\s+ \\] example:")
  expect_equal(orderly_list_archive(dat$config),
               data_frame(name = "example", id = dat$id2))
  ## and update
  id3 <- orderly_run("example", root = dat$path_remote, echo = FALSE)
  orderly_commit(id3, root = dat$path_remote)
  expect_message(
    orderly_pull_dependencies("depend", root = dat$config,
                              remote = dat$remote),
    "\\[ pull\\s+ \\] example:")
  expect_equal(orderly_list_archive(dat$config),
               data_frame(name = "example", id = c(dat$id2, id3)))
})
# The progress message must report the dependency count with correct
# singular/plural wording (0 dependencies vs 1 dependency).
test_that("pull_dependencies counts dependencies", {
  dat <- prepare_orderly_remote_example()
  expect_message(
    orderly_pull_dependencies("example", root = dat$config,
                              remote = dat$remote),
    "\\[ depends\\s+ \\] example has 0 dependencies")
  id <- orderly_run("example", root = dat$path_remote, echo = FALSE)
  orderly_commit(id, root = dat$path_remote)
  expect_message(
    orderly_pull_dependencies("depend", root = dat$config,
                              remote = dat$remote),
    "\\[ depends\\s+ \\] depend has 1 dependency")
})
## These need dealing with properly, but check that they trigger
## correctly here:
# Pulling from an archive created by an old orderly (0.6.0) should trigger
# an on-the-fly migration, and the pulled report must end up at the current
# archive version.
test_that("pull from old remote", {
  oo <- options(orderly.nowarnings = TRUE)
  on.exit(options(oo))
  path_local <- prepare_orderly_example("demo")
  path_remote <- unpack_reference("0.6.0")
  ## In order to make this work we do need to update the data table.
  ## This will stop being a problem shortly.
  ##
  ## Once we get a further migration we should disable importing of
  ## all files prior to archive version 0.6.8 because of this problem.
  db_local <- orderly_db("destination", root = path_local)
  db_remote <- orderly_db("destination", root = path_remote, validate = FALSE)
  tbl_data <- DBI::dbReadTable(db_remote, "data")
  DBI::dbWriteTable(db_local, "data", tbl_data, append = TRUE)
  DBI::dbDisconnect(db_local)
  DBI::dbDisconnect(db_remote)
  expect_message(
    orderly_pull_archive("minimal", root = path_local, remote = path_remote),
    "^\\[ migrate")
  contents <- orderly_list_archive(path_local)
  expect_equal(nrow(contents), 1)
  path <- file.path(path_local, "archive", "minimal", contents$id)
  expect_equal(
    readRDS(path_orderly_run_rds(path))$archive_version,
    numeric_version(read_orderly_archive_version(path_local)))
})
## These need dealing with properly, but check that they trigger
## correctly here:
# A report stamped with a future orderly version must be refused with an
# upgrade prompt rather than imported.
test_that("pull from new remote", {
  dat <- prepare_orderly_remote_example()
  p <- path_orderly_run_rds(
    file.path(dat$path_remote, "archive", "example", dat$id2))
  d <- readRDS(p)
  d$archive_version <- numeric_version("100.100.100")
  saveRDS(d, p)
  expect_error(
    orderly_pull_archive("example", dat$id2, root = dat$path_local,
                         remote = dat$remote),
    "Report was created with orderly more recent than this, upgrade!")
})
# Pulling a migrated 0.5.4 archive into an empty local archive must upgrade
# the local archive version and leave it fully usable (run, commit, migrate).
test_that("pull migrated archive", {
  oo <- options(orderly.nowarnings = TRUE)
  on.exit(options(oo))
  path_local <- prepare_orderly_example("demo")
  unlink(file.path(path_local, "archive"), recursive = TRUE)
  dir.create(file.path(path_local, "archive"))
  path_remote <- unpack_reference("0.5.4")
  withr::with_options(list(orderly.nmowarnings = TRUE),
                      orderly_migrate(path_remote))
  file.copy(file.path(path_local, "orderly_config.yml"),
            file.path(path_remote, "orderly_config.yml"),
            overwrite = TRUE)
  dir.create(file.path(path_remote, "global"))
  ## Empty archives have a null version:
  expect_equal(read_orderly_archive_version(path_local), "0.0.0")
  remote <- orderly_remote_path(path_remote)
  orderly_pull_archive("use_dependency", root = path_local, remote = remote)
  ## The archive version has been upgraded:
  expect_equal(read_orderly_archive_version(path_local),
               as.character(cache$current_archive_version))
  expect_setequal(orderly_list_archive(path_local)$name,
                  c("other", "use_dependency"))
  ## This fails in old versions, but will work here:
  id <- orderly_run("minimal", root = path_local, echo = FALSE)
  orderly_commit(id, root = path_local)
  expect_true(id %in% orderly_list_archive(path_local)$id)
  ## And this is not necessary but also fails on the previous version
  ## because of issues re-running migrations.
  expect_silent(orderly_migrate(root = path_local))
})
|
3e297a0e5eaa7cc0d8caead16c70ec5fc596dcf0 | 2908e8dd8c6fe598942d21e2b4f8112daf4aac31 | /R/model_additive.R | a246e240ad2db6d215fa657677dfd368973a8b94 | [] | no_license | cran/deaR | 734b7d33e9afecc30e49bac385d12c0ecd37af2a | 49e5427f0b0a24014e5d67420f972aac51cdcbf4 | refs/heads/master | 2023-05-11T02:54:01.278513 | 2023-05-02T08:20:02 | 2023-05-02T08:20:02 | 162,907,947 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 14,186 | r | model_additive.R | #' @title Additive DEA model.
#'
#' @description Solve the additive model of Charnes et. al (1985). With the current
#' version of deaR, it is possible to solve input-oriented, output-oriented,
#' and non-oriented additive model under constant and non-constant returns to scale.
#'
#' Besides, the user can set weights for the input slacks and/or output slacks. So,
#' it is also possible to solve weighted additive models. For example: Measure of
#' Inefficiency Proportions (MIP), Range Adjusted Measure (RAM), etc.
#'
#' @note In this model, the efficiency score is the sum of the slacks. Therefore,
#' a DMU is efficient when the objective value (\code{objval}) is zero.
#'
#' @usage model_additive(datadea,
#' dmu_eval = NULL,
#' dmu_ref = NULL,
#' orientation = NULL,
#' weight_slack_i = 1,
#' weight_slack_o = 1,
#' rts = c("crs", "vrs", "nirs", "ndrs", "grs"),
#' L = 1,
#' U = 1,
#' compute_target = TRUE,
#' returnlp = FALSE,
#' ...)
#'
#' @param datadea A \code{deadata} object with \code{n} DMUs, \code{m} inputs and \code{s} outputs.
#' @param dmu_eval A numeric vector containing which DMUs have to be evaluated.
#' If \code{NULL} (default), all DMUs are considered.
#' @param dmu_ref A numeric vector containing which DMUs are the evaluation reference set.
#' If \code{NULL} (default), all DMUs are considered.
#' @param orientation This parameter is either \code{NULL} (default) or a string, equal to
#' "io" (input-oriented) or "oo" (output-oriented). It is used to modify the weight slacks.
#' If input-oriented, \code{weight_slack_o} are taken 0.
#' If output-oriented, \code{weight_slack_i} are taken 0.
#' @param weight_slack_i A value, vector of length \code{m}, or matrix \code{m} x
#' \code{ne} (where \code{ne} is the length of \code{dmu_eval})
#' with the weights of the input slacks. If 0, output-oriented.
#' @param weight_slack_o A value, vector of length \code{s}, or matrix \code{s} x
#' \code{ne} (where \code{ne} is the length of \code{dmu_eval})
#' with the weights of the output slacks. If 0, input-oriented.
#' @param rts A string, determining the type of returns to scale, equal to "crs" (constant),
#' "vrs" (variable), "nirs" (non-increasing), "ndrs" (non-decreasing) or "grs" (generalized).
#' @param L Lower bound for the generalized returns to scale (grs).
#' @param U Upper bound for the generalized returns to scale (grs).
#' @param compute_target Logical. If it is \code{TRUE}, it computes targets.
#' @param returnlp Logical. If it is \code{TRUE}, it returns the linear problems
#' (objective function and constraints).
#' @param ... Ignored, for compatibility issues.
#'
#' @author
#' \strong{Vicente Coll-Serrano} (\email{vicente.coll@@uv.es}).
#' \emph{Quantitative Methods for Measuring Culture (MC2). Applied Economics.}
#'
#' \strong{Vicente Bolós} (\email{vicente.bolos@@uv.es}).
#' \emph{Department of Business Mathematics}
#'
#' \strong{Rafael Benítez} (\email{rafael.suarez@@uv.es}).
#' \emph{Department of Business Mathematics}
#'
#' University of Valencia (Spain)
#'
#' @references
#' Charnes, A.; Cooper, W.W.; Golany, B.; Seiford, L.; Stuz, J. (1985) "Foundations
#' of Data Envelopment Analysis for Pareto-Koopmans Efficient Empirical Production
#' Functions", Journal of Econometrics, 30(1-2), 91-107.
#' \doi{10.1016/0304-4076(85)90133-2}
#'
#' Charnes, A.; Cooper, W.W.; Lewin, A.Y.; Seiford, L.M. (1994). Data Envelopment
#' Analysis: Theory, Methodology, and Application. Boston: Kluwer Academic Publishers.
#' \doi{10.1007/978-94-011-0637-5}
#'
#' Cooper, W.W.; Park, K.S.; Pastor, J.T. (1999). "RAM: A Range Adjusted Measure
#' of Inefficiencies for Use with Additive Models, and Relations to Other Models
#' and Measures in DEA". Journal of Productivity Analysis, 11, p. 5-42.
#' \doi{10.1023/A:1007701304281}
#'
#' @examples
#' # Example 1.
#' # Replication of results in Charnes et. al (1994, p. 27)
#' x <- c(2, 3, 6, 9, 5, 4, 10)
#' y <- c(2, 5, 7, 8, 3, 1, 7)
#' data_example <- data.frame(dmus = letters[1:7], x, y)
#' data_example <- make_deadata(data_example,
#' ni = 1,
#' no = 1)
#' result <- model_additive(data_example,
#' rts = "vrs")
#' efficiencies(result)
#' slacks(result)
#' lambdas(result)
#'
#' # Example 2.
#' # Measure of Inefficiency Proportions (MIP).
#' x <- c(2, 3, 6, 9, 5, 4, 10)
#' y <- c(2, 5, 7, 8, 3, 1, 7)
#' data_example <- data.frame(dmus = letters[1:7], x, y)
#' data_example <- make_deadata(data_example,
#' ni = 1,
#' no = 1)
#' result2 <- model_additive(data_example,
#' rts = "vrs",
#' weight_slack_i = 1 / data_example[["input"]],
#' weight_slack_o = 1 / data_example[["output"]])
#' slacks(result2)
#'
#' # Example 3.
#' # Range Adjusted Measure of Inefficiencies (RAM).
#' x <- c(2, 3, 6, 9, 5, 4, 10)
#' y <- c(2, 5, 7, 8, 3, 1, 7)
#' data_example <- data.frame(dmus = letters[1:7], x, y)
#' data_example <- make_deadata(data_example,
#' ni = 1,
#' no = 1)
#' range_i <- apply(data_example[["input"]], 1, max) -
#' apply(data_example[["input"]], 1, min)
#' range_o <- apply(data_example[["output"]], 1, max) -
#' apply(data_example[["output"]], 1, min)
#' w_range_i <- 1 / (range_i * (dim(data_example[["input"]])[1] +
#' dim(data_example[["output"]])[1]))
#' w_range_o <- 1 / (range_o * (dim(data_example[["input"]])[1] +
#' dim(data_example[["output"]])[1]))
#' result3 <- model_additive(data_example,
#' rts = "vrs",
#' weight_slack_i = w_range_i,
#' weight_slack_o = w_range_o)
#' slacks(result3)
#'
#' @seealso \code{\link{model_addsupereff}}
#'
#' @import lpSolve
#'
#' @export
model_additive <-
  function(datadea,
           dmu_eval = NULL,
           dmu_ref = NULL,
           orientation = NULL,
           weight_slack_i = 1,
           weight_slack_o = 1,
           rts = c("crs", "vrs", "nirs", "ndrs", "grs"),
           L = 1,
           U = 1,
           compute_target = TRUE,
           returnlp = FALSE,
           ...) {
  # Checking whether datadea is of class "deadata" or not...
  if (!is.deadata(datadea)) {
    stop("Data should be of class deadata. Run make_deadata function first!")
  }
  # Checking non-discretionary inputs/outputs
  if ((!is.null(datadea$nd_inputs)) || (!is.null(datadea$nd_outputs))) {
    warning("This model does not take into account the non-discretionary feature for inputs/outputs.")
  }
  # Checking undesirable inputs/outputs
  if (!is.null(datadea$ud_inputs) || !is.null(datadea$ud_outputs)) {
    warning("This model does not take into account the undesirable feature for inputs/outputs.")
  }
  # Checking rts
  rts <- tolower(rts)
  rts <- match.arg(rts)
  # Bounds L/U are only meaningful under generalized returns to scale.
  if (rts == "grs") {
    if (L > 1) {
      stop("L must be <= 1.")
    }
    if (U < 1) {
      stop("U must be >= 1.")
    }
  }
  dmunames <- datadea$dmunames
  nd <- length(dmunames) # number of dmus
  # Default: evaluate every DMU; otherwise validate the given indices.
  if (is.null(dmu_eval)) {
    dmu_eval <- 1:nd
  } else if (!all(dmu_eval %in% (1:nd))) {
    stop("Invalid set of DMUs to be evaluated (dmu_eval).")
  }
  names(dmu_eval) <- dmunames[dmu_eval]
  nde <- length(dmu_eval)
  # Default: every DMU belongs to the reference set.
  if (is.null(dmu_ref)) {
    dmu_ref <- 1:nd
  } else if (!all(dmu_ref %in% (1:nd))) {
    stop("Invalid set of reference DMUs (dmu_ref).")
  }
  names(dmu_ref) <- dmunames[dmu_ref]
  ndr <- length(dmu_ref)
  input <- datadea$input
  output <- datadea$output
  inputnames <- rownames(input)
  outputnames <- rownames(output)
  ni <- nrow(input) # number of inputs
  no <- nrow(output) # number of outputs
  inputref <- matrix(input[, dmu_ref], nrow = ni)
  outputref <- matrix(output[, dmu_ref], nrow = no)
  # Non-controllable inputs/outputs: their slacks are forced to zero below.
  nc_inputs <- datadea$nc_inputs
  nc_outputs <- datadea$nc_outputs
  nnci <- length(nc_inputs)
  nnco <- length(nc_outputs)
  # Checking weights
  # A NULL weight means equal unit weights (the plain additive model).
  if(is.null(weight_slack_i)){
    weight_slack_i <- 1
  }
  if(is.null(weight_slack_o)){
    weight_slack_o <- 1
  }
  # Input slack weights: accept a full ni x nde matrix, a scalar, or a
  # length-ni vector (both recycled column-wise into a matrix).
  if (is.matrix(weight_slack_i)) {
    if ((nrow(weight_slack_i) != ni) || (ncol(weight_slack_i) != nde)) {
      stop("Invalid weight input matrix (number of inputs x number of evaluated DMUs).")
    }
  } else if ((length(weight_slack_i) == 1) || (length(weight_slack_i) == ni)) {
    weight_slack_i <- matrix(weight_slack_i, nrow = ni, ncol = nde)
  } else {
    stop("Invalid weight input vector (number of inputs).")
  }
  # Output orientation: ignore input slacks by zeroing their weights.
  if ((!is.null(orientation)) && (orientation == "oo")) {
    weight_slack_i <- matrix(0, nrow = ni, ncol = nde)
  }
  rownames(weight_slack_i) <- inputnames
  colnames(weight_slack_i) <- dmunames[dmu_eval]
  # Same validation for the output slack weights.
  if (is.matrix(weight_slack_o)) {
    if ((nrow(weight_slack_o) != no) || (ncol(weight_slack_o) != nde)) {
      stop("Invalid weight output matrix (number of outputs x number of evaluated DMUs).")
    }
  } else if ((length(weight_slack_o) == 1) || (length(weight_slack_o) == no)) {
    weight_slack_o <- matrix(weight_slack_o, nrow = no, ncol = nde)
  } else {
    stop("Invalid weight output vector (number of outputs).")
  }
  # Input orientation: ignore output slacks by zeroing their weights.
  if ((!is.null(orientation)) && (orientation == "io")) {
    weight_slack_o <- matrix(0, nrow = no, ncol = nde)
  }
  rownames(weight_slack_o) <- outputnames
  colnames(weight_slack_o) <- dmunames[dmu_eval]
  target_input <- NULL
  target_output <- NULL
  DMU <- vector(mode = "list", length = nde)
  names(DMU) <- dmunames[dmu_eval]
  ###########################
  # Returns-to-scale row(s): a constraint on sum(lambda) added to the LP.
  # crs adds nothing; vrs/nirs/ndrs fix/bound it at 1; grs bounds it in [L, U].
  if (rts == "crs") {
    f.con.rs <- NULL
    f.dir.rs <- NULL
    f.rhs.rs <- NULL
  } else {
    f.con.rs <- cbind(matrix(1, nrow = 1, ncol = ndr), matrix(0, nrow = 1, ncol = ni + no))
    if (rts == "vrs") {
      f.dir.rs <- "="
      f.rhs.rs <- 1
    } else if (rts == "nirs") {
      f.dir.rs <- "<="
      f.rhs.rs <- 1
    } else if (rts == "ndrs") {
      f.dir.rs <- ">="
      f.rhs.rs <- 1
    } else {
      f.con.rs <- rbind(f.con.rs, f.con.rs)
      f.dir.rs <- c(">=", "<=")
      f.rhs.rs <- c(L, U)
    }
  }
  # Constraints matrix
  # Decision variables, in column order: lambda (ndr), input slacks (ni),
  # output slacks (no).
  f.con.1 <- cbind(inputref, diag(ni), matrix(0, nrow = ni, ncol = no))
  f.con.2 <- cbind(outputref, matrix(0, nrow = no, ncol = ni), -diag(no))
  # Force slacks of non-controllable inputs/outputs to equal zero.
  f.con.nc <- matrix(0, nrow = (nnci + nnco), ncol = (ndr + ni + no))
  f.con.nc[, ndr + c(nc_inputs, ni + nc_outputs)] <- diag(nnci + nnco)
  f.con <- rbind(f.con.1, f.con.2, f.con.nc, f.con.rs)
  # Directions vector
  f.dir <- c(rep("=", ni + no + nnci + nnco), f.dir.rs)
  # Solve (or return) one LP per evaluated DMU.
  for (i in 1:nde) {
    ii <- dmu_eval[i]
    # Objective function coefficients
    f.obj <- c(rep(0, ndr), weight_slack_i[, i], weight_slack_o[, i])
    # Right hand side vector
    f.rhs <- c(input[, ii], output[, ii], rep(0, nnci + nnco), f.rhs.rs)
    if (returnlp) {
      # Do not solve: return the LP components with zero-initialized variables.
      lambda <- rep(0, ndr)
      names(lambda) <- dmunames[dmu_ref]
      slack_input <- rep(0, ni)
      names(slack_input) <- inputnames
      slack_output <- rep(0, no)
      names(slack_output) <- outputnames
      var <- list(lambda = lambda, slack_input = slack_input, slack_output = slack_output)
      DMU[[i]] <- list(direction = "max", objective.in = f.obj, const.mat = f.con,
                       const.dir = f.dir, const.rhs = f.rhs, var = var)
    } else {
      res <- lp("max", f.obj, f.con, f.dir, f.rhs)
      # lpSolve status 0 means an optimal solution was found.
      if (res$status == 0) {
        objval <- res$objval
        lambda <- res$solution[1 : ndr]
        names(lambda) <- dmunames[dmu_ref]
        slack_input <- res$solution[(ndr + 1) : (ndr + ni)]
        names(slack_input) <- inputnames
        slack_output <- res$solution[(ndr + ni + 1) : (ndr + ni + no)]
        names(slack_output) <- outputnames
        if (compute_target) {
          # Targets are the projections of the DMU onto the efficient frontier.
          target_input <- as.vector(inputref %*% lambda)
          target_output <- as.vector(outputref %*% lambda)
          names(target_input) <- inputnames
          names(target_output) <- outputnames
        }
      } else {
        objval <- NA
        lambda <- NA
        slack_input <- NA
        slack_output <- NA
        if (compute_target) {
          target_input <- NA
          target_output <- NA
        }
      }
      DMU[[i]] <- list(objval = objval,
                       lambda = lambda,
                       slack_input = slack_input, slack_output = slack_output,
                       target_input = target_input, target_output = target_output)
    }
  }
  # Checking if a DMU is in its own reference set (when rts = "grs")
  if (rts == "grs") {
    eps <- 1e-6 # numerical tolerance for "non-zero" lambda
    for (i in 1:nde) {
      j <- which(dmu_ref == dmu_eval[i])
      if (length(j) == 1) {
        kk <- DMU[[i]]$lambda[j]
        kk2 <- sum(DMU[[i]]$lambda[-j])
        if ((kk > eps) && (kk2 > eps)) {
          warning(paste("Under generalized returns to scale,", dmunames[dmu_eval[i]],
                        "appears in its own reference set."))
        }
      }
    }
  }
  deaOutput <- list(modelname = "additive",
                    rts = rts,
                    L = L,
                    U = U,
                    DMU = DMU,
                    data = datadea,
                    dmu_eval = dmu_eval,
                    dmu_ref = dmu_ref,
                    weight_slack_i = weight_slack_i,
                    weight_slack_o = weight_slack_o,
                    orientation = NA)
  return(structure(deaOutput, class = "dea"))
}
5f32bc033044ce0bea49a435b8e40dd974216c83 | e81de997187aac7d10d2a6b0513c0cfbd5038c62 | /web_crawling01.R | 1b4852258fefb95411bbca8330a3c3a7dbfbc4db | [] | no_license | hyunnie94/self_r | c09c6d76d070c1e51bda770f1a68401bf27ea488 | f7a0e757d147c000a9b7dd1c671e77f7becee53e | refs/heads/master | 2020-06-17T00:23:36.584856 | 2019-07-20T10:18:31 | 2019-07-20T10:18:31 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,837 | r | web_crawling01.R |
#R로 웹 데이터를 가져오는 4가지 방법(웹크롤링)
#웹에 있는 데이터를 가져오는 단계 : 요청 -> 추출 -> 저장 -> 반복 -> 예외처리 -> 최적화
install.packages("rvest")
library(rvest)
#xml_document라고 하는 r class로 만드는 과정
tar <- "https://news.naver.com/main/ranking/read.nhn?mid=etc&sid1=111&rankingType=popular_day&oid=052&aid=0001321181&date=20190720&type=2&rankingSeq=6&rankingSectionId=100"
read_html(tar)
# read_html(tar) %>% html_nodes("tag.class")
read_html(tar) %>% html_nodes("h3") #tar html을 읽어서 그 중 h3라는 tag를 가져옵니다.
read_html(tar) %>% html_nodes("h3[id = 'articleTitle']") #h3 중에서도 id = 'articleTitle' 부분을 가져옵니다
read_html(tar) %>% html_nodes("h3#articleTitle") #이렇게 가져와도 되고ㅎㅎ
#read_html(tar) %>% html_nodes("h3.articleTitle") class는 .으로 표시 이거는 class가 있을 경우
read_html(tar) %>% html_nodes("h3#articleTitle") %>% html_text() #text만 가져오도록!
read_html(tar) %>% html_nodes("h3#articleTitle") %>% html_attr("id")
#보통 attr은 기사에서 추가적인 정보를 가져오기 위해 링크가 필요한데 이때 attr로 하이퍼링크 값을 가져오도록 한다. 그 기사의 하이퍼링크는 그 전페이지에서 링크제목 오른쪽 버튼 클릭-> 검사를 통해 가져올 수 있다. href = 이게 보통은 하이퍼링크를 뜻함.
tar1 <- "https://news.naver.com/" #원하는 링크가 있는 페이지
read_html(tar1) %>% html_nodes("a") #a태그는 관례적으로 달고 있어서 엄청 많음..
read_html(tar1) %>% html_nodes("li a") #부모자식 노드 개념 이용. li밑의 a를 보여주세요
read_html(tar1) %>% html_nodes("li a") %>% html_attr("href")
|
27665d713d25e801207e9710cf21570293d953a3 | b4ff4bbc9d4fb425cdd665588d916ed3b7012239 | /best.R | e633bd5e9df743e865a4af2e6b4287361a0617f3 | [] | no_license | jjsjamesj/datascience | 5e091023f1f1493cfaecd4b14aa3a6d1875de0bd | 0625a943206365a5b211f0de14ffda4222f78c60 | refs/heads/master | 2021-01-15T17:20:33.389994 | 2016-10-10T04:36:39 | 2016-10-10T04:36:39 | 68,292,073 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,100 | r | best.R | ## This file contains the definition of the function 'best'.
best <- function(stateArg, outcome){
  ## Find the name of the hospital with the lowest 30-day mortality rate
  ## for a given outcome in a given state.
  ##
  ## Args:
  ##   stateArg: 2-letter state/territory abbreviation (e.g. "TX").
  ##   outcome:  one of "heart attack", "heart failure", "pneumonia".
  ##
  ## Returns: the hospital name (character scalar). Ties for the lowest
  ## rate are broken alphabetically.
  ##
  ## Stops with 'invalid outcome' / 'invalid state' on bad arguments.
  data <- read.csv('rproghw3/outcome-of-care-measures.csv', na.strings = 'Not Available')
  validOutcomes <- c('heart attack', 'heart failure', 'pneumonia')
  validStates <- c("AK", "AL", "AR", "AZ", "CA", "CO", "CT", "DC",
                   "DE", "FL", "GA", "GU", "HI", "IA", "ID", "IL",
                   "IN", "KS", "KY", "LA", "MA", "MD", "ME", "MI",
                   "MN", "MO", "MS", "MT", "NC", "ND", "NE", "NH",
                   "NJ", "NM", "NV", "NY", "OH", "OK", "OR", "PA",
                   "PR", "RI", "SC", "SD", "TN", "TX", "UT", "VA",
                   "VI", "VT", "WA", "WI", "WV", "WY")
  ## Validate 'outcome'. The mortality columns in the .csv start at
  ## column 11 and occur every 6th column thereafter, so the i-th valid
  ## outcome lives in column i*6 + 5.
  outcomeIndex <- match(outcome, validOutcomes)
  if (is.na(outcomeIndex)) stop('invalid outcome')
  if (!(stateArg %in% validStates)) stop('invalid state')
  outcomeColNum <- outcomeIndex * 6 + 5
  ## Keep only the requested state's hospitals, with two columns:
  ## hospital name (column 2) and the requested mortality rate.
  subdata <- subset(data, State == stateArg, select = c(2, outcomeColNum))
  ## Indices of all hospitals tied for the minimum rate (NAs ignored).
  indexOfBest <- which(subdata[, 2] == min(subdata[, 2], na.rm = TRUE))
  ## sort() handles the tie-breaking in one step: with a single minimum it
  ## is a no-op, with several minima it puts the alphabetically first name
  ## in front -- matching the original tie-breaking behaviour without
  ## growing a vector in a loop. as.character() guards against the name
  ## column having been read as a factor (the pre-R-4.0 read.csv default).
  sort(as.character(subdata[indexOfBest, 1]))[1]
}
7bcbc220aab6799edf19e6213ad6dcb0d12bb7af | 829b03a589cb54a5c8c1ba095d75728aa90ab929 | /test/tests.R | 2b66c70efccf4ea1fc7c09cd7443bc51307c1051 | [
"MIT"
] | permissive | antoine-lizee/R-GSC | 2d56bba3675fd347f8be4c63f1824cba218b6e98 | 353047286b71d7225e6766d73e7e71c868a6b2c6 | refs/heads/master | 2021-03-12T20:10:43.384789 | 2015-02-25T04:28:47 | 2015-02-25T04:28:47 | 29,715,727 | 1 | 1 | null | 2015-01-26T22:51:41 | 2015-01-23T03:59:48 | R | UTF-8 | R | false | false | 1,039 | r | tests.R | # This is the test file accompanying the main GSC.R
#
# Copyright Antoine Lizee 01/2015 antoine.lizee@gmail.com
source("GSC.R")
source("test//GSC2.R")
# This is the actual example in the paper appendix [doi:10.1016/0022-2836(94)90012-4]: https://pdf.yt/d/Sx3jMbr8vANgxAej/download
# The reader should be warned that the results in the paper are
# wrong, due to the low (and inconsistent!) precision they use.
testhc <- list("merge" = matrix(nrow = 3, byrow = T, c(-1,-2,-3,1, -4,2)),
"height" = c(20, 50, 80),
"order" = c(1,2,3,4),
"labels" = LETTERS[1:4])
attr(testhc, "class") <- "hclust"
testdend <- as.dendrogram(testhc)
plot(testdend)
print(GSC(testdend))
print(GSC2(testdend))
# A more real-looking use-case:
hc <- hclust(dist(mtcars))
dd <- as.dendrogram(hc)
plot(as.dendrogram(hc))
ddGSCs <- GSC(dd) #compute the weights
hc$labels[hc$order] <- paste(names(ddGSCs), sprintf("%.1f", ddGSCs), sep = " - ") #add them to the label names
plot(as.dendrogram(hc)) #plot them to have a look. |
8ba690070dcd717528408042e64d9f10194798aa | 383e67439d920e91ad2a9e8528e6fc68288796af | /0-libraries.R | 71c765012822e4991a49ae158c2e802b2dfc687a | [] | no_license | SciBorgo/READY-study | e422505855b8bd3e530a8cdb6957e5736d9c98d2 | e583934e11b9c42c2689256795b130c1a1ada59c | refs/heads/main | 2023-09-03T07:05:36.467805 | 2021-11-13T05:32:31 | 2021-11-13T05:32:31 | 375,308,529 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 246 | r | 0-libraries.R |
# Packages
library(readr)
library(ggplot2)
library(dplyr)
library(car)
library(naniar)
library(visdat)
library(tidyverse)
library(janitor)
library(cowplot)
library(lubridate)
library(mice)
library(randomForest)
library(brms)
library(tidybayes)
|
8f604849e9a287a2b1a287d74360f94c333ab1bc | ac7cb7f7852daad5cd9748f631dfa071f5a731ac | /tests/testthat/test-densities.R | 8fe331f911097b9f28a5b4fd605c55f176a2926a | [] | no_license | nschaefer1211/OSE | 762d25eabd31452c1fe22a7c1a4d6300f1ee5847 | 3a6910180083bee15f020bd362d535fc8cdf7f7e | refs/heads/master | 2023-01-16T00:39:45.377897 | 2020-11-30T20:25:00 | 2020-11-30T20:25:00 | 317,336,894 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,166 | r | test-densities.R | #Beachte: Aus Vollständigkeitsgründen existieren diese Testfiles, sie sind aber zu nicht viel nütze, da
# (Continuation of the note above:) these test files exist mainly for
# completeness; they are of limited use because our results are
# approximations that cannot be checked against any existing data set.
# The file 'plots' and the vignette are more valuable for understanding
# the package and for verifying the functions.
context("Tests für die Funktionen des Pakets, die Dichten Schätzen")
test_that("est_dens", {
expect_is(est_dens(Trig_Basis$new(100), runif(50), 6), "function") # checking that the output is a function
expect_error(est_dens(Trig_Basis$new(100), runif(50), -2)) # error if the dimension is negative
expect_error(est_dens(Trig_Basis$new(100), runif(50), 42.5)) # error if the dimension is not an integer
})
test_that("est_dens2", {
expect_is(est_dens2(Trig_Basis$new(100), data.frame(runif(50), runif(50)), c(6,8)), "function") # checking that the output is a function
our_dens <- function(x) 1
our_dens <- Vectorize(our_dens)
our_data <- data.frame(rdens(our_dens, 200), rdens(our_dens, 200))
expect_equal(est_dens2(Trig_Basis$new(100), our_data, c(5, 5)), est_dens2(Trig_Basis$new(100), our_data, 5)) # checking that a scalar dimension is recycled into a vector
})
|
98dc86eabef6f928010779632b912d83c1cb1b86 | 3b2b5636282ae842def1c16265cccac19f9d125a | /R/plotSpectrum.R | 23ef365779d77e051d1bf3f50cb766a623bca84b | [
"BSD-2-Clause"
] | permissive | ilkkavir/LPI.gdf | 06cf2ccb0ed19b7a04df417fe93cef2f7e530115 | 088a53c3624c68406b87ccaf8d1451ef678c3b62 | refs/heads/master | 2023-05-28T09:56:00.948551 | 2023-05-15T13:23:37 | 2023-05-15T13:23:37 | 205,375,323 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,944 | r | plotSpectrum.R | ## file:plotSpectrumR
## (c) 2015- University of Oulu, Finland
## Written by Ilkka Virtanen <ilkka.i.virtanen@oulu.fi>
## Licensed under FreeBSD license.
##
##
## Color-coded image of power spectral density
## estimated by means of the Lomb-Scargle periodogram
##
## Arguments:
## data an ACF list from LPI.gdf output file, from the
## function readACF, or from plotACF
## pdf pdf file name
## jpg jpeg file name
## figNum figure number
## zlim z limits
## ylim y axis limits
## xlim x axis limits
## cex Text magnification
## bg Background color
## fg Foreground color
## width Plot width
## height Plot height
## paper paper dimensions when printing to files
## res Resolution for jpg images
## stdThreh Threshold for ACF variance(in some units?)
## yheight Logical, if TRUE y-axis is height, otherwise range
## llhT latitude, longitude, height of transmitter
## azelT azimuth and elevation of transmitter beam
## llhR latitude, longitude, height of receiver
## lags Indices of lags to include in the plot
## SIunits Logical, if TRUE range / height is expressed in km
## and lag in ms, otherwise gates (sample intervals)
## are used.
## rscale Logical, scale with range squared?
##
##
## Returns:
## spectrum The input list padded with the power spectral density
##
# S3 generic: dispatches on the class of 'data'. A character 'data' is
# handled by plotSpectrum.character (a path read via readACF), a list by
# plotSpectrum.list (an already-read ACF list). See the header comment at
# the top of this file for the meaning of the arguments.
plotSpectrum <- function( data, normalize=TRUE , pdf=NULL , jpg=NULL , figNum=NULL , zlim=NULL , ylim=NULL , xlim=NULL , cex=1 , bg='white' , fg='black' , width=8.27 , height=11.69 , paper='a4' , res=300 , stdThrsh=Inf , yheight=FALSE , llhT=NULL , azelT=NULL , llhR=NULL , rscale=FALSE)
{
    UseMethod("plotSpectrum")
}
# Method for a character 'data' argument: treat it as a directory path,
# read the ACF data from it, then forward the call so that dispatch
# selects plotSpectrum.list.
plotSpectrum.character <- function( data, normalize=TRUE , pdf=NULL , jpg=NULL , figNum=NULL , zlim=NULL , ylim=NULL , xlim=NULL , cex=1 , bg='white' , fg='black' , width=8.27 , height=11.69 , paper='a4' , res=300 , stdThrsh=Inf , yheight=FALSE , llhT=NULL , azelT=NULL , llhR=NULL , rscale=FALSE)
{
    # Read the ACF list from the given path; the variance threshold is
    # applied here, at read time.
    data <- readACF( dpath=data , stdThrsh=stdThrsh )
    # Rebuild this function's full argument list with the *current* values
    # ('data' now holds the ACF list, not the path): formals() gives the
    # argument names, and eval(as.name(x)) looks each one up in this frame.
    par <- formals()
    parnames <- names(par)
    par <- lapply( names( par ) , FUN=function(x){ eval( as.name( x ) ) } )
    names(par) <- parnames
    # Re-dispatch with identical arguments; goes to plotSpectrum.list.
    do.call(plotSpectrum,par)
}
# Method for an ACF list: compute the power spectral density from the
# lag-profile ACF and draw it as a color-coded lattice levelplot.
# Returns (invisibly) a copy of the input list with the computed
# 'spectrum' matrix and frequency axis 'f' added.
plotSpectrum.list <- function( data , normalize=TRUE , pdf=NULL , jpg=NULL , figNum=NULL , zlim=NULL , ylim=NULL , xlim=NULL , cex=1 , bg='white' , fg='black' , width=8.27 , height=11.69 , paper='a4' , res=300 , stdThrsh=Inf , yheight=FALSE , llhT=NULL , azelT=NULL , llhR=NULL , rscale=FALSE)
  {
    # copy the data list (this copy is returned, padded with results)
    data2 <- data
    # open the proper figure; at most one of figNum / pdf / jpg may be set
    figList <- c(is.null(figNum),is.null(pdf),is.null(jpg))
    if(sum(figList) < 2 ) stop('Only one output device can be selected at a time')
    # a new x11 window by default (none of the three options given)
    if(sum(figList) == 3) x11(width=width,height=height)
    # new plot to an existing x11 window
    if(!is.null(figNum)) {dev.set(figNum);plot.new()}
    # a new pdf file
    if(!is.null(pdf)) pdf(file=paste(pdf,'.pdf',sep=''),paper=paper,width=width,height=height)
    # a new jpeg file
    if(!is.null(jpg)) jpeg(filename=paste(jpg,'.jpg',sep=''),width=width,height=height,units='in',res=res)
    # Check that the required range and lag vectors exist in the data
    if( is.null(data[["range.km"]]) ){
        stop('The data does not contain range in km')
    }else if( is.null(data[["lag.us"]]) ){
        stop('The data does not contain lag in us')
    }
    # Select the range (km) and lag (converted from us to ms) vectors
    r <- data[["range.km"]]
    l <- data[["lag.us"]]/1000
    # create the frequency axis: symmetric around 0, Nyquist set by the
    # smallest lag step
    fmax <- 1/min(diff(l),na.rm=T)
    ftmp <- seq(0,fmax,length.out=length(l))
    f <- c(-rev(ftmp[2:length(l)]),ftmp)
    # If site locations and pointing directions were not given as
    # input, take them from the data list
    if( is.null( llhT ) ) llhT <- data[["llhT"]]
    if( is.null( llhR ) ) llhR <- data[["llhR"]]
    if( is.null( azelT ) ) azelT <- data[["azelT"]]
    if(yheight){
        # Height axis requested: site locations and pointing must be
        # known to convert range to height, otherwise we must stop
        if( is.null( llhT ) ) stop("Transmitter location not known, cannot convert range to height")
        if( is.null( llhR ) ) stop("Receiver location not known, cannot convert range to height")
        if( is.null( azelT ) ) stop("Transmitter pointing not known, cannot convert range to height")
        # convert each range (km -> m for range2llh, result m -> km)
        h <- r
        for(k in seq(length(r))) h[k] <- range2llh(r[k]*1000,llhT,azelT,llhR)["h"]/1000
        grid <- expand.grid(x=f,y=h)
        data[["height.km"]] <- h
        data2[["height.km"]] <- h
    }else{
        grid <- expand.grid(x=f,y=r)
    }
    # optionally scale the ACF with range squared
    if( rscale ){
        for( k in seq( length( data[["range"]] ) ) ){
            data[["ACF"]][k,] <- data[["ACF"]][k,]*data[["range"]][k]**2
        }
    }
    # ok, this will still require some thinking
    # looks like the simple summation over data[["ACF"]][height,lag]*exp(1i*om*lag) is the correct way to do this?
    # Direct (Lomb-Scargle-style) transform: for each gate, sum the
    # available (non-NA) lags plus their conjugate mirror terms.
    ssc <- matrix(ncol=length(f),nrow=length(data[["range"]]))
    for(hh in seq(length(data[["range"]]))){
        if(any(!is.na(data[["ACF"]][hh,]))){
            ssc[hh,] <- 0
            for(ll in seq(length(data[["lag.us"]]))){
                if(!is.na(data[["ACF"]][hh,ll])){
                    ssc[hh,] <- ssc[hh,] + data[["ACF"]][hh,ll]*exp(1i*2*pi*f*data[["lag.us"]][ll]*1e-6) + Conj(data[["ACF"]][hh,ll]*exp(-1i*2*pi*f*data[["lag.us"]][ll]*1e-6))
                }
            }
        }
    }
    # squared magnitude of the complex sum
    ss <- abs(ssc)**2
    # optionally normalize each gate to its own maximum
    if(normalize){
        for(hh in seq(length(data[["range"]]))){
            ss[hh,] <- ss[hh,]/max(ss[hh,],na.rm=TRUE)
        }
    }
    # flatten row-wise to match the expand.grid ordering
    grid$z <- c(t(ss))
    main <- "Power spectrum"
    if(yheight){
        ylab <- "Height [km]"
    }else{
        ylab <- "Range [km]"
    }
    xlab <- "Frequency [Hz]"
    # apply the requested fore-/background colors to the lattice theme
    trebg <- trellis.par.get(name='background')
    trebg$col <- bg
    trellis.par.set(col=fg,background=trebg)
    # default axis limits from the data when not supplied by the caller
    if (is.null(ylim)){
        if(yheight){
            ylim=range(h,na.rm=TRUE)
        }else{
            ylim=range(r,na.rm=TRUE)
        }
    }
    if (is.null(xlim)) xlim=range(f,na.rm=TRUE)
    if (is.null(zlim)) zlim=range(grid$z,na.rm=T,finite=T)
    par(cex.axis=cex,cex.lab=cex,cex.main=cex,bg=bg,fg=fg,lwd=cex,col.lab=fg,
        col.axis=fg,col.main=fg,bty='n',mar=c(5,8,0,0)*cex,mgp=c(6,2,0)*cex)
    # print() is required for lattice plots inside a function
    print(
        levelplot(
            z~x*y,
            grid,
            col.regions=beer,
            ylim=ylim,
            xlim=xlim,
            at=seq(zlim[1],zlim[2],length.out=100),
            cuts=100,
            xlab=list(xlab,cex=cex,col=fg),
            ylab=list(ylab,cex=cex,col=fg),
            colorkey=list(labels=list(col=fg,cex=cex)),
            scales=list(col=fg,cex=cex),
            main=main,
            )
        )
    # pad the returned copy with the computed spectrum and frequency axis
    data2[["spectrum"]] <- ss
    data2[["f"]] <- f
    # close file devices so the output is flushed to disk
    if(!is.null(pdf)) dev.off()
    if(!is.null(jpg)) dev.off()
    invisible(data2)
}
f11ff0adf341cda92a999da6ffab5abfd93c14e8 | e11528102c9dd7e8c43828a39c649e39f366683c | /test.R | 0815e86a0475279f2a57c6777f6b531717b8e776 | [] | no_license | RogerPink/twitter_crawl | 064090a994e8832085e36ff388f0b57001c46f2b | c9fa77c88cf23b87d2659a4b3ea314cc9136d6da | refs/heads/master | 2020-04-21T10:44:58.026287 | 2019-06-13T01:51:36 | 2019-06-13T01:51:36 | 169,495,627 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 23 | r | test.R | y<-3
# Derive x from y (assigned above) and display the result.
x <- y + 3
print(x)
|
b5bcb515bcb1fd5c10234318be5af409bab671b9 | 7a85e51da609955285640d05cea1e9153fe721a4 | /day01/ch10.R | dc68d6ff958207a21c21c5d9351862e732b3be09 | [] | no_license | up1/study-r-programming | 2180b8d783859191815a07f709e63dd485cc48cf | 57ca8f379f247d1e210d8404b269bf47800aff83 | refs/heads/master | 2022-07-17T22:00:55.479726 | 2020-05-15T16:39:03 | 2020-05-15T16:39:03 | 263,380,566 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 156 | r | ch10.R | score <- 70
# Map the numeric 'score' (assigned above) to a letter grade. The chain
# has no final 'else', so the initial "C" is kept for scores below 70.
grade <- "C"
if (score >= 80) {
  grade <- "A"
} else if (score >= 70) {
  grade <- "B"
}
# Scalar condition: use an if/else expression instead of the vectorized
# ifelse(), which is meant for whole vectors.
result <- if (score >= 80) "A1" else "B1"
print(result)
28ec451965e2ca174e29843d2f35c230d5b127ae | 29585dff702209dd446c0ab52ceea046c58e384e | /blkergm/R/zzz.R | 99d9f99412881ad06399e6027a1a13b0c8ff2d09 | [] | no_license | ingted/R-Examples | 825440ce468ce608c4d73e2af4c0a0213b81c0fe | d0917dbaf698cb8bc0789db0c3ab07453016eab9 | refs/heads/master | 2020-04-14T12:29:22.336088 | 2016-07-21T14:01:14 | 2016-07-21T14:01:14 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 707 | r | zzz.R | # File R/zzz.R in package ergm.userterms, part of the Statnet suite
# of packages for network analysis, http://statnet.org .
#
# This software is distributed under the GPL-3 license. It is free,
# open source, and has the attribution requirements (GPL Section 7) at
# http://statnet.org/attribution
#
# Copyright 2003-2013 Statnet Commons
#######################################################################
# Package hook run when the package is attached: show the statnet startup
# banner (when one is produced) followed by a short package description.
.onAttach <- function(lib, pkg){
  banner <- statnetStartupMessage("blkergm", c("statnet","ergm.userterm"), FALSE)
  if (is.null(banner)) {
    return(invisible(NULL))
  }
  packageStartupMessage(banner)
  packageStartupMessage("An extension to the ergm package for fitting the block models based on the ergm.userterms")
}
|
43efed56c402449eb37853d3e5067546ec660ef4 | cca7f37876c8dc54f8c296f90ff7af28313ae3f9 | /一般机器学习方法.R | b57f7338dd718de1384e73af3d4751991e27a1cf | [] | no_license | CHDYangMeng/R_Code | e8913cff90f695e1459d4df09438019fb73965e7 | 960f8b5ea52a7d3cceb8506a27549d36d572ef4b | refs/heads/master | 2022-10-20T03:54:32.352056 | 2020-06-10T08:13:20 | 2020-06-10T08:13:20 | 271,218,379 | 0 | 0 | null | null | null | null | GB18030 | R | false | false | 8,049 | r | 一般机器学习方法.R | rm(list = ls())
options(warn = -1)
# -------------------------------------------
# ###########################################
#
# 机器学习
#
# ###########################################
# -------------------------------------------
#
# 神经网络 Neural Networks and Deep Learning
#
# -------------------------------------------
# 包
# nnet:这个包(package)中含有单隐藏层神经网络
# rnn:实现了循环神经网络
# deepnet:前馈神经网络(feed-forward neural network)
# tensorflow:提供了面向TensorFlow的接口
#
# install.packages('nnet')
library(nnet)
seeds <-read.table("E:/数据学习/R语言机器学习模块/seeds_dataset.txt", header=FALSE, sep="")
names(seeds) <-c("A", "P", "C", "L", "W","AC", "LKG", "Label")
# 把seed数据集划分为训练70%的训练数据集和30%的测试数据集
trainIndex <-sample(1:210, 147)
trainIndex
testIndex <-setdiff(1:210, trainIndex)
testIndex
# 利用神经网络对训练数据集获得模型
ideal <-class.ind(seeds$Label)
seedsANN <-nnet(seeds[trainIndex, -8], ideal[trainIndex, ], size=10, softmax=TRUE)
# 对测试数据及进行测试
testLabel <-predict(seedsANN, seeds[testIndex, -8], type="class")
# 计算测试误差率
# 混淆矩阵
my_table <-table(seeds[testIndex,]$Label, testLabel)
# 误差
test_error <- 1 - sum(diag(my_table)) / sum(my_table)
# -------------------------------------------
#
# 决策树 Recursive Partitioning
#
# -------------------------------------------
# 包
# rpart: 递归拆分利用树形结构模型,来做回归、分类和生存分析
# maptree: 用于树的可视化的图形工具
# party和partykit: 提供二分支树和节点分布的可视化展示
library(rpart)
loc<-"http://archive.ics.uci.edu/ml/machine-learning-databases/"
ds<-"breast-cancer-wisconsin/breast-cancer-wisconsin.data"
url<-paste(loc,ds,sep="")
data<-read.table(url,sep=",",header=F,na.strings="?")
names(data)<-c("编号","肿块厚度","肿块大小","肿块形状","边缘黏附","单个表皮细胞大小","细胞核大小","染色质","细胞核常规","有丝分裂","类别")
data$类别[data$类别==2]<-"良性"
data$类别[data$类别==4]<-"恶性"
data<-data[-1] #删除第一列元素#
set.seed(1234) #随机抽样设置种子
train<-sample(nrow(data),0.7*nrow(data)) #抽样函数,第一个参数为向量,nrow()返回行数 后面的是抽样参数前
# 训练集
tdata<-data[train,] #根据抽样参数列选择样本,都好逗号是选择行
# 测试集
vdata<-data[-train,] #删除抽样行
# 训练
dtree<-rpart(类别~.,data=tdata,method="class", parms=list(split="information"))
print(dtree)
printcp(dtree)
# 剪枝
tree<-prune(dtree,cp=dtree$cptable[which.min(dtree$cptable[,"xerror"]),"CP"])
# 画图
library(rpart.plot)
rpart.plot(dtree,branch=1,type=2, fallen.leaves=T,cex=0.8, sub="剪枝前")
rpart.plot(tree,branch=1, type=4,fallen.leaves=T,cex=0.8, sub="剪枝后")
#利用预测集进行预测
predtree<-predict(tree,newdata=vdata,type="class")
#输出混淆矩阵
table(vdata$类别,predtree,dnn=c("真实值","预测值"))
# -------------------------------------------
#
# 随机森林 Random Forests
#
# -------------------------------------------
# 包
# randomForest: 提供了用于回归和分类的随机森林算法
#
library(randomForest)
data <- iris
Randommodel <- randomForest(Species ~ ., data=data,importance = TRUE, proximity = FALSE, ntree = 100)
print(Randommodel)
importance(Randommodel,type=1) #重要性评分
importance(Randommodel,type=2) #Gini指数
varImpPlot(Randommodel) #可视化
prediction <- predict(Randommodel, data[,1:5],type="class") #还有response回归类型
# 混肴矩阵
table(observed =data$Species,predicted=prediction)
# -------------------------------------------
#
# 支持向量机和核方法 Support Vector Machines and Kernel Methods
#
# -------------------------------------------
# 包
# e1071: 函数svm()提供了LIBSVM库的接口
# kernlab: 实现了一个灵活的核学习框架(包括SVMs,RVMs和其他核学习算法)
library(e1071)
library(lattice)
data(iris)
attach(iris)
xyplot(Petal.Length ~ Petal.Width, data = iris, groups = Species,auto.key=list(corner=c(1,0)))
subdata <- iris[iris$Species != 'virginica',]
subdata$Species <- factor(subdata$Species)
# model1 <- svm(Species ~ Petal.Length + Petal.Width, data = subdata)
# plot(model1, subdata, Petal.Length ~ Petal.Width)
model2 <- svm(Species ~ ., data = iris)
summary(model2)
# -------------------------------------------
#
# 贝叶斯方法 Bayesian Methods
#
# -------------------------------------------
# 包
# install.packages("caret")
# install.packages("bnlearn")
# caret
# bnlearn
# 加载扩展包和数据
library(caret)
data(PimaIndiansDiabetes2,package='mlbench')
# 对缺失值使用装袋方法进行插补
preproc <- preProcess(PimaIndiansDiabetes2[-9],method="bagImpute")
data <- predict(preproc,PimaIndiansDiabetes2[-9])
data$Class <- PimaIndiansDiabetes2[,9]
# 加载包
library(bnlearn)
# 数据离散化
data2 <- discretize(data[-9],method='quantile')
data2$class <- data[,9]
# 使用爬山算法进行结构学习
bayesnet <- hc(data2)
# 显示网络图
plot(bayesnet)
# 修改网络图中的箭头指向
bayesnet<- set.arc(bayesnet,'age','pregnant')
# 参数学习
fitted <- bn.fit(bayesnet, data2,method='mle')
# 训练样本预测并提取混淆矩阵
pre <- predict(fitted,data=data2,node='class')
confusionMatrix(pre,data2$class)
# 进行条件推理
cpquery(fitted,(class=='pos'),(age=='(36,81]'&mass=='(34.8,67.1]'))
# -------------------------------------------
#
# 遗传算法进行优化 Optimization using Genetic Algorithms
#
# -------------------------------------------
# 包
# install.packages("mcga")
# mcga包,多变量的遗传算法,用于求解多维函数的最小值。
# genalg包,多变量的遗传算法,用于求解多维函数的最小值。
# rgenoud包,复杂的遗传算法,将遗传算法和衍生的拟牛顿算法结合起来,可以求解复杂函数的最优化化问题。
# gafit包,利用遗传算法求解一维函数的最小值。不支持R 3.1.1的版本。
# GALGO包,利用遗传算法求解多维函数的最优化解。不支持R 3.1.1的版本。
library(mcga)
f<-function(x){
return ((x[1]-5)^2 + (x[2]-55)^2 +(x[3]-555)^2 +(x[4]-5555)^2 +(x[5]-55555)^2)
}
m <- mcga( popsize=200,chsize=5,minval=0.0,
maxval=999999,maxiter=2500,crossprob=1.0,
mutateprob=0.01,evalFunc=f)
# 最优化的个体结果
print(m$population[1,])
# 执行时间
m$costs[1]
# -------------------------------------------
#
# 关联规则 Association Rules
#
# -------------------------------------------
# 包
# install.packages("arules")
# arules: 提供有效处理稀疏二元数据的数据结构,以及Apriori和Eclat实现的接口,
# 用于挖掘频繁项集,最大频繁项集,封闭频繁项集和关联规则
library(arules)
data <- Groceries
inspect(Groceries)
rules <- apriori(data = data, parameter = list(support=0.01,confidence=0.5))
inspect(rules)
# -------------------------------------------
#
# 基于模糊规则的系统 Fuzzy Rule-based Systems
#
# -------------------------------------------
# 包
# frbs: 实现了许多标准方法,用于从回归和分类问题数据中学习基于模糊规则的系统
# RoughSets: 提供粗糙集理论(RST)和模糊粗糙集理论(FRST)的全面实现
# -------------------------------------------
#
# GUI包rattle是R语言数据挖掘的图形用户界面包
#
# -------------------------------------------
# 包
# install.packages("rattle")
# rattle
library(rattle)
rattle()
# -------------------------------------------
#
# 可视化(最初由Brandon Greenwell提供)
#
# -------------------------------------------
# 包
# ggRandomForests包提供了基于ggplot2的工具,
# 用于从randomForest包和randomForestSRC包中对随机森林模型
# (例如变量重要性图和PDP)进行图形化探索
|
db68f81cdb78b81985dcb37bd57aebc528728c0a | f2809fe5dffcbd82cfb7ada6cbf60398746512eb | /R/percent_tolerant.R | b80d67f9cb82ff03770e0120a0bb2ca2822c0762 | [
"CC0-1.0"
] | permissive | mpdougherty/mcat | 7374d207b39a3a4b32b61a95ebb61dab8bada6d9 | a69bbc006d54763e68a87939309e0fc68a9b5aa6 | refs/heads/master | 2020-05-23T19:34:06.037792 | 2019-05-28T20:54:36 | 2019-05-28T20:54:36 | 186,914,680 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,605 | r | percent_tolerant.R | #' @title Percent Tolerant
#'
#' @description Calculates the percent tolerant MCAT metric for the input
#' individual mussel data frame.
#'
#' @export
#' @param individuals data frame; a data frame of individual mussel records.
#'
#' @return A data frame of sampled sites with the calculated percent tolerant
#' MCAT metric
#'
#' @examples
#' # Create the individual mussel data frame
#' individuals <- mcat::individuals
#'
#' # Calculate percent tolerant for the individuals data frame
#' pt <- percent_tolerant(individuals)
#'
#' @importFrom dplyr group_by summarize
#'
percent_tolerant <- function(individuals) {
# Get vector of tolerant species
tolerant <- mcat::tolerant$tolerant
# Create the `number_live` field
individuals$number_live <- ifelse(individuals$Status == "Live", 1, 0)
# Set a flag field if the individual is tolerant and alive
individuals$tolerant <- ifelse(individuals$Ename %in% tolerant &
individuals$number_live >= 1,
1, 0)
# Group by SampleID
individuals %>%
dplyr::group_by(SampleID) %>%
dplyr::summarize(SUM_number_live = sum(number_live),
SUM_tolerant = sum(tolerant)) -> sample
# Calculate percent listed
sample$percent_tolerant <- (sample$SUM_tolerant / sample$SUM_number_live) * 100
# Convert NaN to zero (numerator and denominator is zero)
sample$percent_tolerant[is.nan(sample$percent_tolerant)] <- 0
# Convert Inf to zero (demominator is zero)
sample$percent_tolerant[is.infinite(sample$percent_tolerant)] <- 0
return(sample)
}
|
b4414802b93ceee53d1dfbe63b1653c2a68aa7b3 | 454c1254be5ec2d6f7d3a9d864fa603a9fd95a74 | /R/Step1_Identify_homologous_series.R | 14e797fa5511e18aca1dad356a1f594f657b3049 | [] | no_license | CRKOMassSpecComputing/HomoSeriesMatcher | aae13d201d34eca3ef8a0491de2e4fdac85e7c46 | 4e89a6615eec300ebd835a808035ea3ad11cde9f | refs/heads/master | 2020-03-18T13:06:40.777168 | 2018-05-24T21:24:44 | 2018-05-24T21:24:44 | 134,761,200 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,714 | r | Step1_Identify_homologous_series.R | ##################################################################################
# Step1_Identify_homologous_series
#
# Karl Oetjen and Christopher Ruybal
# Last modified: 5/10/2018
#
#################################################################################
# Non-Target Screening Tool
#Package Source "nontarget" developed by Martin Loos <Martin.Loos@eawag.ch>
# Load libraries
library(nontarget)
library(nontargetData)
library(calibrate)
library(openxlsx)
library(plotly)
# Source additional scripts
source('C:/Nontarget_Data/Scripts/plothomol3.R')
source('C:/Nontarget_Data/Scripts/Interactive Update 51517.R')
###################################
##### CREATING DATA PATH ##########
###################################
#Load adduct list
data("adducts")
#Load isotope list
data("isotopes")
# ***********Directory containing data to plot**********
indir <- 'C:/Nontarget_Data/Fluro/Simon/Krista POS GW/'
#*********** Directory that you want to put your plots in*****************
outdir <- 'C:/Nontarget_Data/Fluro/Simon/Krista POS GW/Results/'
#*************load file you want to look at name it the sample list name*******
fname <- paste(indir,'070715 Pos Jacksonville Short.csv',sep='')
#************Name Run Varible - change this to sample name ****************
peaklist<-read.csv(fname)
#### Load file with known peak names ####
namelist<-read.csv('C:/Nontarget_Data/SETAC/dummy.csv')
####################################
##### 1 MAKE ISOTOPE LIST ##########
####################################
#make isotope list that you want to look at you change canhe this to look at the isotope list [ View(isotopes) ]
iso<-make.isos(isotopes, use_isotopes=c("13C","15N","34S","37Cl","81Br","41K","13C","15N","34S","37Cl","81Br","41K","18O","2H"), use_charges=c(1,1,1,1,1,1,2,2,2,2,2,2,2,2))
###################################
##### 2 RUN PATTERN SEARCH ########
###################################
#Run Pattern Search
pattern<-pattern.search(peaklist,iso,cutint=10000,rttol=c(-0.05,0.05),mztol=5,mzfrac=0.1,ppm=TRUE,inttol=0.2,rules=c(TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE,TRUE),deter=FALSE,entry=50)
# Plot isotope and masss defect
plotisotopes(pattern);
plotdefect(pattern,elements=c("C"))
###########################################
####### 3 RUNNING ADDUCT SEARCH#############
###########################################
#Run adduct search
adducts <- nontarget::adduct.search(peaklist, adducts, rttol=0.5, mztol = 5, ppm = TRUE, use_adducts = c("M+H","M+K","M+Na", "M+NH4"), ion_mode = "positive")
#Plot adduct
plotadduct(adducts)
#To show single pattern group and its relation to adduct groups######
plotall(pattern,adducts);
plotgroup(pattern,adducts,groupID=1,massrange=10,allmass=FALSE);
############################################
########### 4 HOMOLOGUE SEARCH #############
############################################
# Screen for homologue series built from C, H and O (5 ppm mass tolerance,
# series of at least 3 members, RT increment tolerance 0.05)
homol <- homol.search(peaklist,isotopes,elements=c("C","H","O"), use_C=TRUE, minmz=5, maxmz=1200, minrt=0.5, maxrt=45, ppm=TRUE, mztol=5, rttol=0.05, minlength=3, mzfilter=FALSE,vec_size=1E6)
# Plot the detected series (m/z vs retention time)
plothomol(homol,xlim=FALSE,ylim=FALSE,plotlegend=TRUE);
# Plot the series as m/z vs mass defect instead
plothomol(homol,plotdefect=TRUE);
# Variant used when peak names are known (PeakID, Name, Group ID)
plothomol3(homol,xlim=FALSE,ylim=FALSE,plotlegend=TRUE);
# Flatten the per-peak results: columns 1, 3, 4, 5 of homol[[1]]
# are m/z, RT, peak ID and homologue-series (group) ID
datAll <- data.frame(mz = homol[[1]][,1],
RT = homol[[1]][,3],
PeakID = homol[[1]][,4],
GroupID = homol[[1]][,5])
## Build a character matrix (PeakID, Name, GroupID) for the peaks whose
## name is known, pairing row r of `datAll` with row r of `namelist`.
## NOTE(review): this assumes the two tables are row-aligned - confirm.
m <- 0                                    ## number of KNO rows filled so far
KNO <- matrix(NA, nrow = length(namelist$Name[!is.na(namelist$Name)]), ncol = 3)
cnam <- c('PeakID','Name','GroupID')
colnames(KNO) <- cnam
## Walk only the rows present in BOTH tables: the original 1:length(datAll$mz)
## would be c(1, 0) for an empty peak list and would silently index past the
## end of namelist when the two tables differ in length.
for (rrr in seq_len(min(length(datAll$mz), length(namelist$Name)))) {
  if (!is.na(namelist$Name[rrr])) {
    m <- m + 1
    KNO[m, 1] <- toString(datAll$PeakID[rrr])
    KNO[m, 2] <- toString(namelist$Name[rrr])
    KNO[m, 3] <- toString(datAll$GroupID[rrr])
  }
}
## Set up a workbook with three worksheets (openxlsx) and write the results
wb <- createWorkbook("Karl")
addWorksheet(wb = wb, sheetName = "Single Peaks", gridLines = FALSE)
writeDataTable(wb = wb, sheet = 1, x = homol[[1]][,1:5])
addWorksheet(wb = wb, sheetName = "Series", gridLines = TRUE)
writeData(wb = wb, sheet = 2, x = homol[[3]][,c(1,3,4,2)])
addWorksheet(wb = wb, sheetName = "Known Peaks", gridLines = TRUE)
writeData(wb = wb, sheet = 3, x = KNO)
saveWorkbook(wb,'C:/Nontarget_Data/Fluro/Simon/Krista POS GW/Results/070715 Pos Jacksonville Short.xlsx',overwrite = TRUE)
# Sheet 1 records the membership of single peaks in a series.
# Equivalent CSV export (kept for reference):
# write.csv(homol[[1]][,1:5],file.path(outdir,"results_George_CHO.csv"), row.names=FALSE)
#   columns 1-4 = peak list & peak IDs
#   column 5 = ID of the (homologue) series
# Sheet 2 defines the series and the peaks therein:
# write.csv(homol[[3]][,c(1,3,4,2)],file.path(outdir,"results2_Geogre_CHO.csv"), row.names=FALSE)
#   column 1 = series ID
#   column 2 = mean m/z increment in a series
#   column 3 = mean RT increment in a series
#   column 4 = IDs of peaks in the series
#   all other columns: series properties
#############################################
######## 5 Combine Results ##################
#############################################
# Combine the isotope-pattern, adduct and homologue groupings into components;
# comp[[7]] prints the component summary.
comp<-combine(pattern,adducts,homol,dont=FALSE, rules=c(TRUE,FALSE,FALSE));comp[[7]];
plotisotopes(comp);
plotcomp(comp,compoID=1,peakID=FALSE);
################################################
##### Plot interactive version (html) ##########
################################################
plot_interact(homol)
|
d79a67a7b7b858148035b8bd6abe94cb2df2b8bf | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Sequential/examples/SampleSize.Binomial.Rd.R | a3b0afa3ab3d046cca25ceb1c90bb9036788bcca | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 487 | r | SampleSize.Binomial.Rd.R | library(Sequential)
### Name: SampleSize.Binomial
### Title: Sample size calculation for continuous sequential analysis with
### binomial data.
### Aliases: SampleSize.Binomial
### Keywords: Continuous MaxSPRT analysis
### ** Examples
## Required sample size for relative risk RR = 5 at alpha = 0.01 with
## power 0.88, a minimum of M = 1 events to signal, and matching ratio z = 2.
result<- SampleSize.Binomial(RR=5,alpha=0.01,power=0.88,M=1,z=2)
# if you type:
result
# then you will get the following output
# (Required_N = sample size, cv = critical value):
# $Required_N
# [1] 25
# $cv
# [1] 4.59581
# $Type_I_Error
# [1] 0.009755004
# $Actual_power
# [1] 0.8855869
|
cb2aeb50b510a9ff9cd6ce4d3fe720cf953f4d3e | c1463667cf3ff1057859b4bbd956b7e1737bc187 | /R Primer Codes.R | fe66cb3c142662ed969dcd15a8f6badcd41b1fb5 | [] | no_license | kshirasaagar/R-U-Ready | 0c6ce8d8d0bb297a754d2229c86ff268755720d7 | 1caf81814cdd9cc779771f763f34bbfa2fc424c9 | refs/heads/master | 2021-08-03T19:23:51.091305 | 2021-01-28T23:06:36 | 2021-01-28T23:06:36 | 30,675,127 | 1 | 1 | null | 2021-01-28T23:08:06 | 2015-02-11T23:24:22 | R | UTF-8 | R | false | false | 15,107 | r | R Primer Codes.R | ##-----------------------------------------------------------
## Program: A Course in R.R
## Purpose: A basic introduction to R
## Author : Indrajit Sen Gupta
## Date : January 2012
##-----------------------------------------------------------
##-----------------------------------------------------------
## Load required libraries
##-----------------------------------------------------------
library(MASS)
##-----------------------------------------------------------
## Set the working directory for the session.
## NOTE(review): hard-coded Windows path - edit before running
## on another machine.
##-----------------------------------------------------------
setwd("D:/Work/_Documents/Training/R Training (Dec 2011)")
##-----------------------------------------------------------
## A simple vector in R. c() concatenates its arguments.
##-----------------------------------------------------------
x <- c(1,2,4)
x
q <- c(x,x,8)
q
##-----------------------------------------------------------
## The standard assignment operator in R is "<-".
## "=" also works at the top level but is usually discouraged.
##-----------------------------------------------------------
##-----------------------------------------------------------
## Access the third element of x via [ ] (R indexes from 1).
##-----------------------------------------------------------
x[3]
##-----------------------------------------------------------
## Subsetting a vector: elements 3 through 7 of q.
##-----------------------------------------------------------
q[3:7]
##-----------------------------------------------------------
## Calculate mean and standard deviation of q and store them.
##-----------------------------------------------------------
q.mean <- mean(q)
q.std <- sd(q)
##-----------------------------------------------------------
## Find the remainder when 38 is divided by 7 (%% is modulo).
##-----------------------------------------------------------
38 %% 7
##-----------------------------------------------------------
## List the variables currently in the workspace.
##-----------------------------------------------------------
ls()
##-----------------------------------------------------------
## Delete x from the workspace.
##-----------------------------------------------------------
rm(x)
ls()
##-----------------------------------------------------------
## Quitting R. Note that q() still calls the quit function even
## though a data vector named q was created above: when a name
## is used in call position, R searches for a function.
##-----------------------------------------------------------
q()
##-----------------------------------------------------------
## List the datasets shipped with R and with installed packages.
##-----------------------------------------------------------
data()
data(package = .packages(all.available = TRUE))
##-----------------------------------------------------------
## Use the built-in Nile dataset to plot a histogram.
##-----------------------------------------------------------
Nile
hist(Nile, col = "red")
##-----------------------------------------------------------
## Open the help page for hist().
##-----------------------------------------------------------
help(hist)
##-----------------------------------------------------------
## Count the number of odd integers in a vector.
## A for-loop works, but R is vectorised: x %% 2 == 1 yields a
## logical vector and sum() counts the TRUEs. This also handles
## the empty-vector case for free (sum of nothing is 0).
##-----------------------------------------------------------
oddcount <- function(x) {
  ## %% is the modulo operator; in R, (-3) %% 2 is 1, so
  ## negative odd numbers are counted as well.
  sum(x %% 2 == 1)
}
oddcount(c(1,2,3,5,7,9,14))
##-----------------------------------------------------------
## The same odd-count written with explicit indexing.
## seq_along(x) replaces the original 1:length(x): when x is
## empty, 1:length(x) is c(1, 0), so the loop would read x[1]
## (NA) and `if (NA)` would raise an error, whereas
## seq_along(x) is a zero-length sequence and the loop is
## simply skipped.
##-----------------------------------------------------------
oddcount2 <- function(x) {
  k <- 0                         ## running count of odd values
  for (i in seq_along(x)) {
    if (x[i] %% 2 == 1) k <- k + 1
  }
  return(k)
}
ls()
##-----------------------------------------------------------
## Some data structure examples in R
##-----------------------------------------------------------
y <- "Indrajit" ## Character vector of length 1
mode(y)
mat <- matrix(data = c(1:9), nrow = 3, byrow = TRUE) ## 3 x 3 matrix, filled row by row
mat
mat[1:3,1:2] ## First two columns, all three rows
mat[1,] ## First ROW of the matrix (mat[,1] would be the first column)
mat2 <- matrix(data = c(10:18), nrow = 3, byrow = FALSE)
mat %*% mat2 ## Matrix multiplication (element-wise product would be mat * mat2)
mylist <- list(name = "Indrajit", age = 31) ## A list can hold mixed types
mylist
mylist$name
hm <- hist(Nile, col = "red") ## hist() also returns an object of class "histogram"
summary(hm)
hm
dm <- data.frame(list(kids = c("Jack", "Jill"), ages = c(12,10))) ## A data frame
dm
dm$ages
##-----------------------------------------------------------
## Assignment: Write a function to calculate median absolute
## deviation from the median without using the median() function
## and use the built-in mad() function to validate your results
##-----------------------------------------------------------
##-----------------------------------------------------------
## Some useful functions in R: all(), any()
##-----------------------------------------------------------
##-----------------------------------------------------------
## Vector in, vector out: comparisons are element-wise;
## all()/any() reduce a logical vector to a single TRUE/FALSE.
##-----------------------------------------------------------
u <- c(5,2,8)
v <- c(1,3,9)
u > v
all(u > 4)
any(u > 7)
##-----------------------------------------------------------
## Validate function arguments and fail fast with stop()
##-----------------------------------------------------------
## No checking: a vector c silently recycles against x
f1 <- function(x, c) (x + c)^2
f1(1:3, 1:3)
## Guarded version: only a scalar shift c is accepted
f2 <- function(x, c) {
  if (length(c) != 1) {
    stop("vector c not allowed")
  }
  (x + c)^2
}
f2(1:4, 2)
##-----------------------------------------------------------
## sapply() = "simplified apply": run f3 over 1..8 and let
## sapply collapse the per-element results into a 2 x 8
## matrix, which t() then flips to 8 rows of (z, z^2).
##-----------------------------------------------------------
f3 <- function(z) {
  c(z, z^2)
}
t(sapply(1:8, f3))
##-----------------------------------------------------------
## NULL and NA values in R: NA marks a missing value,
## NULL is the absence of a value altogether.
##-----------------------------------------------------------
x <- NULL
x <- c(88, NA, 12, 168, 13)
x
mean(x)  ## NA propagates: the mean of data containing NA is NA
mean(x, na.rm = TRUE)  ## drop the NAs before averaging
x1 <- c(0, 2, 4)
1/x1  ## division by zero gives Inf, not an error
is.na(x) ## Test for NA element-wise (x == NA would give NA, never TRUE)
x <- c(88, NULL, 12, 168, 13)
x  ## NULL simply disappears when concatenated
##-----------------------------------------------------------
## Use of NULL in building up a vector
##-----------------------------------------------------------
rm(list = ls()) ## Delete all objects from the workspace
z <- NULL
for (i in 1:10) if (i %% 2 == 0) z <- c(z,i) ## Works, but growing a vector in a loop is inefficient :-(
z
##-----------------------------------------------------------
## Subsetting vectors with a logical (Boolean) index
##-----------------------------------------------------------
z <- c(5, 2, -3, 8)
z[1:4]
z*z > 8  ## a logical vector, one entry per element of z
w <- z[z * z > 8] ## keep only the elements whose square exceeds 8
w ## Evaluating z*z > 8 gave a logical vector, used here as an index
z[z < 0] <- 0 ## Replace all negative values with 0
z
##-----------------------------------------------------------
## subset() versus [ ] when NA is present: [ ] returns an NA
## for each NA in the mask, subset() silently drops them.
##-----------------------------------------------------------
x <- c(6, 1:3, NA, 12)
x
x[x > 5]
subset(x,x > 5)
##-----------------------------------------------------------
## which() returns the indices at which a condition is TRUE
## (NAs in the condition are ignored)
##-----------------------------------------------------------
which(x > 5)
which(is.na(x))
##-----------------------------------------------------------
## Applying functions to rows/columns of a matrix using apply()
## Usage: apply(x, MARGIN, FUN, ARG)
##   x: matrix (or array) on which to apply the function
##   MARGIN: 1 applies over rows, 2 over columns
##   FUN: function to be applied
##   ARG: optional further arguments passed on to FUN
##-----------------------------------------------------------
z <- matrix(1:6, ncol = 2)
apply(z, 1, mean)  ## row means: one value per row
##-----------------------------------------------------------
## Differences between apply, lapply, sapply, tapply
##-----------------------------------------------------------
##-----------------------------------------------------------
## apply: apply a function over the rows (MARGIN = 1) or
## columns (MARGIN = 2) of a matrix
##-----------------------------------------------------------
# A 4 x 4 matrix of uniform random numbers
M <- matrix(runif(16),4,4)
# row minima: one value per row
apply(X = M, MARGIN = 1, FUN = min)
# column maxima: one value per column
apply(M, 2, max)
##-----------------------------------------------------------
## lapply: apply a function to each element of a list and
## get a list back
##-----------------------------------------------------------
x <- list(a = 1, b = 1:3, c = 10:100, d = "abcd")
lapply(x, FUN = length)
##-----------------------------------------------------------
## sapply: like lapply, but simplify the result to a vector
## (or matrix) when possible
##-----------------------------------------------------------
sapply(x, FUN = length)
## Draw three standard-deviation-1 normal variates centred at x;
## sapply then stacks one column of draws per mean value 1..5.
f1 <- function(x) rnorm(n = 3, mean = x)
sapply(1:5, FUN = f1)
##-----------------------------------------------------------
## tapply: apply a function to subsets of a vector, where the
## subsets are defined by a parallel grouping vector, usually
## a factor
##-----------------------------------------------------------
x <- 1:20
y <- factor(rep(letters[1:5], each = 4))  ## groups a,a,a,a,b,b,b,b,...
tapply(x, INDEX = y, FUN = sum)  ## sum of x within each level of y
##-----------------------------------------------------------
## Appending to matrices: cbind() adds columns, rbind() rows
##-----------------------------------------------------------
z
one <- c(1,2)
cbind(one, z)
two <- rep(2,2)
rbind(z, two)
##-----------------------------------------------------------
## Merging data frames on their common key column ("kids")
##-----------------------------------------------------------
d1 <- data.frame(kids = c("Jack", "Jill", "Jillian", "John"),
states = c("CA","MA","MA","HI"))
d2 <- data.frame(ages = c(10, 7, 12),
kids = c("Jill","Lillian", "Jack"))
merge(d1, d2, all = TRUE) # Outer join: keep all rows from both
merge(d1, d2, all.x = TRUE) # Left join: keep every row of d1
merge(d1, d2, all.y = TRUE) # Right join: keep every row of d2
merge(d1, d2, all = FALSE) # Inner join: matching rows only (the default)
##-----------------------------------------------------------
## Merging when the key columns have different names:
## use by.x / by.y to name them explicitly
##-----------------------------------------------------------
d2 <- data.frame(ages = c(10, 7, 12),
pals = c("Jill","Lillian", "Jack"))
merge(d1, d2, by.x = "kids", by.y = "pals")
##-----------------------------------------------------------
## Reading external files into R: read.table, read.csv
##-----------------------------------------------------------
rm(list = ls())  ## clear the workspace before the I/O examples
sales <- read.table("sales.csv", header = TRUE, sep= ",")  ## expects sales.csv in the working directory
str(sales)
head(sales)
sink("myoutput.txt") # Redirect all console output to an external file
sink() # Restore output to the console
##-----------------------------------------------------------
## Writing data frames to external files: write.table
##-----------------------------------------------------------
write.table(sales, file = "sales_out.csv", append = FALSE,
sep = ",", col.names = TRUE, row.names = FALSE)
cat("Indrajit\n",file = "u.txt")  ## cat() can also write to a file
cat("Sengupta\n", file = "u.txt", append = TRUE)  ## append = TRUE adds a second line
##-----------------------------------------------------------
## Working with factors in R
##-----------------------------------------------------------
incomes <- read.table("income.csv", header= TRUE, sep = ",")
incomes
str(incomes)
attach(incomes) ## Puts the columns on the search path; note attach() is generally discouraged
tapply(Income, Gender, mean)  ## mean income per gender
incomes$Over50 <- ifelse(Age > 50,1,0) ## Create a 0/1 dummy variable
tapply(Income, list(incomes$Gender, incomes$Over50), mean)  ## two-way group means
str(incomes)
incomes$Over50 <- factor(incomes$Over50)  ## convert the dummy to a factor
str(incomes)
summary(incomes$Over50)  ## level counts, now that Over50 is a factor
##-----------------------------------------------------------
## SQL in R via the sqldf package: query data frames with SQL
##-----------------------------------------------------------
ls()
install.packages("sqldf")  ## one-time installation; not normally part of a script
library(sqldf)
sqldf("select * from incomes limit 3")
highinc <- sqldf("select *
from incomes
where Income > 80000")
sqldf("select Gender, max(Income) from
incomes group by Gender")
##-----------------------------------------------------------
## Connecting to databases in R via RODBC
##-----------------------------------------------------------
library(RODBC)
ch <- odbcConnect("integration") ## Open a connection to the "integration" ODBC data source
##-----------------------------------------------------------
## Get a list of tables in the database
##-----------------------------------------------------------
table.list <- sqlTables(ch)
table.list[,c(3,4)]
## Columns 3 and 4 hold the table name and table type
##-----------------------------------------------------------
## Pull a whole table from ODBC into an R data frame
##-----------------------------------------------------------
customer <- sqlFetch(ch,"CustomerMaster", max= 20)  ## max caps the number of rows fetched
str(customer)
head(customer)
##-----------------------------------------------------------
## Run an arbitrary SQL query against the database
##-----------------------------------------------------------
customer2 <- sqlQuery(ch, "select Customer_Name, City from CustomerMaster",
max = 50)
customer2
##-----------------------------------------------------------
## Reading SAS datasets in R via the sas7bdat package
##-----------------------------------------------------------
library(sas7bdat)
trans <- read.sas7bdat("transaction.sas7bdat")
head(trans)
##-----------------------------------------------------------
## Graphs in R
##-----------------------------------------------------------
sales <- read.table("sales.csv", header = TRUE, sep= ",")
newsales=sales[1:500,c("quantity","sales","visits","ind","basket")]  ## first 500 rows, selected columns ("=" assignment; "<-" is preferred)
newsales
attach(newsales)  ## attach() is discouraged; used here so column names work as bare variables below
plot(newsales$quantity,newsales$basket)  ## scatter plot with default labels
boxplot(newsales$sales)
plot(quantity, basket, xlab = "quantity",ylab= "basket")  ## same plot with axis labels
## Fully customised scatter plot: axis limits, plot symbol,
## colour, box type and symbol size
plot(quantity, basket, xlab = "Quantity of Items Purchased",ylab="Value of Basket",
xlim = c(0,500), ylim=c(0,1200), pch="+", col = "blue", bty = "L", cex=0.5)
##-----------------------------------------------------------
## Send graphics output to a PDF file instead of the screen
##-----------------------------------------------------------
pdf("fruitpie2.pdf")  ## open the PDF graphics device
fruits <- c(41,59,78,23,34)
names(fruits) <- c("grape", "chickoo", "mango", "papaya", "orange")
pie(fruits,col=c("black","brown","yellow","green","orange"),
main="Pie Chart for Fruit Juice Consumption")
dev.off()  ## close the device so the file is actually written
##-----------------------------------------------------------
## Two curves on the same graph: plot() then points()
##-----------------------------------------------------------
theta = seq(10,20,0.1)
thetapower = 1 - pnorm(15,theta, 3)  ## power of the test when the cutoff is x = 15
plot(theta, thetapower, type = "l", col="red",xlab = "theta", ylab = "power")
thetapower = 1 - pnorm(13, theta, 3)  ## recompute for cutoff x = 13
points(theta, thetapower, type = "l", lty = "dashed", col="blue")  ## overlay on the open plot
legend(10,0.8, legend = c("x=15", "x=13"), lty = c("solid", "dashed"))
title("Power Function for Z Test for Normal Mean theta sd=3")
rm(theta)
rm(thetapower)
##----------------------------------------------------------------------------------------------
##
##----------------------------------------------------------------------------------------------
##----------------------------------------------------------------------------------------------
##
##----------------------------------------------------------------------------------------------
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.