blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 327 | content_id stringlengths 40 40 | detected_licenses listlengths 0 91 | license_type stringclasses 2 values | repo_name stringlengths 5 134 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringclasses 46 values | visit_date timestamp[us]date 2016-08-02 22:44:29 2023-09-06 08:39:28 | revision_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | committer_date timestamp[us]date 1977-08-08 00:00:00 2023-09-05 12:13:49 | github_id int64 19.4k 671M ⌀ | star_events_count int64 0 40k | fork_events_count int64 0 32.4k | gha_license_id stringclasses 14 values | gha_event_created_at timestamp[us]date 2012-06-21 16:39:19 2023-09-14 21:52:42 ⌀ | gha_created_at timestamp[us]date 2008-05-25 01:21:32 2023-06-28 13:19:12 ⌀ | gha_language stringclasses 60 values | src_encoding stringclasses 24 values | language stringclasses 1 value | is_vendor bool 2 classes | is_generated bool 2 classes | length_bytes int64 7 9.18M | extension stringclasses 20 values | filename stringlengths 1 141 | content stringlengths 7 9.18M |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e5600e211b854f996cc6a0afbd8ff0a854713c5e | e625eedd8ae078dd88a7b1630727bb3a4e9fd76e | /code/plot_power.R | 873ebaa71e0cd17623588f697256b495e2613e24 | [
"MIT"
] | permissive | yuyouling/Sze_Obesity_mBio_2016 | d8c5b746cef6bfc0b028061a002b69b094f1f098 | 78166b568af1f79dc95cb5e2b12fb45b3cf78197 | refs/heads/master | 2021-12-22T17:29:26.568788 | 2017-10-17T12:51:37 | 2017-10-17T12:51:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,876 | r | plot_power.R | pch <- c(baxter=21, escobar=21, goodrich=21, hmp=21, ross=21,
schubert=21, turnbaugh=21, wu=21, zeevi=21, zupancic=21)
# Per-study outline colors for plot symbols; the five colors cycle twice so
# that, together with the open vs. filled fills in `bg`, all ten studies
# remain distinguishable in the figures.
col <- c(baxter="black", escobar="red", goodrich="green", hmp="blue",
ross="orange", schubert="black", turnbaugh="red", wu="green",
zeevi="blue", zupancic="orange")
# Symbol fill colors: white (open circles) for the first five studies,
# fills matching `col` (solid-looking circles) for the last five.
bg <- c(baxter="white", escobar="white", goodrich="white", hmp="white",
ross="white", schubert="black", turnbaugh="red", wu="green",
zeevi="blue", zupancic="orange")
# Display labels used in the figure legend (note: `names` here is a data
# vector; calls like names(x) still resolve to the base function).
names <- c(baxter="Baxter", escobar="Escobar", goodrich="Goodrich", hmp="HMP",
ross="Ross", schubert="Schubert", turnbaugh="Turnbaugh", wu="Wu",
zeevi="Zeevi", zupancic="Zupancic")
#' Render the two-panel power/sample-size figure for one analysis method.
#'
#' Reads `data/process/<method>_power.predicted` and, for every metric in it,
#' writes a TIFF to `results/figures/` with:
#'   A: power to detect each effect size at the original sampling effort
#'   B: samples needed per group (log y axis)
#' Uses the file-level `pch`, `col`, `bg`, and `names` study lookup vectors.
#'
#' @param method Method name (e.g. "alpha"); selects the input file and the
#'   x-axis label wording.
#' @return Called for its side effect of writing TIFF files.
build_plots <- function(method){
# One prediction table per method; one output figure per metric within it.
pred <- read.table(file=paste0("data/process/", method, "_power.predicted"), header=T, stringsAsFactors=FALSE)
for(metric in unique(pred$metric)){
# Sort by effect size then study so the staggered points line up.
sub <- pred[pred$metric == metric,]
sub <- sub[order(sub$effect_size, sub$study),]
effects <- unique(sub$effect_size)
n_effects <- length(effects)
n_studies <- length(unique(sub$study))
# Horizontal offsets so studies at the same effect size don't overplot.
offsets <- seq(-0.3, 0.3, length.out=n_studies)
tiff(file=paste0("results/figures/", method, "_", metric, "_power.tiff"),
width=6.0, height=5, units='in', res=300)
# Layout: panels A and B stacked on the left, legend column on the right.
layout(matrix(c(1,1,3,2,2,3,0,0,0), nrow=3, byrow=T), width=c(1,1,0.4), height=c(1,1,0.2))
## Panel A: power at original sampling effort.
par(mar=c(0.5,5,0.5,0.5))
plot(NA, xlim=c(0.7,4.3), ylim=c(0,1), ylab="Power to Detect Effect Size\nWith Original Sampling Effort", xlab="", axes=F)
for(idx in seq_len(n_effects)){
pts <- sub[sub$effect_size==effects[idx],]
points(x=idx+offsets, y=pts$power, col=col[pts$study],
bg=bg[pts$study], lwd=2, pch=pch[pts$study])
}
axis(1, at=1:n_effects, labels=FALSE)
axis(2, las=2)
box()
mtext(side=2, at=1.0, line=3, text="A", las=2, font=2, cex=1)
abline(v=c(1.5, 2.5, 3.5))
## Panel B: required sample size per group on a log scale.
par(mar=c(0.5,5,0.5,0.5))
plot(NA, xlim=c(0.7,4.3), ylim=c(1,max(sub$balanced_n)), ylab="Number of Samples\nNeeded per Group", xlab="", axes=F, log='y')
for(idx in seq_len(n_effects)){
pts <- sub[sub$effect_size==effects[idx],]
points(x=idx+offsets, y=pts$balanced_n, col=col[pts$study],
bg=bg[pts$study], lwd=2, pch=pch[pts$study])
}
axis(1, at=1:n_effects, labels=100*effects)
axis(2, las=2)
box()
mtext(side=2, at=1.2*(10^par()$usr[4]), line=3, text="B", las=2, font=2, cex=1)
# Alpha-diversity effect sizes are percentages; others are Cohen's d.
if(method == 'alpha'){
mtext(1, line=2, text = "Effect Size (%)", cex=0.7)
} else {
mtext(1, line=2, text = "Effect Size (Cohen's d)", cex=0.7)
}
abline(v=c(1.5, 2.5, 3.5))
## Legend panel (blank plot used purely as a canvas).
par(mar=c(0,0,0,0))
plot(NA, xlim=c(0,1), ylim=c(0,1), axes=F, xlab="", ylab="")
legend(x=0.1, y=0.66, legend=names, pch=pch, col=col, pt.bg=bg, pt.cex=1.5,
pt.lwd=2)
dev.off()
}
}
|
2317d75cbbeb11bb33a2b56649faa0f622fafa36 | ef572bd2b0515892d1f59a073b8bf99f81d6a734 | /man/unPackDataPackName.Rd | 938b6de3bd682f28bcd189d2a7021d2a1ba3a391 | [
"CC0-1.0"
] | permissive | pepfar-datim/datapackr | 5bc604caa1ae001b6c04e1d934c0c613c59df1e6 | 9275632673e45948db6846513a53c1436cfc0e47 | refs/heads/master | 2023-08-30T23:26:48.454382 | 2023-08-11T13:01:57 | 2023-08-11T13:01:57 | 170,350,211 | 9 | 7 | CC0-1.0 | 2023-09-11T21:53:24 | 2019-02-12T16:19:47 | R | UTF-8 | R | false | true | 554 | rd | unPackDataPackName.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/unPackDataPackName.R
\name{unPackDataPackName}
\alias{unPackDataPackName}
\title{Extract the name of the datapack}
\usage{
unPackDataPackName(submission_path, tool)
}
\arguments{
\item{submission_path}{Local path to the file to import.}
\item{tool}{What type of tool is the submission file? Default is "Data Pack".}
}
\value{
Character vector of the name of the data pack.
}
\description{
When supplied a submission path, will return the name of the datapack.
}
|
d0e4a01708f3e4faf432ca69400a8ae86c70bf39 | 2a691a3598dd431eabe4b0f47f6b7df0bf4467ce | /man/cv.softKv.Rd | 102c741f30222152a90a773f0ef38280ea980055 | [] | no_license | mengchen18/omic3plus | 62a26efbf0ae27cadfd1746df74f2602282b0ef9 | 34766eea3c4cbf99d0e4e05f19a46fbfa0dd5234 | refs/heads/master | 2021-01-21T13:25:37.709955 | 2018-01-23T21:50:57 | 2018-01-23T21:50:57 | 102,119,183 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,297 | rd | cv.softKv.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cv.softSVD.R
\name{cv.softKv}
\alias{cv.softKv}
\title{cross-validation for softSVD}
\usage{
cv.softKv(x, nf = 1, kv.opt = c(0.3, 0.5, 0.8), wv = 1, wu = 1,
pos = FALSE, maxiter = 50, tol = sqrt(.Machine$double.eps),
verbose = FALSE, init = c("svd", "average")[2], ncores = 1, fold = 5,
nstart = 1, seed = NULL, loorss = FALSE)
}
\arguments{
\item{x}{input matrix}
\item{nf}{number of component}
\item{kv.opt}{optional value for sparsity on right singular value}
\item{wv}{weight for columns}
\item{wu}{weight for rows}
\item{pos}{whether to retain non-negative results}
\item{maxiter}{maximum number of iteration}
\item{tol}{convergence tolerance}
\item{verbose}{if print the progress}
\item{init}{how to initialize the algorithm. if no sparsity, svd is fast.}
\item{ncores}{the number of cores used, passed to mclapply}
\item{fold}{fold number in cross validation}
\item{nstart}{how many time the k-fold cross validation should be done}
\item{seed}{set seed}
\item{loorss}{if the Leave-one-out procedure should be used in matrix reconstruction}
}
\description{
This function uses the k-fold cross-validation method to optimize the sparsity
of the right singular values.
}
\seealso{
\code{\link{cv.softSVD}}
}
|
6f2d9aab37ae5b9e7ef82da2e22cb8e259cc7c17 | f4f1cd0987ad6c7a71fc400f41ca3f22e3f6ed53 | /man/pImport.Rd | 6f2bebd95caf2a018722b3ec0a81384b0fc6dd08 | [] | no_license | jordandeherrera/prioritizationMatrix | 6ee0e89e12e281c111e2794d5565e7cccd3ab0ba | 16484b6f3a6a97e90dc3e899b57e9d77004e0f52 | refs/heads/master | 2022-01-21T03:50:31.217496 | 2019-06-25T05:55:01 | 2019-06-25T05:55:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 349 | rd | pImport.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/functions.R
\docType{data}
\name{pImport}
\alias{pImport}
\title{Sample Priority Values}
\format{An object of class \code{numeric} of length 4.}
\usage{
pImport
}
\description{
An example of priority numeric values.
}
\examples{
\dontrun{
pImport
}
}
\keyword{datasets}
|
359d60803b2171a85519a43a77542bdfefb42755 | df07a558779d1f2da01e1f47bda210a233028035 | /tests/testthat/test-features-brolgar.R | 16f1d7ef965fb7316a9cdb365c93d94501ccdb04 | [
"MIT"
] | permissive | jesserp92/brolgar | 1d208ed805eff9c01b7c2291719abf8670bf453e | 93d105dce4d88ca21f3035b1688dd2ad0ef78a99 | refs/heads/master | 2023-07-03T08:18:18.389004 | 2021-08-11T01:55:56 | 2021-08-11T01:55:56 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,111 | r | test-features-brolgar.R | wages_brolgar <- wages %>%
features(ln_wages, feat_brolgar)
# Pin the full set of summary-feature columns that feat_brolgar computes per
# id: location (min/max/median/mean/quartiles), spread (sd/var/mad/iqr),
# monotonicity flags, and first-difference summaries.
test_that("feat_brolgar returns the right names", {
expect_equal(names(wages_brolgar),
c("id",
"min",
"max",
"median",
"mean",
"q25",
"q75",
"range1",
"range2",
"range_diff",
"sd",
"var",
"mad",
"iqr",
"increase",
"decrease",
"unvary",
"diff_min",
"diff_q25",
"diff_median",
"diff_mean",
"diff_q75",
"diff_max",
"diff_var",
"diff_sd",
"diff_iqr"
))
})
# One row per subject (888 in the wages data) and 26 feature columns.
test_that("feat_brolgar returns the right dimensions", {
expect_equal(dim(wages_brolgar),
c(888, 26))
})
library(dplyr)
# Every subject id should survive the feature computation.
test_that("feat_brolgar returns all ids", {
expect_equal(n_distinct(wages_brolgar$id), 888)
})
|
985d6a8ff93d6b850b66c8fdc9d1d188df8bc340 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/AER/examples/USInvest.Rd.R | 14f4fa10cda20eb7e1c72deb22f839af0cce27a0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 894 | r | USInvest.Rd.R | library(AER)
### Name: USInvest
### Title: US Investment Data
### Aliases: USInvest
### Keywords: datasets
### ** Examples
data("USInvest")
## Chapter 3 in Greene (2003)
## transform (and round) data to match Table 3.1
us <- as.data.frame(USInvest)
# Deflate investment and GNP by the price index (with a 0.1 rescale).
us$invest <- round(0.1 * us$invest/us$price, digits = 3)
us$gnp <- round(0.1 * us$gnp/us$price, digits = 3)
# Annual inflation in percent; the first value (4.4) is supplied by hand
# because diff() drops one observation.
us$inflation <- c(4.4, round(100 * diff(us$price)/us$price[-15], digits = 2))
us$trend <- 1:15
# Keep/reorder a subset of columns (indices 2, 6, 1, 4, 5) per Table 3.1.
us <- us[, c(2, 6, 1, 4, 5)]
## p. 22-24
coef(lm(invest ~ trend + gnp, data = us))
coef(lm(invest ~ gnp, data = us))
## Example 3.1, Table 3.2
# Simple correlations of the first column with the others ...
cor(us)[1,-1]
# ... and partial correlations from the scaled inverse correlation matrix.
pcor <- solve(cor(us))
dcor <- 1/sqrt(diag(pcor))
pcor <- (-pcor * (dcor %o% dcor))[1,-1]
## Table 3.4
fm <- lm(invest ~ trend + gnp + interest + inflation, data = us)
fm1 <- lm(invest ~ 1, data = us)
# F test of the full model against the intercept-only model.
anova(fm1, fm)
## More examples can be found in:
## help("Greene2003")
|
262d1b80f537d3ae7772e61d52d1aa6e2cb61ebb | 35df119b1c18fe0df6a62f03a40ae0fc20fe11fd | /6-gganimations.R | 7c8e478693230061c04b42cebdf323a823311a67 | [] | no_license | linchen-deng/IMDB-webscraping | 16774c7a436c8f8d6bae88e971c3abecaae06034 | 0a12f5437ba90c2c064615cafacc23e31c45a575 | refs/heads/master | 2020-09-20T06:50:19.190257 | 2020-02-05T22:27:50 | 2020-02-05T22:27:50 | 224,403,921 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,901 | r | 6-gganimations.R | setwd(dirname(rstudioapi::callFun("getActiveDocumentContext")$path))
source("3-get_data.R")
# This file generates animations that describe the change of number of movies by genres throughout years from 1950 - 2020
# =============== First Animation ============================
# Make sure the cache directory exists before anything is written to it.
if (!dir.exists("./plot_data")){
dir.create(file.path(".","plot_data"))
}
# Only rebuild the animations when no cached RData file is present.
if (!"animation.RData" %in% dir("plot_data")){
# stack all data frames into one data frame
final_table <- data.frame()
for (i in 1:length(genre_list)){
final_table <- final_table %>%
rbind(year_data_list[[i]] %>%
num_movie_per_year_by_genre(genre=genre_list[i]))
}
theme_set(theme_classic())
# Rank genres within each year (rank 1 = most movies) for the bar race.
Final_table1 <- final_table %>%
group_by(years) %>%
mutate(rank = min_rank(-movie_peryear)) %>%
ungroup()
# Animated bar race: one state per year, bars ordered by within-year rank.
moving_bar <- ggplot(Final_table1,aes(rank, group= genre,fill = as.factor(genre))) +
geom_tile(aes(y = movie_peryear/2,height = movie_peryear)) +
geom_text(aes(y = 0, label = paste(genre, " ")), vjust = 0.2, hjust = 1) +
coord_flip(clip = "off", expand = FALSE) +
scale_y_continuous(labels = scales::comma) +
scale_x_reverse() +
guides(color = FALSE, fill = FALSE) +
labs(title='{closest_state}', x = "", y = "Movies per year") +
theme(plot.title = element_text(hjust = 0, size = 22),
axis.ticks.y = element_blank(),
axis.text.y = element_blank(),
plot.margin = margin(1,1,1,4, "cm"))+
transition_states(years, transition_length = 4, state_length = 1) +
ease_aes('cubic-in-out')
moving_animation <- animate(moving_bar, fps = 20, duration = 25, width = 800, height = 600)
# ================= second animation ==========================
# Interactive plotly bubble chart animated over years, log-scaled x axis.
movie_per_year <- Final_table1 %>%
plot_ly(
x = ~years,
y = ~movie_peryear,
size = ~ movie_peryear,
color = ~genre,
frame = ~years,
text = ~genre,
hoverinfo = "text",
type = 'scatter',
mode = 'markers'
)%>%
layout(
xaxis = list(
type = "log"
)
)
# ================== third animation ==================================
# Line chart revealed over time for four selected genres.
popular <- Final_table1 %>%
filter(genre %in% c("Action","History","Romance","War"))%>%
arrange(years)
trend_of_movies <- ggplot(popular,aes(x = years, y = movie_peryear, color= genre))+
geom_line() +
geom_point() +
scale_color_viridis(discrete = TRUE) +
ggtitle("Trend of movies") +
theme_ipsum() +
ylab("Movies per year") +
transition_reveal(years)
# saving animation results for future use
save(moving_animation, movie_per_year, trend_of_movies,file="plot_data/animation.RData")
} else {
cat("Animations are saved already, reading from folder: plot_data/animation.RData \n")
}
|
87e8b52be1317645aaec4c9c0c72c4da27ef8640 | 4359d75816ac645b6b80e72b75068f1d4ffc5164 | /man/plot_lollipop.Rd | 808d773a9b48f468796604c3b4c219baab7e45f9 | [] | no_license | Changing-Cities-Research-Lab/seattleViz | 04e5e3cfad30a57b632614fed310729ebc2b0a7b | fbcb42776e3dbf74153f24d509801d7b5cfb288d | refs/heads/main | 2023-04-13T15:55:07.183707 | 2021-04-12T23:06:48 | 2021-04-12T23:06:48 | 337,885,525 | 0 | 2 | null | 2021-02-25T06:54:14 | 2021-02-10T23:48:54 | null | UTF-8 | R | false | true | 1,223 | rd | plot_lollipop.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/plot_lollipop.R
\name{plot_lollipop}
\alias{plot_lollipop}
\title{Produce lollipop plot by King County HRA.}
\usage{
plot_lollipop(
data,
var,
limits,
title = NULL,
x_title = NULL,
scale_type = "numeric",
save = F,
savename = "plot.png",
caption = paste0(frb_acs_caption_splitline, ses_caption)
)
}
\arguments{
\item{data}{Data with column for variable of interest with "facet" and "facet_col"}
\item{var}{Column name of variable of interest.}
\item{limits}{Y-axis limits.}
\item{title}{Plot title}
\item{x_title}{Title to display along x-axis}
\item{scale_type}{Y-axis scale type: "numeric" or "percent"}
\item{save}{T if user would like to return plot object and save file, F (default) to just return object.}
\item{savename}{File name of map for saving.}
\item{caption}{Figure caption}
}
\value{
Lollipop plot of variable by HRA and SES.
}
\description{
This function takes in data and produces a horizontal lollipop plot by
King County HRA The order of these categories can be adjusted by changing
the factor levels of the facet variable. Input data needs columns for
variable of interest (titled "var") and HRA.
}
|
6c8c7661faab20a0807c3a11b63465da2d9f77aa | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/NISTunits/examples/NISTdarcyTOmeterSqrd.Rd.R | 7ace7367676ebb1a26a9e523c13c15abaa93008d | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 203 | r | NISTdarcyTOmeterSqrd.Rd.R | library(NISTunits)
### Name: NISTdarcyTOmeterSqrd
### Title: Convert darcy 14 to meter squared
### Aliases: NISTdarcyTOmeterSqrd
### Keywords: programming
### ** Examples
# Example: convert a permeability of 10 darcy into square metres.
NISTdarcyTOmeterSqrd(10)
|
98b88269665ba9ae0810958a81aa6742c5c99d5c | e428429992f8a90c114570f593b205a44cc96f9f | /cod.R | 16da288da1f77c7503709ea3d7da01d8cd693f77 | [] | no_license | Ashlo/R | b9c8aea5d28b6de1085bc866683c4010755fedf2 | 01e16f144e49f23946ad63d68c5ccb97588e3120 | refs/heads/master | 2023-01-30T01:52:03.795495 | 2020-12-15T12:30:16 | 2020-12-15T12:30:16 | 321,362,375 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,172 | r | cod.R | # load required libraries
library(caTools)
library(class)
# function definitions
# Drop the identifier column and recode Gender as a factor.
#
# Args:
#   df: data frame whose first column is a row identifier and which has a
#       Gender column containing "Male"/"Female".
#
# Returns:
#   The data frame without its first column, with Gender converted to a
#   factor whose levels are "1" (Male) and "2" (Female).
clean.data <- function(df) {
  df <- df[-1]
  # factor() coerces the numeric labels to character, so levels are "1"/"2".
  df$Gender <- factor(df$Gender, levels = c("Male", "Female"), labels = c(1, 2))
  df
}
# Read the raw Social Network Ads CSV from the working directory and return
# it in cleaned form (identifier dropped, Gender recoded by clean.data()).
load.data <- function() {
  raw <- read.csv('Social_Network_Ads.csv')
  clean.data(raw)
}
# Build a reproducible train/test split flag vector, stratified on the
# Purchased outcome: TRUE marks a training row (80% of the data).
split.data <- function(df) {
  set.seed(12345)  # fixed seed so the split is identical on every run
  sample.split(df$Purchased, SplitRatio = 0.8)
}
# Fit a linear probability model: Purchased regressed on all other columns.
# Returns the fitted "lm" object.
classify.lm <- function(df) {
  lm(Purchased ~ ., data = df)
}
# Score new data with a fitted model; thin wrapper around predict().
# NOTE(review): the dot makes this name look like an S3 predict() method for
# class "values"; it is intended as a plain function.
predict.values <- function(model, df) {
  predict(model, newdata = df)
}
# Percentage accuracy of predicted vs. actual class labels, computed from
# the diagonal of the confusion matrix.
evaluate <- function(expected, observed) {
  confusion <- table(observed, expected)
  100 * sum(diag(confusion)) / sum(confusion)
}
# function calls
# End-to-end run: load & clean the data, make the 80/20 stratified split,
# fit a linear probability model on the training rows, threshold the
# predictions at 0.5, and report percentage accuracy on the held-out rows.
df = load.data()
result = split.data(df)
df.train = df[result == T, ]
df.test = df[result == F, ]
model.lm = classify.lm(df.train)
predictions.lm = predict.values(model.lm, df.test)
# Convert the continuous lm output into 0/1 class labels.
predictions.lm = ifelse(predictions.lm >= 0.5, 1, 0)
accuracy.lm = evaluate(predictions.lm, df.test$Purchased)
print(accuracy.lm)
|
541cbd84245155ecf70dd9f94c2c121cef88ecf7 | fadd25738df09516aedb88a53579e7e121ad51f4 | /R/inspectMeasure.R | 4b96c88bd6937dd52a5cb3f53775959a07e893c2 | [] | no_license | JangSeonghoon/maintcivil | 2630dee5df3512c5f9ea39b71169b590138e4ddc | 7a5c61eedfdd4bb3b10f506b8e11aac11b475f30 | refs/heads/master | 2021-09-06T21:48:32.855105 | 2018-02-12T05:23:12 | 2018-02-12T05:23:12 | 103,596,243 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,945 | r | inspectMeasure.R | #'
#'inspect_Measure_graph
#'
#' @param None
#' @return caution Tb(show spots to fix and working priorty)
devtools::use_package("magrittr")
devtools::use_package("stringr")
devtools::use_package("dplyr")
devtools::use_package("DMwR")
devtools::use_package("ggplot2")
devtools::use_package("rJava")
devtools::use_package("DBI")
devtools::use_package("RJDBC")
#' @importFrom compiler cmpfun
#' @importFrom magrittr %>%
#' @importFrom stringr str_extract
#' @importFrom stringr str_split
#' @importFrom stringr str_c
#' @importFrom dplyr filter
#' @importFrom dplyr mutate
#' @importFrom dplyr left_join
#' @importFrom DMwR centralImputation
#' @importFrom ggplot2 ggplot
#' @importFrom ggplot2 aes
#' @importFrom ggplot2 geom_line
#' @importFrom ggplot2 geom_abline
#' @importFrom ggplot2 theme_bw
#' @importFrom ggplot2 labs
#' @importFrom ggplot2 ggsave
#' @importFrom ggplot2 scale_x_continuous
#' @importFrom ggplot2 theme
#' @importFrom ggplot2 element_text
#' @importFrom RJDBC JDBC
#' @importFrom DBI dbConnect
#' @importFrom DBI dbSendQuery
#' @importFrom DBI dbExecute
#' @importFrom DBI dbFetch
#' @importFrom DBI dbHasCompleted
#' @importFrom DBI dbWriteTable
#' @importFrom DBI dbDisconnect
#' @export
inspect=function(order){
A=cmpfun(
function(){
# NOTE(review): this clears only the (empty) local environment of this
# anonymous function, not the global workspace.
rm(list=ls())
# Resolve the web-app root directory depending on the host OS.
if(Sys.info()['sysname']=="Windows"){
path=
paste0(
Sys.getenv("CATALINA_HOME"),"/webapps/bigTeam/"
)
}else if(Sys.info()['sysname']=="Linux"){
path="/home/jsh/eclipse-workspace/bigTeam/src/main/webapp/"
}
# Load the saved measurement data frames; the last four objects reported by
# ls() after the load are taken (positionally) as the four inspection runs.
load(paste0(path,"RData/inspect.RData"))
inspect_file=ls()[(length(ls())-3):length(ls())]
# Oracle connection via JDBC.
# NOTE(review): database credentials are hard-coded here -- move them to
# configuration before this goes any further.
drv=JDBC("oracle.jdbc.driver.OracleDriver",paste0(path,"driver/ojbc6.jar"))
conn=dbConnect(drv,"jdbc:oracle:thin:@localhost:1521:xe","korail150773","0818")
# Fetch the requested record: V4 = defect kind label, V8 = peak value,
# V9 = location (used below in km).
rs=dbSendQuery(conn,
paste0("select V4,V8,V9 FROM TEMPORARY WHERE V1=",order))
d=dbFetch(rs)
rs=dbSendQuery(conn,
paste0("select V4 FROM TEMPORARY"))
# Map the human-readable defect label to a measurement-kind code.
# (The "PRFILE" spellings below match values as stored in the table.)
kind=ifelse(d[1,1]=="ALIGNMENT LEFT","ALL10M",
ifelse(d[1,1]=="ALIGNMENT RIGHT","ALR10M",
ifelse(d[1,1]=="PROFILE LEFT","PRL10M",
ifelse(d[1,1]=="PRFILE RIGHT","PRR10M",
ifelse(d[1,1]=="PRFILE LEFT","PRL10M",
ifelse(d[1,1]=="TWIST 3M","TWIST3M","SUP"))
)
)
)
)
except=as.numeric(d[,3])
max=as.numeric(d[,2])
# Column index of that measurement kind within the loaded data frames.
kind_no=ifelse(kind=="GAGE",3,
ifelse(kind=="PRL10M",4,
ifelse(kind=="PRR10M",5,
ifelse(kind=="ALL10M",6,
ifelse(kind=="ALR10M",7,
ifelse(kind=="SUP",8,
ifelse(kind=="TWIST3M",9,0)))))))
# Build a 0.25 m location grid spanning +/-200 m around the defect.
startD=(except-0.2)*1000
lastD=(except+0.2)*1000
vector=1:((lastD-startD)*4+1)
range=startD+0.25*(vector-1)
range=round(range,digits=2)
inspect=data.frame("LOCATION"=range)
# Join the selected measurement column from each of the four runs onto the
# grid; each joined column is suffixed with the 6-digit number embedded in
# the source object's name.
i=1;for(i in 1:4){
if(i!=1){inspect1=inspect}
inspect=left_join(inspect,eval(parse(text=inspect_file[i]))[,c(1,kind_no)],by="LOCATION")
names(inspect)[length(inspect)]=paste0(names(inspect)[length(inspect)],"_",
str_extract(inspect_file[i],"[0-9]{6}"))
print(
paste0(
i,"/",4
)
)
}
# Impute any missing measurements (DMwR::centralImputation).
inspect <- centralImputation(inspect)
#####################################################################
# Align the three earlier runs (columns 2-4) to the latest run (column 5):
# slide each series up to 100 samples (25 m) in either direction and keep
# the shift with the smallest summed absolute difference.
inspect_2=inspect %>% filter(LOCATION>=startD,LOCATION<=lastD)
j=1;for(j in 1:3){
k=5-j
memory=1
cor2=10000
i=1;for(i in 1:100){
if(i!=1) {cor2=ifelse(cor1<cor2,cor1,cor2)}
range_original=101:(length(inspect_2[,5])-100)
range_positive=i:(length(inspect_2[,k])-(201-i))
# cor1=round(cor(inspect[range_original,5],inspect[range_positive,k])^2,digits=4)
cor1=sum(abs(inspect_2[range_original,5]-inspect_2[range_positive,k]))
range_negative=(100+i):(length(inspect_2[,k])-(101-i))
# cor1_1=round(cor(inspect[range_original,5],inspect[range_negative,k])^2,digits=4)
cor1_1=sum(abs(inspect_2[range_original,5]-inspect_2[range_negative,k]))
cor1=ifelse(cor1<cor1_1,cor1,cor1_1)
if(i!=1&cor1<cor2){
memory=ifelse(cor1<cor1_1,i,i*(-1))
}
# On iteration 99, apply the best shift found so far (sign of `memory`
# encodes the direction), zero-padding the displaced ends.
if(i==99){
i=abs(memory)
if(memory>0){
range_positive=i:(length(inspect_2[,k])-(201-i))
inspect_2[,k]=c(rep(0,100),inspect_2[range_positive,k],rep(0,100))
}else if(memory<0){
range_negative=(100+i):(length(inspect_2[,k])-(101-i))
inspect_2[,k]=c(rep(0,100),inspect_2[range_negative,k],rep(0,100))
}
}#if
print(paste0(
"j=",j," i=",i,"/100"," cor=",cor1," ",cor2," memory=",memory
))
}#for(i)
}#for(j)
#####################################################################
# Re-center the location axis so the reported defect location coincides
# with the extreme of the latest run; the sign of the recorded peak (`max`,
# which shadows the base function name) decides whether min or max is used.
inspect_3=inspect_2 %>% filter(LOCATION>=(except-0.02)*1000,LOCATION<=(except+0.02)*1000)
a=which(inspect_3[,1]==except*1000)
b=ifelse(max<0,which(inspect_3[,5]==min(inspect_3[,5])),which(inspect_3[,5]==max(inspect_3[,5])))
c=a-b
absc=abs(c)
len=length(inspect_3[,1])
if(c>0){
inspect_3[,1]=c(inspect_3[-(1:absc),1],
rep(0,absc))
}else{
inspect_3[,1]=c(rep(0,absc),
inspect_3[-((len-absc+1):len),1])
}
# Plot the four runs over +/-7 m (earlier runs in light-to-dark blues,
# latest run in red) and save the figure for the web front end.
inspect_3 %>%
filter(LOCATION>=(except-0.007)*1000,LOCATION<=(except+0.007)*1000) %>%
ggplot() +
aes(x=LOCATION) +
geom_line(aes(y=eval(parse(text=names(inspect[2])))),color= '#adc2eb') +
geom_line(aes(y=eval(parse(text=names(inspect[3])))),color= '#7094db') +
geom_line(aes(y=eval(parse(text=names(inspect[4])))),color= '#24478f') +
geom_line(aes(y=eval(parse(text=names(inspect[5])))),color= '#e60000') +
geom_abline(slope = 0,intercept = 0) +
scale_x_continuous(breaks=seq((except-0.007)*1000,(except+0.007)*1000,2)) +
theme_bw()+
labs(x="km",y="검측치") +
theme(axis.text.x=element_text(size=13, face="bold"),
axis.title.x=element_text(size=15, face="bold"),
axis.text.y=element_text(size=15, face="bold"),
axis.title.y=element_text(size=15, face="bold"))
ggsave(paste(path,"html/inspect.jpg"),
width=20,height=14,units=c("cm"))
# Report the defect location back to the caller via print.
print(except)
}#fun
)#cmpfun
A()
}
|
eba78d2fe3b9a177e99edddd862156b86ca95411 | 214d008a5228752ee7ababb13230bcb44bba308f | /analysis/engagement_Paper_1/colorScheme.R | 4071de4baec5e4d2e28be7cdda27d2829af595f0 | [] | no_license | Sage-Bionetworks/elevateMS_analysis | 6ef9768d32bfbfbf477949e4a08f28e3708f1515 | 2828d6407044a4dc08305300480a5c1b8c813227 | refs/heads/master | 2022-12-28T13:29:04.197254 | 2020-10-08T01:46:08 | 2020-10-08T01:46:08 | 95,137,868 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 136 | r | colorScheme.R | install_load("scales", "ggsci")
# Preview the first 10 colors of the ggsci NPG ("nrc") palette.
show_col(pal_npg("nrc")(10))
#pal_npg("nrc")(10)
# Shared palette constants used across the engagement figures.
COL_MS_PATIENT = '#DC0000FF'
COL_CONTROL = '#00A087FF'
|
e04ba7ef06e731b615ec2a6f972f46bcb263cfaa | e98b541d19a1893cf0507389888a14b4097607e4 | /R/LEN.R | c16d869fd534c88153e77d08f70e7a90172a45bb | [] | no_license | njbultman/xlsxFunctions | 6cd647aacd140a45207949fe01b596f00bd0490a | 0326efb9af26be1efc7f6e65b782f3a6e30bebcf | refs/heads/master | 2023-07-06T00:22:15.572100 | 2021-08-08T14:26:17 | 2021-08-08T14:26:17 | 294,152,526 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 563 | r | LEN.R | #' Count Characters in String or Vector
#'
#' Count the number of characters of every element of a string or vector,
#' mirroring the spreadsheet LEN() function. Non-character input is coerced
#' to character first, as in \code{nchar}.
#'
#' @param text String or vector whose characters should be counted.
#'
#' @return Integer vector giving the character count of each element.
#' @author Nick Bultman, \email{njbultman74@@gmail.com}, February 2021
#' @seealso \code{\link{nchar}}
#' @keywords count len nchar
#' @export
#' @examples
#' LEN("hi")
#' LEN(c("hey", "hi", "hey"))
#' LEN(c(1, "hi", "hey", 2))
#'
LEN <- function(text) {
  nchar(text)
}
f796a77a3805259f6e2eada03087742a73c38590 | 2c3bc3cd0efb2891081f05b98fff8532280e3789 | /other_scripts/stan_helpers/run_stan_models.R | 6dcfdb698b8cbc04e3ab68b1c91dfe7c085f6f76 | [] | no_license | joelewis101/thesis | 459600339d9d9e8be10743c30c43be8670d400ed | 0d2831b788792efc91a031edc8dab66fb7c87fdf | refs/heads/master | 2023-02-18T01:50:37.883192 | 2020-08-10T10:36:50 | 2020-08-10T10:36:50 | 171,511,551 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 888 | r | run_stan_models.R |
# Assemble the Stan data list from the chapter 9 transition data frame and
# fit the exponential-covariate-function log-likelihood model.
df_t0 <- read.csv("chapter_9/stan_df.csv")
# NOTE(review): this bare column reference is a no-op -- likely leftover
# from interactive inspection; safe to delete.
df_t0$ab
N <- nrow(df_t0)
t <- df_t0$tstop
# Covariate timing matrix (antibiotic / hospitalisation start-stop times).
covariates <- as.matrix(dplyr::select(df_t0,abx_start_time, abx_end_time, hosp_start_time, hosp_end_time, prev_abx_stop_time))
ab_flags <- as.matrix(dplyr::select(df_t0,prev_abx_exposure, ab_this_step))
# One-hot encode the starting ESBL state: p0 = started negative, p1 = positive.
df_t0$p0 <- 0
df_t0$p1 <- 0
df_t0$p0[df_t0$ESBL_start == 0] <- 1
df_t0$p1[df_t0$ESBL_start == 1] <- 1
start_state = as.matrix(dplyr::select(df_t0,p0,p1))
end_state = df_t0$ESBL_stop
stan_data <- list(N = N, t = t, covariates = covariates, start_state = start_state, end_state = end_state, ab_flags = ab_flags)
stan_model <- "other_scripts/stan_helpers/stan_model_real_data_exp_fn_loglik.stan"
#saveRDS(fit,"/Users/joelewis/Documents/PhD/R/PhD/stan/stan_model_real_data_msm_replica.rda" )
# Single chain, 500 warmup of 1000 total iterations, no thinning.
fit <- stan(stan_model, data = stan_data, warmup = 500, iter = 1000, chains = 1, cores = 2, thin = 1)
|
e43f54b9b928f0b5cad1684d6b8efc51fb2bc70e | 0e26112fdff7dd8beacd4a51e38014833a5ebdf7 | /src/53_fig3_effplots.R | fb355fee08c4a691790320fdecdd25fd35007f5f | [] | no_license | jkbest2/tunabayes | 278fd782d92c60432c01fcf125e48e6ad489a3b6 | 4911af91a7b8d614392b3039dd86026c341a2982 | refs/heads/master | 2020-07-15T02:57:17.951095 | 2019-11-14T04:04:12 | 2019-11-14T04:06:57 | 205,463,789 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,604 | r | 53_fig3_effplots.R | source("src/50_makefigs.R")
## Munge data frames for efficiency plots (figure 3)
# Keep only the columns needed by the efficiency plots: model identity,
# sampler setting (adapt_delta), ESS rate, divergence counts/flags, and the
# stored fit summary.
reshape_effs <- function(df_diag) {
  select(df_diag, model_name, adapt_delta, ess_rate, div_total, div, summary)
}
## Get standardized data frames for each dynamics specification
fullPT_effs <- reshape_effs(fullPT_diagnostics) %>%
  mutate(dyn = factor("P-T, estimated m", levels = dyn_levels))
fixedPT_effs <- reshape_effs(fixedPT_diagnostics) %>%
  mutate(dyn = factor("P-T, fixed m", levels = dyn_levels))
Schaefer_effs <- reshape_effs(Schaefer_diagnostics) %>%
  mutate(dyn = factor("Schaefer", levels = dyn_levels))
## Put into a single data frame; column 10 of each stored fit summary is
## extracted per fit (presumably Rhat in this summary layout -- confirm).
## min_rhat/max_rhat are not used by the plot below; kept for inspection.
all_effs <- bind_rows(fullPT_effs,
                      fixedPT_effs,
                      Schaefer_effs) %>%
  mutate(rhat = map(summary, ~ .x$summary[, 10]),
         min_rhat = map_dbl(rhat, min, na.rm = TRUE),
         max_rhat = map_dbl(rhat, max, na.rm = TRUE))
## Set y-axis breaks manually over orders of magnitude
breaks <- 10^(-5:5)
## Plot sampling efficiency (effective samples per second, log scale) against
## target acceptance rate; open circles mark runs that had divergences.
effplot <- all_effs %>%
  filter(adapt_delta > 0.75) %>%
  ggplot(aes(x = adapt_delta, y = ess_rate,
             color = model_name,
             shape = div,
             group = model_name
             )) +
  geom_line(size = 0.5, alpha = 0.25) +
  geom_point(data = filter(all_effs, div),
             shape = 1, size = 1.5, stroke = 0.3) +
  geom_point(data = filter(all_effs, !div),
             shape = 19, size = 1.5) +
  scale_color_manual(values = param_colors) +
  xlab("Target acceptance rate") +
  guides(shape = FALSE,
         color = guide_legend(title = "", nrow = 1L)) +
  scale_x_continuous(breaks = ad_vals,
                     labels = c(ad_vals[1:4], "", "", ad_vals[7]),
                     minor_breaks = NULL) +
  scale_y_log10(name = "Effectively independent samples per second",
                breaks = breaks,
                labels = breaks,
                expand = expand_scale()) +
  coord_cartesian(y = c(1e-2, 1e3)) +
  facet_wrap(~ dyn) +
  theme_jkb(base_size = 8) +
  theme(plot.margin = margin(l = 1, r = 2),
        plot.title = element_blank(),
        axis.ticks = element_line(size = 0.2),
        axis.line = element_line(size = 0.2),
        strip.text.x = element_text(vjust = 0,
                                    margin = margin(b = 3)),
        legend.position = "bottom",
        legend.margin = margin(t = -10))
## Save a TIFF for Word, and a PDF as a high quality vector image for publication
ggsave("figs/fig3_effplot.tiff", effplot, width = 6, height = 4)
ggsave("figs/fig3_effplot.pdf", effplot, device = cairo_pdf,
       width = 6, height = 4)
|
41f21c07a3af6eabdeb90f489b9f19ecd11bdacc | a7fc036b705c291c306b1ac7a288de39fdc49f4b | /man/get8KItems.Rd | 7807359bf381302cf388bc782b9ba36374f2cfb8 | [] | no_license | cran/edgar | 6476c18e82a360a6e2e725adc8edf0db9fb5a9f7 | 3e620a52a9d9f00080abf4d24e0f654edc4b59a1 | refs/heads/master | 2023-08-20T22:42:15.300717 | 2023-08-10T13:20:02 | 2023-08-10T15:30:49 | 48,079,412 | 10 | 14 | null | null | null | null | UTF-8 | R | false | true | 1,560 | rd | get8KItems.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/get8KItems.R
\name{get8KItems}
\alias{get8KItems}
\title{Retrieves Form 8-K event information}
\usage{
get8KItems(cik.no, filing.year, useragent)
}
\arguments{
\item{cik.no}{vector of CIK(s) in integer format. Suppress leading
zeroes from CIKs.}
\item{filing.year}{vector of four digit numeric year}
\item{useragent}{Should be in the form of "Your Name Contact@domain.com"}
}
\value{
Function returns dataframe with Form 8-K events information along with CIK
number, company name, date of filing, and accession number.
}
\description{
\code{get8KItems} retrieves Form 8-K event information of firms based on CIK numbers
and filing year.
}
\details{
get8KItems function takes firm CIK(s) and filing year(s) as input parameters from
a user and provides information on the Form 8-K triggering events along with the firm
filing information. The function searches and imports existing downloaded
8-K filings in the current directory; otherwise it downloads them using
\link[edgar]{getFilings} function. It then reads the 8-K filings and parses them
to extract events information. According to SEC EDGAR's guidelines a user also needs to
declare user agent.
}
\examples{
\dontrun{
output <- get8KItems(cik.no = 38079, filing.year = 2005, useragent)
## Returns 8-K event information for CIK '38079' filed in year 2005.
output <- get8KItems(cik.no = c(1000180,38079),
filing.year = c(2005, 2006), useragent)
}
}
|
117aecf92a17be4be75f014e65b78bc6a566605b | 93a5d4b6be128af228a4846edb4ba16e5f8db363 | /County Profiles.R | 4720b994cfde7046c9b51b73963113c10393e57d | [] | no_license | jvanloon93/ACCD-Tools-Scripts | 8b3fbb6a71f81b32c9bca80d2696bdd282cc3d7c | 32a2920b4494b96a449a04d68f274f121af5e1ab | refs/heads/master | 2020-05-24T04:17:37.865099 | 2020-02-13T17:21:52 | 2020-02-13T17:21:52 | 187,089,110 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,140 | r | County Profiles.R | library(XLConnect)
library(XLConnectJars)
library(rJava)
library(tidycensus)
library(blscrapeR)
library(tidyverse)
library(usmap)
# FIPS codes for the ten-county southwestern-PA region, named by county.
ten_county <- as.vector(fips('PA', county = c('Allegheny', 'Armstrong', 'Beaver', 'Butler', 'Fayette','Greene', 'Indiana', 'Lawrence', 'Washington', 'Westmoreland')))
names(ten_county) <- c('Allegheny', 'Armstrong', 'Beaver', 'Butler', 'Fayette','Greene', 'Indiana', 'Lawrence', 'Washington', 'Westmoreland')
# Pull the three data sets; these helper functions are defined elsewhere in
# the project (not in this file).
Census <- County_profile_Census_Pull(ten_county, Estimates_year = 2018, ACS_year = 2017, dataset = "acs5")
BLS <- UN_LF_County_Pull(ten_county, 2018)
CEW <- PRA_10_County(2018) %>% bind_rows()
# Keep only own_code == 0 records (total across ownerships in QCEW coding --
# confirm) and the headline employment/establishment/pay columns.
CEW <- CEW %>%
  filter(own_code == 0) %>% select(area_fips, annual_avg_emplvl, annual_avg_estabs, avg_annual_pay)
# Label rows by county, in the same order as ten_county above.
row.names(CEW) <- (c('Allegheny', 'Armstrong', 'Beaver', 'Butler', 'Fayette','Greene', 'Indiana', 'Lawrence', 'Washington', 'Westmoreland')
)
# Write each pull to its own worksheet of the Excel workbook.
book <- loadWorkbook("County_Profile.xlsx", create = TRUE)
createSheet(book, "Census")
createSheet(book, "LAU")
createSheet(book, "CEW")
writeWorksheet(book, Census, "Census")
writeWorksheet(book, BLS, "LAU")
writeWorksheet(book, CEW, "CEW")
# NOTE(review): there is no saveWorkbook(book, ...) call here, so the modified
# workbook is never written back to disk -- confirm it is saved elsewhere.
|
f7e39574b835575a74e01fd894be939956219ebe | 0ce785a7c4c0641319d4223cd2e6a381006a04d9 | /man/makeBSseq.Rd | 9d65383b6c45a86e0061292ee48aee57c64a178b | [] | no_license | ttriche/biscuiteer | 2d5b9b788de8eb44fc34827d3b9195f6e55a9193 | 9056279c86487c69f5a0aa57ab102c776e1fa9bc | refs/heads/master | 2020-03-10T07:13:24.616315 | 2019-02-27T18:55:50 | 2019-02-27T18:55:50 | 129,257,925 | 1 | 3 | null | 2018-08-27T15:29:22 | 2018-04-12T13:44:39 | R | UTF-8 | R | false | true | 643 | rd | makeBSseq.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/makeBSseq.R
\name{makeBSseq}
\alias{makeBSseq}
\title{make an in-core BSseq object from a biscuit BED}
\usage{
makeBSseq(tbl, params, simplify = FALSE, verbose = FALSE)
}
\arguments{
\item{tbl}{a tibble (from read_tsv) or a data.table (from fread())}
\item{params}{parameters (from checkBiscuitBED)}
\item{simplify}{simplify sample names by dropping .foo.bar.hg19 & similar}
\item{verbose}{be verbose about what is happening? (FALSE)}
}
\value{
an in-core BSseq object
}
\description{
make an in-core BSseq object from a biscuit BED
}
\seealso{
makeBSseq_HDF5
}
|
95b4f45aa9d7b344759fba9d0c0e36e1865d1c21 | 2fe4c16e0377a99e198ab04d5c378ca247ae4329 | /Rscript/R/mzkit/man/PrecursorType.Match.Rd | 365a57089f43699201cc9fdf5250b5e9e6a34168 | [
"MIT"
] | permissive | xieguigang/mzkit | 1964d28b0fad5f6d44950fdccdd4a70877f75c29 | 6391304b550f7e4b8bb6097a6fb1c0d3b6785ef1 | refs/heads/master | 2023-08-31T06:51:55.354166 | 2023-08-30T08:56:32 | 2023-08-30T08:56:32 | 86,005,665 | 37 | 11 | MIT | 2023-03-14T14:18:44 | 2017-03-23T23:03:07 | Visual Basic .NET | UTF-8 | R | false | true | 1,518 | rd | PrecursorType.Match.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/match.R
\name{PrecursorType.Match}
\alias{PrecursorType.Match}
\title{Match the precursor type}
\usage{
PrecursorType.Match(
mass,
precursorMZ,
charge,
chargeMode = "+",
tolerance = tolerance.deltaMass(0.3),
debug.echo = TRUE
)
}
\arguments{
\item{mass}{Molecular mass}
\item{precursorMZ}{Precursor m/z value of the ion.}
\item{charge}{The charge value of the ion}
\item{tolerance}{Tolerance between two mass value, by default is 0.3 da,
if this parameter is a numeric value, then means tolerance by ppm value.
There are two pre-defined tolerance function:
\enumerate{
\item \code{\link{tolerance.deltaMass}}
\item \code{\link{tolerance.ppm}}
}}
}
\description{
Match the precursor type through min ppm value match.
}
\examples{
mass = 853.33089
PrecursorType.Match(853.33089, 307.432848, charge = 3) # pos "[M+3Na]3+" charge = 3, 307.432848
PrecursorType.Match(853.33089, 1745.624938, charge = 1) # pos "[2M+K]+" charge = 1, 1745.624938
PrecursorType.Match(853.33089, 854.338166, charge = 1) # pos "[M+H]+" charge = 1, 854.338166
PrecursorType.Match(853.33089, 283.436354, charge = -3, chargeMode = "-") # neg "[M-3H]3-" charge = -3, 283.436354
PrecursorType.Match(853.33089, 2560.999946, charge = -1, chargeMode = "-") # neg "[3M-H]-" charge = -1, 2560.999946
PrecursorType.Match(853.33089, 852.323614, charge = -1, chargeMode = "-") # neg "[M-H]-" charge = -1, 852.323614
}
|
df1385d444dbc6a727975aedfdf7156cedfda425 | 0af2caa1b58109fbe67ba984e07b2b709f66d598 | /R/metro.R | 795bf89197948877664b90cc62c1176bf6b92fd2 | [
"MIT"
] | permissive | visualizacionuc/tidy-data | 8921aa4673da91005e7dfd91891a44774f85c1ab | 2e2b78fb1a1cc69c70278017f2a7a96d214a1920 | refs/heads/master | 2020-03-14T18:29:58.721330 | 2018-11-15T15:04:21 | 2018-11-15T15:04:21 | 131,742,976 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 2,360 | r | metro.R | library(tidyverse)
library(lubridate)
# url <- "https://github.com/jbkunst/r-material/blob/gh-pages/201710-Visualizacion-en-el-Analisis/data/2015.04_Subidas_paradero_mediahora_web/2015.04_Subidas_paradero_mediahora_web.csv"
# lo fome -----------------------------------------------------------------
url <- "https://tinyurl.com/data-metro-scl"
path <- "data/2015.04_Subidas_paradero_mediahora_web.csv"
data <- read_delim(path, delim = ";")
data
data <- data %>%
filter(!str_detect(paraderosubida, "[0-9]+-[0-9]"))
data <- data %>%
filter(paraderosubida != "-")
data <- data %>%
filter(hour(mediahora) > 0)
# interesante -------------------------------------------------------------
ggplot(data) +
geom_point(aes(subidas_laboral_promedio, mediahora))
ggplot(data) +
geom_point(aes(x = mediahora, y = subidas_laboral_promedio))
ggplot(data) +
geom_point(aes(x = mediahora, y = subidas_laboral_promedio), alpha = 0.02, size = 2) +
geom_smooth(aes(x = mediahora, y = subidas_laboral_promedio))
ggplot(data) +
geom_point(aes(x = mediahora, y = subidas_laboral_promedio, color = paraderosubida),
alpha = 1, size = 2) +
geom_smooth(aes(x = mediahora, y = subidas_laboral_promedio)) +
theme(legend.position = "none")
# datita ------------------------------------------------------------------
datita <- data %>%
filter(paraderosubida == "ALCANTARA")
ggplot(datita) +
geom_point(aes(x = mediahora, y = subidas_laboral_promedio),
alpha = 1, size = 2)
ggplot(datita) +
geom_line(aes(x = mediahora, y = subidas_laboral_promedio))
datita <- data %>%
filter(paraderosubida == "UNIVERSIDAD DE CHILE")
ggplot(datita) +
geom_line(aes(x = mediahora, y = subidas_laboral_promedio))
# Comparacion -------------------------------------------------------------
est <- c("ALCANTARA", "UNIVERSIDAD DE CHILE", "PAJARITOS", "PLAZA MAIPU",
"LA CISTERNA L2", "BELLAS ARTES", "EL GOLF", "ESCUELA MILITAR", "NUBLE",
"PLAZA DE PUENTE ALTO")
datota <- data %>%
filter(paraderosubida %in% est)
datota %>% count(paraderosubida)
library(viridis)
ggplot(datota) +
geom_line(aes(x = mediahora, y = subidas_laboral_promedio, color = paraderosubida),
size = 3) +
scale_color_viridis(discrete = TRUE, option = "B") +
facet_wrap( ~ paraderosubida, scales = "free")
|
2ebbdb578cb28798468760ac01a5c5f55b56573f | fd0ab0f09d3c07f03e0af82bf93875524c44a0e9 | /tmp-tests/test-impute3.R | 137abfe9bf24b3f7651dab4818465312a7a1a8e9 | [] | no_license | privefl/bigsnpr | b05f9e36bcab6d8cc86fb186c37fe94a6425960a | 83db98f974b68132a9a3f3ee7ca388159a4c12b5 | refs/heads/master | 2023-08-02T13:31:18.508294 | 2023-06-30T12:15:55 | 2023-06-30T12:15:55 | 62,644,144 | 162 | 47 | null | 2022-10-12T16:46:15 | 2016-07-05T14:36:34 | R | UTF-8 | R | false | false | 10,630 | r | test-impute3.R | ################################################################################
# 256-entry lookup tables indexed by raw byte code (used via Gna$code256).
# LABEL: raw codes 0-2 decode to 0/0.5/1, everything else is missing.
# PRED:  raw codes 0-2 are observed values, code 3 is missing, and codes 4-6
#        decode to 0/1/2 (the slots written back by the imputation step);
#        all remaining codes are unused.
CODE_IMPUTE_LABEL <- c(0, 0.5, 1, rep(NA_real_, 256 - 3))
CODE_IMPUTE_PRED <- c(0, 1, 2, NA_real_, 0, 1, 2, rep(NA_real_, 256 - 7))
################################################################################
# Impute missing genotypes for the SNPs of one chromosome, in place.
#
# Gna:     genotype matrix accessor whose code256 has been set to
#          CODE_IMPUTE_PRED by the caller, so reads are in {0, 1, 2, NA}
#          and raw codes 4-6 written below decode back to 0/1/2.
# ind.chr: column indices of this chromosome's SNPs; attr(ind.chr, "chr")
#          gives the chromosome number (used to pick its seed).
# Returns a data.frame with one row per SNP: proportion missing (pNA),
# estimated imputation error rate on a held-out set (pError), and the
# number of correlated predictor SNPs found (num_pred).
imputeChr <- function(Gna, ind.chr, alpha, size, p.train, seed) {
  # reproducibility: one seed per chromosome
  if (!any(is.na(seed))) set.seed(seed[attr(ind.chr, "chr")])
  # init
  n <- nrow(Gna)
  m.chr <- length(ind.chr)
  # Squared loss of thresholded predictions: thresholds t1 < t2 turn a
  # probability x into a 0/1/2 call via (x > t1) + (x > t2).  The lambda
  # term penalises thresholds drifting from the defaults (0.25, 0.75).
  loss_fun <- function(x, y, t1 = 0.25, t2 = 0.75, lambda = 0) {
    mean(((x > t1) + (x > t2) - y)^2) +
      lambda * ((t1 - 0.25)^2 + (t2 - 0.75)^2)
  }
  # sparse correlation between this chromosome's SNPs; only pairs within
  # `size` SNPs of each other and passing the alpha test are non-zero
  corr <- snp_cor(
    Gna = Gna,
    ind.row = 1:n,
    ind.col = ind.chr,
    size = size,
    alpha = alpha,
    fill.diag = FALSE
  )
  # imputation, one SNP at a time
  nbNA <- integer(m.chr)
  error <- rep(NA_real_, m.chr)
  num_pred <- rep(NA_integer_, m.chr)
  for (i in 1:m.chr) {
    cat(i)  # progress indicator (prints the SNP index)
    X.label <- Gna[, ind.chr[i]]
    # count of missing genotypes; also stores their row indices in indNA
    nbNA[i] <- l <- length(indNA <- which(is.na(X.label)))
    if (l > 0) {
      # split observed genotypes into training / validation rows
      indNoNA <- setdiff(1:n, indNA)
      ind.train <- sort(sample(indNoNA, p.train * length(indNoNA)))
      ind.val <- setdiff(indNoNA, ind.train)
      # predictors: SNPs significantly correlated with SNP i ...
      # ind.col <- ind.chr[which(corr[, i] != 0)]
      ind.col <- which(corr[, i] != 0)
      num_pred[i] <- length(ind.col)
      # ... falling back to all neighbours within `size` if fewer than 5
      if (length(ind.col) < 5)
        ind.col <- setdiff(intersect(1:m.chr, -size:size + i), i)
      ind.col <- ind.chr[ind.col]
      # xgboost on half-dosage labels {0, 0.5, 1} so they fit the
      # binary:logistic objective's [0, 1] range
      bst <- xgboost(data = Gna[ind.train, ind.col],
                     label = X.label[ind.train] / 2,
                     objective = "binary:logistic",
                     base_score = mean(X.label[ind.train]) / 2,
                     nrounds = 10,
                     params = list(max_depth = 4, gamma = 1, alpha = 1),
                     nthread = 1, verbose = 0, save_period = NULL)
      # learn genotype-calling thresholds on the training predictions;
      # lambda scales the penalty keeping them near (0.25, 0.75)
      pred.train <- stats::predict(bst, Gna[ind.train, ind.col])
      lambda <- 8 * loss_fun(pred.train, X.label[ind.train])
      opt.min <- stats::optim(par = c(0.25, 0.75), fn = function(t) {
        loss_fun(pred.train, X.label[ind.train], t[[1]], t[[2]], lambda)
      })
      # keep the optimised thresholds only if optim() converged
      thrs <- `if`(opt.min$convergence == 0, opt.min$par, c(0.25, 0.75))
      # estimate the error rate on the held-out validation rows
      pred.val <- stats::predict(bst, Gna[ind.val, ind.col, drop = FALSE])
      pred.val <- rowSums(outer(pred.val, thrs, '>'))
      error[i] <- mean(pred.val != X.label[ind.val])
      # impute: raw codes 4-6 are the slots CODE_IMPUTE_PRED decodes to 0/1/2
      pred <- stats::predict(bst, Gna[indNA, ind.col, drop = FALSE])
      pred <- rowSums(outer(pred, thrs, '>'))
      Gna[indNA, ind.chr[i]] <- as.raw(pred + 4)
    }
  }
  data.frame(pNA = nbNA / n, pError = error, num_pred = num_pred)
}
################################################################################
#' Fast imputation
#'
#' Fast imputation algorithm based on local XGBoost models. **This algorithm
#' has not been extensively compared with other imputation methods yet.**
#'
#' @inheritParams bigsnpr-package
#' @param alpha Type-I error for testing correlations. Default is `0.02`.
#' @param size Number of neighbor SNPs to be possibly included in the model
#' imputing this particular SNP. Default is `500`.
#' @param p.train Proportion of non missing genotypes that are used for training
#' the imputation model while the rest is used to assess the accuracy of
#' this imputation model. Default is `0.8`.
#' @param seed An integer, for reproducibility. Default doesn't use seeds.
#'
#' @return A `data.frame` with
#' - the proportion of missing values by SNP,
#' - the estimated proportion of imputation errors by SNP.
#' @export
#'
#' @import Matrix xgboost
#'
snp_fastImpute <- function(Gna, infos.chr,
                           alpha = 0.02,
                           size = 500,
                           p.train = 0.8,
                           seed = NA,
                           ncores = 1) {
  check_args()
  # Decode raw genotype codes as 0/1/2 (with codes 4-6 reserved for values
  # written back by the imputation); imputeChr() relies on this decoding.
  Gna$code256 <- CODE_IMPUTE_PRED
  # Expand the scalar seed into one seed per chromosome.
  if (!is.na(seed)) seed <- seq_len(max(infos.chr)) + seed
  # Capture ALL local bindings by name to forward to snp_split(); renaming
  # any local variable here would change the arguments passed downstream.
  args <- as.list(environment())
  # Run imputeChr() per chromosome (possibly on ncores workers) and
  # row-bind the per-SNP summaries into one data frame.
  do.call(what = snp_split, args = c(args, FUN = imputeChr, combine = 'rbind'))
}
################################################################################
#' imputeChr2 <- function(Gna, ind.chr, size, p.train, seed) {
#'
#' # reproducibility
#' if (!any(is.na(seed))) set.seed(seed[attr(ind.chr, "chr")])
#'
#' # init
#' n <- nrow(Gna)
#' m.chr <- length(ind.chr)
#'
#' # imputation
#' nbNA <- integer(m.chr)
#' error <- rep(NA_real_, m.chr)
#' for (i in 1:m.chr) {
#' cat(i)
#' X.label <- Gna[, ind.chr[i]]
#' nbNA[i] <- l <- length(indNA <- which(is.na(X.label)))
#' if (l > 0) {
#' indNoNA <- setdiff(1:n, indNA)
#' ind.train <- sort(sample(indNoNA, p.train * length(indNoNA)))
#' ind.val <- setdiff(indNoNA, ind.train)
#'
#' ind.col <- -size:size + i
#' ind.col[ind.col < 1 | ind.col > m.chr | ind.col == i] <- 0L
#' X.data <- Gna[, ind.col]
#'
#' bst <- xgboost(
#' data = X.data[ind.train, ],
#' label = X.label[ind.train],
#' objective = "multi:softmax",
#' base_score = mean(X.label[ind.train]),
#' nrounds = 10,
#' params = list(max_depth = 4, num_class = 3, gamma = 1, alpha = 1),
#' nthread = 1,
#' verbose = 0,
#' save_period = NULL
#' )
#'
#' # error of validation
#' pred.val <- stats::predict(bst, X.data[ind.val, ])
#' error[i] <- mean(pred.val != X.label[ind.val])
#' # impute
#' pred <- stats::predict(bst, X.data[indNA, ])
#' Gna[indNA, ind.chr[i]] <- as.raw(pred + 4L)
#' }
#' }
#'
#' data.frame(pNA = nbNA / n, pError = error)
#' }
#'
#' ################################################################################
#'
#' #' Fast imputation
#' #'
#' #' Fast imputation algorithm based on local XGBoost models. **This algorithm
#' #' has not been extensively compared with other imputation methods yet.**
#' #'
#' #' @inheritParams bigsnpr-package
#' #' @param size Number of neighbor SNPs to be possibly included in the model
#' #' imputing this particular SNP. Default is `100`.
#' #' @param p.train Proportion of non missing genotypes that are used for training
#' #' the imputation model while the rest is used to assess the accuracy of
#' #' this imputation model. Default is `0.8`.
#' #' @param seed An integer, for reproducibility. Default doesn't use seeds.
#' #'
#' #' @return A `data.frame` with
#' #' - the proportion of missing values by SNP,
#' #' - the estimated proportion of imputation errors by SNP.
#' #' @export
#' #'
#' #' @import xgboost
#' #'
#' snp_fastImpute2 <- function(Gna, infos.chr,
#' size = 100,
#' p.train = 0.8,
#' seed = NA,
#' ncores = 1) {
#'
#' check_args()
#'
#' Gna$code256 <- CODE_IMPUTE_PRED
#'
#' if (!is.na(seed)) seed <- seq_len(max(infos.chr)) + seed
#' args <- as.list(environment())
#'
#' do.call(what = snp_split, args = c(args, FUN = imputeChr2, combine = 'rbind'))
#' }
#'
#' ################################################################################
#'
#' imputeChr3 <- function(Gna, ind.chr, alpha, size, p.train, seed) {
#'
#' # reproducibility
#' if (!any(is.na(seed))) set.seed(seed[attr(ind.chr, "chr")])
#'
#' # init
#' X <- Gna$copy(code = CODE_IMPUTE_PRED)
#' n <- nrow(X)
#' m.chr <- length(ind.chr)
#'
#' # correlation between SNPs
#' corr <- snp_cor(
#' Gna = Gna,
#' ind.row = 1:n,
#' ind.col = ind.chr,
#' size = size,
#' alpha = alpha,
#' fill.diag = FALSE
#' )
#' print(corr)
#'
#' # imputation
#' nbNA <- integer(m.chr)
#' error <- rep(NA_real_, m.chr)
#' for (i in 1:m.chr) {
#' cat(i)
#' X.label <- Gna[, ind.chr[i]]
#' nbNA[i] <- l <- length(indNA <- which(is.na(X.label)))
#' if (l > 0) {
#' indNoNA <- setdiff(1:n, indNA)
#' ind.train <- sort(sample(indNoNA, p.train * length(indNoNA)))
#' ind.val <- setdiff(indNoNA, ind.train)
#'
#' ind.col <- which(corr[, i] != 0)
#' if (length(ind.col) < 5)
#' ind.col <- setdiff(intersect(1:m.chr, -size:size + i), i)
#' X.data <- X[, ind.chr[ind.col]]
#'
#' bst <- xgboost(
#' data = X.data[ind.train, ],
#' label = X.label[ind.train],
#' objective = "multi:softmax",
#' base_score = mean(X.label[ind.train]),
#' nrounds = 10,
#' params = list(max_depth = 4, num_class = 3, gamma = 1, alpha = 1),
#' nthread = 1,
#' verbose = 0,
#' save_period = NULL
#' )
#'
#' # error of validation
#' pred.val <- stats::predict(bst, X.data[ind.val, ])
#' error[i] <- mean(pred.val != X.label[ind.val])
#' # impute
#' pred <- stats::predict(bst, X.data[indNA, ])
#' Gna[indNA, ind.chr[i]] <- as.raw(pred + 4L)
#' }
#' }
#'
#' data.frame(pNA = nbNA / n, pError = error)
#' }
#'
#' ################################################################################
#'
#' #' Fast imputation
#' #'
#' #' Fast imputation algorithm based on local XGBoost models. **This algorithm
#' #' has not been extensively compared with other imputation methods yet.**
#' #'
#' #' @inheritParams bigsnpr-package
#' #' @param alpha Type-I error for testing correlations. Default is `0.02`.
#' #' @param size Number of neighbor SNPs to be possibly included in the model
#' #' imputing this particular SNP. Default is `500`.
#' #' @param p.train Proportion of non missing genotypes that are used for training
#' #' the imputation model while the rest is used to assess the accuracy of
#' #' this imputation model. Default is `0.8`.
#' #' @param seed An integer, for reproducibility. Default doesn't use seeds.
#' #'
#' #' @return A `data.frame` with
#' #' - the proportion of missing values by SNP,
#' #' - the estimated proportion of imputation errors by SNP.
#' #' @export
#' #'
#' #' @import Matrix xgboost
#' #'
#' snp_fastImpute3 <- function(Gna, infos.chr,
#' alpha = 0.02,
#' size = 500,
#' p.train = 0.8,
#' seed = NA,
#' ncores = 1) {
#'
#' check_args()
#'
#' if (!is.na(seed)) seed <- seq_len(max(infos.chr)) + seed
#' args <- as.list(environment())
#'
#' do.call(what = snp_split, args = c(args, FUN = imputeChr3, combine = 'rbind'))
#' }
#'
#' ################################################################################
|
1b3c8bacb02aa6f8c20672fe5aa7a5cc769a52b9 | 76d7a0bb142867e3ff689ef5bc4556a57ae2e6d4 | /plot4.R | d5e775a5249b33efc3d1bb3686b1f5bec3a4e316 | [] | no_license | patrickbigdigit/Exploratory_Data_Analysis | c2e3f19645aef94e4811bb0b73d1279ba8bcb3e9 | e682f8b74c43ff172dfdb97b11c5bee69d90452f | refs/heads/master | 2021-01-10T05:27:03.573614 | 2016-01-09T23:44:19 | 2016-01-09T23:44:19 | 49,346,129 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,725 | r | plot4.R | ## R script for plot 3
# Initial file assignment and data retrieval.
file <- "household_power_consumption.txt"
# NOTE(review): if the raw file encodes missing values as "?" (common for
# this dataset), read.table without na.strings = "?" reads the numeric
# columns as character and the as.numeric() calls below coerce "?" to NA
# with a warning -- confirm this is intended.
data <- read.table(file, header=TRUE, sep=";", stringsAsFactors=FALSE, dec=".")
# Subset to the two days of interest (Date values are d/m/Y strings).
s_data <- subset(data,data$Date %in% c("1/2/2007","2/2/2007") )
# Convert all required variables to numeric class.
s_data$Global_active_power <- as.numeric(s_data$Global_active_power)
s_data$Sub_metering_1 <- as.numeric(s_data$Sub_metering_1)
s_data$Sub_metering_2 <- as.numeric(s_data$Sub_metering_2)
s_data$Sub_metering_3 <- as.numeric(s_data$Sub_metering_3)
s_data$globalReactivePower <- as.numeric(s_data$Global_reactive_power)
s_data$voltage <- as.numeric(s_data$Voltage)
# Combine date and time into timestamps to plot the charts against.
var_dt_time <- strptime(paste(s_data$Date, s_data$Time, sep=" "), "%d/%m/%Y %H:%M:%S")
# Open a PNG device and lay out a 2x2 grid of panels.
png("plot4.png", width=480, height=480)
par(mfrow = c(2, 2))
# Plot all four graphs as per the requirement.
plot(var_dt_time, s_data$Global_active_power, type="l", xlab="", ylab="Global Active Power")
plot(var_dt_time, s_data$voltage, type="l", xlab="datetime", ylab="Voltage")
plot(var_dt_time, s_data$Sub_metering_1, type="l", ylab="Energy Submetering", xlab="")
lines(var_dt_time, s_data$Sub_metering_2, type="l", col="red")
lines(var_dt_time, s_data$Sub_metering_3, type="l", col="blue")
legend("topright", c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lty=1, col=c("black", "red", "blue"), bty="n")
plot(var_dt_time, s_data$globalReactivePower, type="l", xlab="datetime", ylab="Global_reactive_power")
dev.off()
204fd2ee434b9fea793699e85952214be7dff16f | e3453f8c610ceb4b5022be45b1dc145d690c1358 | /man/chatty.Rd | 5ed66d027275862f6b3d8ab37334e03f3411e1c8 | [] | no_license | kongra/koR | 2592b69b7525099d2ceeef2686fb4e0f0005f933 | 48f06fd6832ac4ccaa7c66438f9a5668072e50a4 | refs/heads/master | 2018-10-08T03:05:41.749217 | 2018-08-30T08:18:29 | 2018-08-30T08:18:29 | 38,744,631 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 441 | rd | chatty.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/core.R
\name{chatty}
\alias{chatty}
\title{Debug wrapper generator.
Returns a diagnostic wrapper around f. Thanks H. Wickham.}
\usage{
chatty(f, prefix = "Processing ")
}
\arguments{
\item{f}{Function to wrap}
\item{prefix}{A prefix for the diagnostic message}
}
\description{
Debug wrapper generator.
Returns a diagnostic wrapper around f. Thanks H. Wickham.
}
|
541e9d24f1a97fb2eff25f8e0223c494f91f9db2 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/Ecfun/examples/camelParse.Rd.R | 206db0aa156311f2597a3857264c9b96c2c9f3e0 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 579 | r | camelParse.Rd.R | library(Ecfun)
### Name: camelParse
### Title: Split a character string where a capital letter follows a
###   lowercase letter
### Aliases: camelParse
### Keywords: manip
### ** Examples
# Auto-extracted example: camelParse() splits each string at every
# lower-to-upper case transition (e.g. "JohnJohn" -> "John", "John").
tst <- c('Smith, JohnJohn Smith',
         'EducationNational DefenseOther Committee',
         'McCain, JohnJohn McCain')
tst. <- camelParse(tst)
## Don't show:
stopifnot(
## End(Don't show)
# camelParse() returns a list with one character vector per input string.
all.equal(tst., list(c('Smith, John', 'John Smith'),
                     c('Education', 'National Defense', 'Other Committee'),
                     c('McCain, John', 'John McCain') ) )
## Don't show:
)
## End(Don't show)
|
82519d99fed039e28a58dc7715eabb4fe2ab18cb | 377b3d754053ae109af8cfb5eb282860a7e134f6 | /w6/climr/R/fit.climr.R | 04c0e4647d52569c631b6d6f6f561995758a7cbc | [] | no_license | iant04128591/STAT40830 | df97f205ab685805535db08f774673a88c9b24eb | ad188ea27f7e54e3e92bd061aa0fdad1a855fdf5 | refs/heads/master | 2020-08-01T08:13:05.023278 | 2016-12-08T23:45:21 | 2016-12-08T23:45:21 | 73,582,215 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,884 | r | fit.climr.R | #' Fit basic statistical moels to climate data
#'
#' @param obj An object of class \code{climr} from \code{\link{load_clim}}
#' @param data_type The type of data to be analysed, either yearly, quarterly or monthly
#' @param fit_type The type of model required, either linear regression (\code{lm}), loess or smoothing spline (\code{smooth.spline})
#'
#' @return Return a list of class \code{climr_fit} which includes the model details as well as the data set and the fit type used
#' @seealso \code{\link{load_clim}}, \code{\link{plot.climr_fit}}
#' @export
#' @importFrom magrittr "extract2" "%$%"
#' @importFrom stats "lm" "loess" "smooth.spline" "na.omit" "predict"
#'
#' @examples
#' ans1 = load_clim('SH')
#' ans2 = fit(ans1)
#' ans3 = fit(ans1, data_type='monthly', fit_type = 'smooth.spline')
#' ans4 = fit(ans1, data_type='quarterly', fit_type = 'loess')
# S3 generic: dispatches on class(obj).  The method shipped here is
# fit.climr() below; see the roxygen block above for argument details.
fit = function(obj,
               data_type = c('yearly', 'quarterly', 'monthly'),
               fit_type = c('lm', 'loess', 'smooth.spline')){
  UseMethod('fit')
}
#' @export
fit.climr <- function(obj,
                      data_type = c('yearly', 'quarterly', 'monthly'),
                      fit_type = c('lm', 'loess', 'smooth.spline')) {
  # Resolve the data set and model choices (defaults to the first option).
  fit_dat <- match.arg(data_type)
  fit_arg <- match.arg(fit_type)

  # A climr object stores its data frames in the order yearly (1),
  # quarterly (2), monthly (3); pick the requested one.
  dat_choose <- switch(fit_dat, yearly = 1, quarterly = 2, monthly = 3)
  curr_dat <- obj %>% extract2(dat_choose)

  # Fit the requested model of temperature (temp) on time (x).
  # (The stray debug print(mod) of the original was removed: the function
  # is documented to return invisibly.)
  mod <- switch(fit_arg,
                lm = curr_dat %$% lm(temp ~ x),
                loess = curr_dat %$% loess(temp ~ x),
                smooth.spline = curr_dat %$% smooth.spline(x, temp))

  # Bundle model + data + choices; plot.climr_fit() consumes this structure.
  out <- list(model = mod, data = curr_dat, dat_type = fit_dat, fit_type = fit_arg)
  class(out) <- 'climr_fit'
  invisible(out)
}
|
1cdb144129b7dde8174e7034e449643f343aaaf9 | 919de4d940af1e2280cd07665ef8a91789767014 | /man/split_vec.Rd | f03f30d89603bff22f141fa1527a66758e8f6523 | [
"MIT"
] | permissive | dannydaniel/eegUtils | ded837c9fd84b7ce7f146a573bf54f8b6ff8c638 | 8af2ff8a9e94b550b54d7222304f1799b61d0162 | refs/heads/master | 2020-07-22T21:40:20.330013 | 2019-04-27T11:13:27 | 2019-04-27T11:13:27 | 207,336,581 | 1 | 0 | MIT | 2019-09-09T15:03:07 | 2019-09-09T15:03:07 | null | UTF-8 | R | false | true | 520 | rd | split_vec.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frequency_analysis.R
\name{split_vec}
\alias{split_vec}
\title{Segment data}
\usage{
split_vec(vec, seg_length, overlap)
}
\arguments{
\item{vec}{Data vector to be split up into segments.}
\item{seg_length}{Length of segments to be FFT'd (in samples).}
\item{overlap}{Overlap between segments (in samples).}
}
\description{
Split data into segments for Welch PSD.
}
\author{
Matt Craddock \email{matt@mattcraddock.com}
}
\keyword{internal}
|
62cce5e6263552858d9f3c84c2b6c13e3be67e13 | ad9bb63b558e5b60048380eeedfff5bd10d6ac91 | /utility.R | 5b57e8e80ba4655caa9b8a383405f5d2b21c826d | [] | no_license | mccomark21/NBA_Draft_2020 | 695c3e69eabcfc97a3494748234620eaff2786ff | 92170d95b2e20239d023b8a552e40f1065d93303 | refs/heads/main | 2023-01-08T08:46:38.792054 | 2020-11-10T04:17:15 | 2020-11-10T04:17:15 | 308,236,471 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 165 | r | utility.R | # Loading Screen for charts----
# Wrap a plot output in the app's standard loading spinner:
# spinner style 8, blue (#0072B2), 1.5x size, and keep the stale UI
# visible while the plot recalculates (hide.ui = FALSE).
loadingscreen <- function(plot) {
  shinycssloaders::withSpinner(
    plot,
    type = 8,
    color = "#0072B2",
    size = 1.5,
    hide.ui = FALSE
  )
}
7d9e5156476dbe989d7266c7d385102f6e886027 | 9e8fcdf0d7f018f540dde7a65d9b3f2d65c9cbc6 | /man/feature.missingness.Rd | 88887052c77cee0e7c8c1724c1a40c2772ee7582 | [
"MIT"
] | permissive | yiheng-aug30/metaboprep | 6c02797bd3949c593d8ed70db6e7dcf67cea2eeb | 500b051ad8174a54f11b52caa41b9c57f14dcdc4 | refs/heads/master | 2023-08-29T08:14:57.380002 | 2021-11-15T13:27:40 | 2021-11-15T13:27:40 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,148 | rd | feature.missingness.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/feature.missingness.R
\name{feature.missingness}
\alias{feature.missingness}
\title{estimate feature missingness}
\usage{
feature.missingness(
wdata,
samplemissingness = NULL,
extreme_sample_mis_threshold = 0.5
)
}
\arguments{
\item{wdata}{the metabolite data matrix. samples in row, metabolites in columns}
\item{samplemissingness}{a vector of sample missingness for each sample}
\item{extreme_sample_mis_threshold}{a numeric value; individuals whose sample missingness exceeds this value are excluded from the feature missingness estimator. Default is 0.5.}
}
\value{
a data frame of percent missingness for each feature
}
\description{
This function estimates feature missingness, with a step to exclude poor samples identified as those with a sample missingness greater than 50\%.
}
\examples{
ex_data = sapply(1:5, function(x){rnorm(10, 45, 2)})
ex_data[ sample(1:length(ex_data), 15) ] = NA
smis = apply(ex_data, 1, function(x){ sum(is.na(x))/length(x) })
feature.missingness(wdata = ex_data, samplemissingness = smis)
}
\keyword{feature}
\keyword{missingness}
|
7e9503213f35c99fb4ffdd25cc8fee2587a19f78 | 9229b21b2365424fe0ff6617cf282b5b03439ef6 | /r-demo/helloworld.r | 6a287e46d6262a069a2c7ec185a1576a698e828d | [] | no_license | Huize501/GoogleCloud | 169b83b0d0324b04dc02ee73b2cd0358f79ed9b1 | 191c80ed21117a0a0d936916e7d9e618f27377ba | refs/heads/master | 2023-02-22T22:01:28.965706 | 2022-09-16T05:57:51 | 2022-09-16T05:57:51 | 116,589,846 | 4 | 2 | null | 2023-02-15T22:49:54 | 2018-01-07T18:39:50 | Jupyter Notebook | UTF-8 | R | false | false | 495 | r | helloworld.r | print("You can write any R code and package it into a container")
print("Including your favorite packages")
print("It will then run on the cloud")
print("You can submit as many jobs as you like")
print("And tear down the infrastructure after its finished")
print("So that you can safe some money :)")
print(sum(5,5))
print(sum(5,4))
print(sum(5,3))
print(sum(5,2))
print(sum(5,1))
print(sum(3,2))
print(sum(3,1))
print(sum(2,1))
print(sum(1,1))
print(sum(1,0))
print(sum(0,0))
print("Bye!!!!") |
6685234d936cab551283ea7d12e5a670777591cf | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/sigora/examples/reaM.Rd.R | 28ee55278c0e8e7f3fbe9d81b45f5746f6e81ead | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 190 | r | reaM.Rd.R | library(sigora)
### Name: reaM
### Title: Pathway GPS data, extracted from Reactome repository (Mouse).
### Aliases: reaM
### Keywords: datasets
### ** Examples
# Load the bundled mouse Reactome pathway-GPS dataset and inspect its structure.
data(reaM)
str(reaM)
|
d8ae0d55fde5f921ae412109224fc95f35e44d29 | c9dcad8a10c0e8f7571dda9460fa535e105919b9 | /Experiments/Tibshirani2013/SGL/R/predict.SGL.R | c02faa4f93f1d92db60eeedbb125a14119166889 | [] | no_license | adityagc/MS-Research | f0fd57420768ac270dd6007fa3ed015b5878e17c | 5f148103a6205092fb3ae114c8613f7b6e849a84 | refs/heads/master | 2020-03-18T16:34:47.512135 | 2018-07-02T19:24:50 | 2018-07-02T19:24:50 | 134,974,224 | 3 | 0 | null | null | null | null | UTF-8 | R | false | false | 493 | r | predict.SGL.R | predict.SGL = function(x,newX,lam){
# --- body of predict.SGL(x, newX, lam); the signature is on the line above ---
  # (The unused `cvobj = x` assignment of the original was removed.)
  # Undo the centering/scaling applied to the training design matrix, if
  # any, so newX is on the same scale as the stored coefficients.
  X <- newX
  if (!is.null(x$X.transform)) {
    X <- t(t(newX) - x$X.transform$X.means)
    X <- t(t(X) / x$X.transform$X.scale)
  }
  # The intercept is only stored for some fit types; default to 0.
  intercept <- 0
  if (!is.null(x$intercept)) {
    intercept <- x$intercept[lam]
  }
  # Linear predictor at the lam-th lambda value.
  eta <- X %*% x$beta[, lam] + intercept
  # Map the linear predictor to the response scale for the model type.
  # An unknown type now fails with a clear message instead of the original
  # confusing "object 'y.pred' not found" error.
  y.pred <- switch(x$type,
                   linear = eta,
                   logit = exp(eta) / (1 + exp(eta)),
                   cox = exp(eta),
                   stop("Unknown model type: ", x$type))
  return(y.pred)
}
|
e6caf8b83ae33dd9657e3a27391da1d4ffda5740 | da584e949c39b879460632fd42c8a3916248bc91 | /Clean_code/Clean_Code7.R | 23440ca558ac9348cde751bd8437feb8981ac9a3 | [] | no_license | JordanEvan77/Population-Spread | d57c16ba99acee13e6079b50cb12ce0d901796eb | 26781e09434a411bccd84b12100cb5d7781854f5 | refs/heads/main | 2023-04-27T09:42:47.310415 | 2021-05-14T19:13:16 | 2021-05-14T19:13:16 | 367,454,708 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,511 | r | Clean_Code7.R | library(tidyverse)
# NOTE(review): `library(tidyverse)` is attached on the preceding line;
# this script also assumes the DHS file 'Raw_data/IAHR52FL.dta' exists.
library(haven)
#loading libraries for data manipulation
#Question 1 and 2 Dimensions of data
nfhs <- read_dta('Raw_data/IAHR52FL.dta') #This is the full initial raw data
#Question 3, variables between "hhid" and "shstruc".
nfhs_reduced <- select(nfhs, hhid:shstruc) %>%
  rename(survey_month = hv006) %>%
  rename(loc_type = hv026) #much easier to view table, less columns, only necessary data.
nfhs_urban <- select(nfhs, hhid:shstruc) %>%
  rename(survey_month = hv006) %>%
  rename(home_loc = hv025) %>%
  rename(loc_type = hv026) %>%
  filter(home_loc == 1)#smaller, urban only, rural dropped
#Question 4, Plot the distribution of the number of listed household members
#for the entire sample.
# hv009 = number of household members (DHS recode) -- TODO confirm codebook.
ggplot(data = nfhs_reduced,
       mapping = aes(x = hv009), binwidth = 1) +
  geom_histogram() +
  xlab("Number of household members") #Works! simple bar plot, skewed to right showing full data distribution.
#QUestion 5, Create a boxplot plot using the data frame for urban area.
#FACTOR:
nfhs_1 <-as.factor(nfhs_urban$loc_type) #factor type for sorting urban only
#PLOT:
urban_plot <- ggplot(nfhs_urban) + aes(x = nfhs_1, y = hv009)
urb_labels <- c("Large City", "Small city", "Town", "Country Side", "Missing")
urban_plot + geom_boxplot() + xlab("Home Location") +
  ylab("Number Of Household Members") +
  scale_x_discrete(labels = urb_labels)#very nice box plot for showing the house member counts per location type.
#Question 6,Use "group_by" and "summarise" to find the means and medians of the number of household members
#by type of urban area.
# NOTE(review): summarise_at() is superseded in current dplyr; the modern
# equivalent is summarise(across(hv009, mean)).
nfhs_urban %>%
  group_by(loc_type) %>%
  summarise_at(vars(hv009), list(name=mean))#list of means should show below
nfhs_urban %>%
  group_by(loc_type) %>%
  summarise_at(vars(hv009), list(name=median))#list of medians should show as below
#LIST OF MEANS PEOPLE PER HOUSE
#[capital, large city] 4.65
#[small city] 4.88
#[town] 4.69
#LIST OF MEDIAN PEOPLE PER HOUSE
#[capital, large city] 4
#[small city] 4
#[town] 4
#Question 7, What does the relationship between the mean and median tell you about
#the distribution of household size?
#The distribution doesn't vary much. This might indicate that large intergenerational homes
#aren't the norm for the urban families, because the means are all within .23 "people" of eachother
#and the median is the same for each location type. Additional comments on quiz.
f7e5b9a64adef987d4f06f4f5c1a1059a334e447 | 1486b7de41ac7a9882ecd92722d886802d49f9aa | /create_data/oceans/create_oceans.R | 6c97a3f5ee78765f71af6e491f77c1442ab98304 | [] | no_license | richardsc/ocedata | 4f155f7ad72ac90a42c6003fb316c5e41b7e5a8b | ad804a151f3d50ea41df6f7cfd401e0446ddac6a | refs/heads/master | 2021-01-01T17:58:48.937286 | 2017-07-24T16:20:25 | 2017-07-24T16:20:25 | 98,209,705 | 0 | 0 | null | 2017-07-24T16:08:07 | 2017-07-24T16:08:07 | null | UTF-8 | R | false | false | 221 | r | create_oceans.R | # http://ngdc.noaa.gov/mgg/global/etopo1_ocean_volumes.html
# Read the raw ocean-area/volume table (source URL in the line above).
oceans <- read.table("oceans.dat")
# make area m^2 (source values presumably in km^2 -- TODO confirm against
# the NOAA/ETOPO1 table linked above)
oceans$Area <- oceans$Area * 1e6
# Serialise for package use and recompress with the best available method.
save(oceans, file="oceans.rda")
tools::resaveRdaFiles(".", compress="auto")
|
e2e623032d56ca56c336418e7c33037cc7911449 | caca7203b28507ec914b0be0042f96eb66db71ab | /code/study_var_significativity.R | 2d37cb5f02a4abfa06947ee4105104f2f0a7e405 | [] | no_license | boulbi777/returns-to-schooling-on-earning | e74e86ef94e8e7439a329f5239648746c8b67c6e | b9a64399e0cdf6ccc743276e30c99a59d81d58d0 | refs/heads/master | 2022-12-19T21:53:07.746490 | 2020-09-04T14:55:51 | 2020-09-04T14:55:51 | 293,286,960 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 494 | r | study_var_significativity.R | #significativity test : 15-20%
print("age :")
var_significativity(data$age) #ok
print("age2 :")
var_significativity(data$age2) #ok
print("female :")
var_significativity(data$female) #ok
print("for. :")
var_significativity(data$for.) #ok
print("reg1 :")
var_significativity(data$reg1, FALSE) #ok
print("reg2 :")
var_significativity(data$reg2, FALSE) #not ok
print("reg3 :")
var_significativity(data$reg3, FALSE) #not really ok
print("reg4 :")
var_significativity(data$reg4, FALSE) #ok |
47b5e7d25cd2267cfafb422f0a1acb832f5518b6 | 360bf7d821f1338c3d1e489c99608bfaaff97d26 | /man/aggregate_points_space_time.Rd | 4acbd01174ea749d602e0fbe0fed3fae0cde9816 | [
"MIT"
] | permissive | disarm-platform/disarmr | 0cd17c7e0270b2baa6036cf126d418fc9a39d9d2 | a43b01d77d3552f8353caf9ae542a6798a25a384 | refs/heads/master | 2020-08-26T22:30:07.855368 | 2020-02-27T15:56:09 | 2020-02-27T15:56:09 | 217,168,297 | 1 | 1 | MIT | 2020-02-27T16:01:18 | 2019-10-23T22:59:12 | R | UTF-8 | R | false | true | 377 | rd | aggregate_points_space_time.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/space_time_ppmify_helpers.R
\name{aggregate_points_space_time}
\alias{aggregate_points_space_time}
\title{The aggregate_points_space_time function}
\usage{
aggregate_points_space_time(points, ppmx, periods, date_start_end,
reference_raster)
}
\description{
Helper function for space_time_ppmify
}
|
613d525934deba71f76dacd2552bc43d1ac17abc | fcaf0c32ae844cb182dcb32f61649e2e81493809 | /man/transitive.closure.Rd | c8f12703bfc09041f04f7efe679bbf588d320b08 | [] | no_license | cran/clusthaplo | 2d855b36fa4961bd70ec8dc20b7f847f1e7aa8e5 | 7bb3a732d17178223f0ae6ce0bcf359af15c9765 | refs/heads/master | 2016-09-06T08:43:09.813511 | 2013-07-05T00:00:00 | 2013-07-05T00:00:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,310 | rd | transitive.closure.Rd | \name{transitive.closure}
\alias{transitive.closure}
\title{Performs transitive closure.}
\usage{
transitive.closure(pair.simi, Npar=ncol(.gen))
}
\note{
This function can only be run from inside a clusthaplo context as returned by clusthaplo(...).
}
\arguments{
\item{pair.simi}{The result of a call to \code{\link{pairwise.similarities}}.}
\item{Npar}{The number of haplotypes for which the pairwise similarities were computed.}
}
\description{
Performs transitive closure on the whole set of pairwise similarities for one chromosome.
Transitive closure means that, at a given locus, if haplotypes A and B match, and A and C match, then
B and C are assumed to match also.
}
\value{
A matrix with one column per parent and one row per scanned locus, giving, at each locus and
for each parent, the smallest parent index belonging in their clique. For instance, if haplotypes 1 3 4 look
alike amidst 5 haplotypes, and 2 and 5 are singletons, the row will be \code{c(1, 2, 1, 1, 5)}.
}
\seealso{
\code{\link{pairwise.similarities}}
}
\examples{
data(parents.map, parents.gen)
clu <- clusthaplo(parents.map, NULL, parents.gen)
clu$select.chromosome('chr1')
clu$train()
tc <- clu$transitive.closure(clu$pairwise.similarities())
print(head(tc))
}
|
2de68f7654b45b51edc77aa4c1a73a504a9c5222 | 1d7ed56a3d9b6616509e901dbdac6728e6102bf2 | /R/MergeC.R | 1d2957285ceb173adcbc3e42f5a8414493989376 | [] | no_license | EricMarcon/entropart | 082f32c6c35b964d39c46fbd6b3c49f1b207d1fe | a24fd0cf8cd19896bf2db21e5a619dcdf22c1ae4 | refs/heads/master | 2023-07-20T14:16:47.132287 | 2023-07-14T09:01:21 | 2023-07-14T09:01:21 | 95,141,768 | 6 | 5 | null | 2020-11-09T19:23:35 | 2017-06-22T17:49:13 | R | UTF-8 | R | false | false | 1,002 | r | MergeC.R | MergeC <-
function(MClist, Weights = rep(1, length(MClist)), CheckArguments = TRUE)
{
  # Merge a list of metacommunities into one global MetaCommunity object.
  # NOTE(review): the `MergeC <-` header is on the preceding line;
  # CheckentropartArguments(), mergeandlabel() and MetaCommunity() are
  # package helpers defined elsewhere in entropart.
  if (CheckArguments)
    CheckentropartArguments()
  # Metacommunities must have names
  if (is.null(names(MClist)))
    names(MClist) <- paste("MC", seq_along(MClist), sep="")
  # NOTE(review): CommunityNames is defined but never called below; the
  # same "<MCname>.<community>" labels are rebuilt inline further down.
  CommunityNames <- function(MClist) {
    MCnames <- rep(names(MClist), unlist(lapply(MClist, function(x) length(x$Ni))))
    paste(MCnames, unlist(lapply(MClist, function(x) names(x$Ni))), sep=".")
  }
  # Merge metacommunities Nsi (abundance tables) pairwise via mergeandlabel.
  Reduce(function(...) mergeandlabel(...), lapply(MClist, function(x) x$Nsi)) -> Gabundances
  # Label each merged column as "<metacommunity>.<community>".
  NumCommunities <- unlist(lapply(MClist, function(x) length(x$Ni)))
  MCnames <- rep(names(MClist), NumCommunities)
  names(Gabundances) <- paste(MCnames, unlist(lapply(MClist, function(x) names(x$Ni))), sep=".")
  # Community weights (Wi) scaled by the per-metacommunity Weights argument.
  MCWeights <- unlist(lapply(MClist, function(x) x$Wi))
  # Create the global MC
  return(MetaCommunity(Gabundances, MCWeights*rep(Weights, NumCommunities)))
}
|
a98aa35b5f01ab4a41d5acf63e532a39d7041b97 | 323e8daf6ec798daf4527280e65473f10ae6ba16 | /transform.R | 1eb396cc0976b3ba709f103f924f4e163c98ffb9 | [] | no_license | stjordanis/util-1 | 41812e236a93c2ddb1132228578bdbb41ac8d7bb | 413433da0dbacca86bb0ad80b84c836a3aadf4bc | refs/heads/master | 2021-12-02T20:10:14.899922 | 2012-11-05T20:17:15 | 2012-11-05T20:17:15 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 696 | r | transform.R |
##' Turn a factor in multiple dichotom factors
##'
##' This function takes a vector with an encoded factor and splits it up in a data.frame
##' with one column for each factor level. The value is one if the factor has the certain level, else
##' it is 0.
##' @title Dichotomize a factor
##' @param fac A factor vector
##' @return A data.frame with columns for each factor level
##' @author chris
dichotomize <- function (fac, name){
fac_unique <- unique(fac)
df <- as.data.frame(sapply(fac_unique,
function(x) {
ifelse(fac == x, 1, 0)
}))
colnames(df) <- paste(name, fac_unique, sep = "_")
df
}
|
2d53660fd26d77d39fb6d033f91a6b85dfe14d4e | 7688e8e60019de7fca6fed79465904d765dcccd1 | /Assignment_KNN/KNN-zoo.R | 02a1368e08227fbbc88584e3162f865cfbff07e3 | [] | no_license | AmitkumarShende/Assignments | 35e12bc64bd06781695c3f8e6e1f58eda522d426 | 5402d022cda0343831f5f132d29f04c2f3bbe9ea | refs/heads/master | 2021-02-09T22:53:37.847942 | 2020-03-02T09:37:03 | 2020-03-02T09:37:03 | 244,331,870 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,284 | r | KNN-zoo.R | #**************install the packages if unavailable**************
#install.packages('caret')
#install.packages('dplyr')
library(caret)
# Read the dataset
# NOTE(review): file.choose() is interactive; this script cannot run
# unattended -- consider a fixed path for reproducibility.
zoo <- read.csv(file.choose())
#EDA
table(zoo$type)
summary(zoo)
str(zoo)
# excluding 1st column having categorical values
zoo1 <- zoo[,2:18]
str(zoo1)
#converting int variable type to factor type
library(dplyr)
con.names = zoo1 %>% select_if(is.numeric) %>% colnames()
#con.names
zoo1[,con.names] = data.frame(apply(zoo1[con.names], 2, as.factor))
str(zoo1)
# Data partition
# NOTE(review): `replace = T` works but TRUE is preferred (T is reassignable).
set.seed(123)
ind <- sample(2,nrow(zoo1), replace = T, prob = c(0.7,0.3))
train <- zoo1[ind==1,]
test <- zoo1[ind==2,]
#Creating performance Model
# KNN Model
trcontrol <- trainControl(method = "repeatedcv", number = 10,repeats = 3
                          # classprobs are needed when u want to select ROC for optimal K Value
                          )
set.seed(123)
fit <- train(type ~., data = train, method = 'knn', tuneLength = 20,
             trControl = trcontrol, preProc = c("center","scale"))
# Default metric is accuracy but if u want to use ROC, then mention the same
# Model Performance :
fit # the optimum value for k should be 5
plot(fit)
varImp(fit)
pred <- predict(fit, newdata = test )
confusionMatrix(pred, test$type) # Accuracy of model is 86.21
|
87ecbdeb9575196e886b8b28c792df4c59927753 | 5c0b9f40f6b93356c701941609850d90e2a56d15 | /tests/testthat/test-make_wallpapr.R | 8be7741330079d0c8aff366f1d7cf49f055147f9 | [] | no_license | JBGruber/wallpapr | 27710f46d3f3d76fd007c6f753effebe007d59e2 | 8ca68c982dbeccc40bff0f0ba5b90cbf0161f2a5 | refs/heads/master | 2022-10-12T18:31:09.090297 | 2022-10-04T09:43:29 | 2022-10-04T09:43:29 | 181,216,998 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 425 | r | test-make_wallpapr.R | test_that("make_wallpapr", {
expect_equal({
out <- make_wallpapr(
system.file("extdata", "mull.jpg", package = "wallpapr"),
month = as.Date("2021-03-01"),
return_plot = TRUE
)
df <- out$data
c(nrow(df), ncol(df), sum(df$week), sum(df$size),
class(out), attr(out, "dims"))
}, list(39L, 6L, 387, 40, "wallpapr", "gg", "ggplot", dpi = 900,
width = 1613L, height = 907L))
})
|
6aeda57a86214362ef98d77066db8d88c5acccb2 | bea5690609b4cc2aded036e104685af3d996abd3 | /createsimulationsobject.R | 798773b958d9bbb20bc59afb026a8d1b99631f1e | [] | no_license | dluzzi/spup-app | 6cc7f336d8690c26c162932777d516397e2b5666 | 7355e147eea0ab6fd1a1df763118847a3b354ba9 | refs/heads/master | 2016-08-11T10:51:16.429093 | 2016-03-15T13:56:16 | 2016-03-15T13:56:16 | 47,685,043 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,450 | r | createsimulationsobject.R | ## Used for testing only
## Creates an object of class "Simulations"
library(raster)
# Create Class ------------------------------------------------------------
# Create "Simulations" class
setClass("Simulations", slots =
           c(Realisations="RasterBrick",
             Mean = "RasterLayer",
             Standard.Deviation = "RasterLayer",
             Most.Likely.Class = "RasterLayer",
             Class.Probabilities = "RasterBrick",
             Quantiles = "RasterBrick")
)
# Load data ---------------------------------------------------------------
# Initialise rasterbrick
zlatibor.brick <- brick()
# Load data to create object
# NOTE(review): both assignments to `input` below are commented out, so
# raster(input) will fail unless `input` is defined in the workspace --
# set it to the DEM simulation directory before running.
for (i in 1:100){
  #input <- Insert directory
  #input <- paste("D:/DamianoLuzzi-Thesis-DO-NOT-REMOVE/spup/data/zlatibor_dem_simulations/DEMsim", i, ".asc", sep = "")
  DEM <- raster(input)
  zlatibor.brick <- addLayer(zlatibor.brick, DEM)
}
# Convert stack to brick
zlatibor.brick <- brick(zlatibor.brick)
# Calculate mean, sd and quantiles (cell-wise across the 100 realisations)
std<-calc(zlatibor.brick, fun = sd, na.rm = T)
mean <- mean(zlatibor.brick, na.rm = T)
quantiles <- calc(zlatibor.brick, fun = function(x) {quantile(x,
            probs = c(.05,.25, .5, .75, .95),na.rm=TRUE)} )
# Create object of class Simulations --------------------------------------
# (only the slots computed above are filled; the class slots are optional)
simulations <- new("Simulations", Realisations = zlatibor.brick, Mean = mean,
                   Standard.Deviation = std, Quantiles = quantiles)
|
2edb076da127ecd6e6ba156becb0c9ac8cb58c21 | f8dba00a0e35196f01a174002350a0dbee36cdbb | /man/UKBBcleanR-package.Rd | f565abeefdda584290332f4e4a38130c27841e61 | [
"MIT"
] | permissive | machiela-lab/UKBBcleanR | 9801079ab95a88d0508113fb4d11544475b7a699 | 73c76b46258d810d936bdc5f4a3ec8b7d51cea41 | refs/heads/main | 2023-04-07T03:38:31.741924 | 2023-01-26T14:53:15 | 2023-01-26T14:53:15 | 520,682,551 | 5 | 0 | null | null | null | null | UTF-8 | R | false | true | 1,425 | rd | UKBBcleanR-package.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/package.R
\docType{package}
\name{UKBBcleanR-package}
\alias{UKBBcleanR-package}
\alias{UKBBcleanR}
\title{The UKBBcleanR Package}
\description{
Prepare electronic medical record data from the UK Biobank for time-to-event analyses
}
\details{
Prepares time-to-event data from raw UK Biobank \url{https://www.ukbiobank.ac.uk/} electronic medical record data. The prepared data can be used for cancer outcomes but could be modified for other health outcomes.
Key content of the 'UKBBcleanR' package include:\cr
\code{\link{tte}} Prepares time-to-event data from raw UK Biobank \url{https://www.ukbiobank.ac.uk/} electronic medical record data.
}
\section{Dependencies}{
The 'UKBBcleanR' package relies heavily upon \code{\link{data.table}}, \code{\link{dplyr}}, and \code{\link{stringr}} to clean raw UK Biobank data \url{https://www.ukbiobank.ac.uk/} and output a time-to-event data set.
}
\author{
Alexander Depaulis\cr \emph{Integrative Tumor Epidemiology Branch (ITEB), Division of Cancer Epidemiology and Genetics (DCEG), National Cancer Institute (NCI), National Institutes of Health (NIH), Rockville, Maryland (MD), USA} \cr
Derek W. Brown\cr \emph{ITEB, DCEG, NCI, NIH, Rockville, MD, USA} \cr
Aubrey K. Hubbard\cr \emph{ITEB, DCEG, NCI, NIH, Rockville, MD, USA} \cr
Maintainer: D.W.B. \email{derek.brown@nih.gov}
}
\keyword{package}
|
6f6d6e194218735dca1af2807a28401ac5bd778b | 44f56ff45d793bc38f638f2fc88afd21893c72ba | /man/get_fpr.Rd | dcc1d932df3a9453faec0e8985f1d8118376d0d9 | [] | no_license | pedrostrusso/annotator | e5fbeef5f954f4abd78a66985b740676d96c6c9f | 4370b0e23395362db12a671e1574dd8bc525c0ac | refs/heads/master | 2020-03-18T15:06:52.590658 | 2019-06-10T22:00:14 | 2019-06-10T22:00:14 | 134,887,810 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 547 | rd | get_fpr.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tpr_fpr.R
\name{get_fpr}
\alias{get_fpr}
\title{Calculate false positive rate between two clustering methods}
\usage{
get_fpr(group1, group2)
}
\arguments{
\item{group1}{The first clustering method}
\item{group2}{The reference ("true") method}
}
\value{
The false positive rate
}
\description{
Calculate false positive rate between two clustering methods
}
\examples{
g1 <- sample(1:2, size=10, replace=TRUE)
g2 <- sample(1:3, size=10, replace=TRUE)
get_fpr(g1, g2)
}
|
0b092099c5873dfb3f0123a983782436b14e239d | e1967b6cf66fd951e75f10d9e2b4e68da6efce2c | /quickstart.R | 52f9dade73dbaf264b19fbc903e72ab0047edd03 | [] | no_license | PhilippPro/surrogate | f1e45e30b4e0ca2330b9a73f61cc8d274fad2fec | 0e9d0b0252f6e200efa0208ad7ad3dbc167454cf | refs/heads/master | 2021-01-19T00:22:43.953683 | 2017-04-06T13:43:23 | 2017-04-06T13:43:23 | 87,162,451 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,702 | r | quickstart.R | library(devtools)
# replace this soon
# NOTE(review): `library(devtools)` is attached on the preceding line; this
# script further assumes OpenML, dplyr, stringi and stringr helpers
# (listOMLRuns, stri_match_last, ...) are available via the loaded package.
load_all("/home/probst/Paper/Exploration_of_Hyperparameters/OMLbots")
# This has to be replaced by the database extraction (Daniel) ----------------------------------------------
tag = "mlrRandomBot"
numRuns = 140000
# Download run metadata in pages of 10000 (the API limits page size).
results = do.call("rbind",
  lapply(0:floor(numRuns/10000), function(i) {
    return(listOMLRuns(tag = tag, limit = 10000, offset = (10000 * i) + 1))
  })
)
table(results$flow.id, results$task.id)
table(results$uploader)
# Fetch run evaluations in chunks of 100 run ids.
res = do.call("rbind",
  lapply(0:floor(nrow(results)/100), function(i) {
    return(listOMLRunEvaluations(run.id = results$run.id[((100*i)+1):(100*(i+1))]))
  })
)
# takes a very long time
# Split flow names into a version suffix and the bare learner name.
df = res %>%
  mutate(flow.version = c(stri_match_last(flow.name, regex = "[[:digit:]]+\\.*[[:digit:]]*")),
    learner.name = stri_replace_last(flow.name, replacement = "", regex = "[([:digit:]]+\\.*[[:digit:]*)]"))
as.data.frame.matrix(table(df$learner.name, df$data.name))
# -----------------------------------------------------------------------------------------------------------
overview = getMlrRandomBotOverview("botV1")
print(overview)
tbl.results = getMlrRandomBotResults("botV1")
print(tbl.results)
tbl.hypPars = getMlrRandomBotHyperpars("botV1")
print(tbl.hypPars)
task.data = makeBotTable(measure.name = "area.under.roc.curve", learner.name = "mlr.classif.glmnet",
  tbl.results = tbl.results, tbl.hypPars = tbl.hypPars, tbl.metaFeatures = NULL)
task.ids = unique(tbl.results$task.id)
# NOTE(review): `lrn.par.set` must be defined in the workspace (it is not
# created in this script).
surr = makeSurrogateModel(measure.name = "area.under.roc.curve", learner.name = "mlr.classif.glmnet",
  task.id = task.ids, tbl.results = tbl.results, tbl.hypPars = tbl.hypPars, param.set = lrn.par.set$classif.glmnet.set$param.set)
|
51fa4fb4dc766f9cd2d2033fe9fb1e345eb2fcdf | b62a6e662910152cf4e41e8e98c3b090f69ae245 | /pould/R/parseGenotypes.R | 242715ea9e5764c113dd68fee461502130c37a81 | [] | no_license | sjmack/pould | b359d3f6cf44fc858dfd6666a9e2771b7d09bd09 | bd35da7715d6faa960620f3bf6816031d8c8d9b3 | refs/heads/master | 2021-06-29T20:53:53.423809 | 2020-10-08T18:30:00 | 2020-10-08T18:30:00 | 132,675,243 | 1 | 2 | null | 2018-05-19T07:58:12 | 2018-05-08T23:15:13 | R | UTF-8 | R | false | false | 4,357 | r | parseGenotypes.R | ## parseGenotypes -- Steven J. Mack April 10, 2020
## v1.00
## Accepts and converts 2-column/locus BIGDAWG/PyPop-formatted genotype data to the GL String format expected by LDWrap
#' Reformat columnnar genotype data to GL String format
#'
#' This function accepts genotype data organized in locus-column pairs, and returns GL String-formatted data structured for LDWrap(). Of the resulting multilocus haplotype pair, the first haplotype is constructed from the first column for each locus, and the second haplotype is constructed from the second column.
#' @param dataset A tab-delimited text file (with a .txt or .tsv filename suffix) with a header row or a data frame. Each row corresponds to a subject, with two columns per locus. Allele names can include a locus name (e.g., locus*allele) or can can exclude the locus, but all allele names in the dataset must either include or exclude the locus. Missing (untyped) allele data can be identified with an empty cell or a set of four asterisks in files, and with NA values in data frames. Column names for each locus pair must be adjacent, but can be either identical (e.g., "locus" and "locus"), or suffixed (e.g., "locus_1" and "locus_2", where "locus_1" always precedes "locus_2"). A optional column of sample identifiers can be included, but must be named "SampleID". A column named "Disease" can be included, but will be ignored. No other non-locus columns are permitted.
#' @note This function is for internal POULD use only.
#' @return A data frame of two columns. The "Relation" column includes sample identifiers if provided, or numbers from 1 to the number of subjects. The "GL String" column contains the GL String formatted genotypes.
#' @keywords LDformat reformat GL String
#' @export
#' @examples #
parseGenotypes <- function(dataset) {
  # Convert 2-column/locus genotype data into GL String format (see the
  # roxygen block above for the full contract).
  if(missing(dataset)) {return(cat("Please provide a value for the dataset parameter.\n"))}
  # Non-data.frame input is treated as a path to a tab-delimited file;
  # "****" cells are read as missing.
  if(!is.data.frame(dataset)) { dataset <- read.table(dataset,header=T,sep="\t",colClasses = "character",stringsAsFactors = FALSE,as.is = TRUE,check.names = FALSE,na.strings = "****")}
  colnames(dataset) <- toupper(colnames(dataset))
  # Extract (and drop) the sample-identifier column, or fall back to row
  # numbers when none is present; a "Disease" column is ignored entirely.
  if("SAMPLEID" %in% colnames(dataset)) {
    ids <- dataset$SAMPLEID
    dataset <- dataset[,!colnames(dataset) %in% "SAMPLEID"] } else {
      ids <- 1:nrow(dataset) }
  if("DISEASE" %in% colnames(dataset)) {
    dataset <- dataset[,!colnames(dataset) %in% "DISEASE"] }
  # Every locus must contribute exactly two columns.
  if(ncol(dataset) %% 2 !=0 ) {return(cat("Odd number of locus columns (",ncol(dataset),"). Please review your dataset.\n",sep=""))}
  # Strip "_<digit>" suffixes so paired columns share a bare locus name.
  colnames(dataset) <- sub("\\_\\d","",colnames(dataset))
  if(ncol(dataset) == 2) {return(cat("This dataset contains data for a single locus (",colnames(dataset)[1],"). LD analysis requires two loci.\n",sep=""))}
  # If no cell already contains "locus*allele" notation, prepend each
  # column's locus name to its alleles.
  if(!any(grepl("*",dataset,fixed=TRUE))) {dataset[] <- Map(paste,names(dataset),dataset,sep="*")}
  # V0.3 remove NAs that become locus*NA
  blanks <- paste(colnames(dataset),NA,sep="*")
  for(i in 1:ncol(dataset)) {
    if(nrow(dataset[dataset[,i] == blanks[i],][i]) != 0 ) {
      dataset[dataset[,i] == blanks[i],][i] <- NA }
  }
  hap <-vector("list",2) # paste together haplotypes & clean up stragglers
  # x = FALSE selects the second column of each locus pair (hap[[1]]);
  # x = TRUE selects the first column (hap[[2]]). Alleles are joined with
  # "~"; the gsub/substr passes below remove artefacts left by missing
  # alleles (stray "NA" text, repeated/leading/trailing tildes).
  for(x in FALSE:TRUE) { hap[[((1*x)+1)]] <- apply(dataset[,rep(c(TRUE,FALSE),(ncol(dataset)/2))==x],1,paste,collapse="~")
    hap[[((1*x)+1)]] <- gsub("[N][A]","",hap[[((1*x)+1)]]) # eliminate all 'NA' from missing data cells
    hap[[((1*x)+1)]] <- gsub("~+","~",hap[[((1*x)+1)]]) # eliminate tilde-runs for empty cells
    hap[[((1*x)+1)]][substr(hap[[((1*x)+1)]],1,1)=="~"] <- substr(hap[[((1*x)+1)]][substr(hap[[((1*x)+1)]],1,1)=="~"],2,nchar(hap[[((1*x)+1)]][substr(hap[[((1*x)+1)]],1,1)=="~"])) ## trim leading tilde
    hap[[((1*x)+1)]][substr(hap[[((1*x)+1)]],(nchar(hap[[((1*x)+1)]])),nchar(hap[[((1*x)+1)]]))=="~"] <- substr(hap[[((1*x)+1)]][substr(hap[[((1*x)+1)]],(nchar(hap[[((1*x)+1)]])),nchar(hap[[((1*x)+1)]]))=="~"],1,nchar(hap[[((1*x)+1)]])-1) ## trim trailing tilde
  }
  # First-column haplotype (hap[[2]]) precedes the second (hap[[1]]) in
  # the final "hap1+hap2" GL String, matching the documented contract.
  fdataset <- cbind(as.data.frame(ids,stringsAsFactors = FALSE),as.data.frame(paste(hap[[2]],hap[[1]],sep="+"),stringsAsFactors = FALSE))
  colnames(fdataset) <- c("Relation","Gl.String")
  fdataset
}
|
e566fc8b712b52cb5cec57c3f5f487b6002db897 | 9eac9f8e7495d916f7596c4444461521b1a39086 | /scripts/regionAnalysis.R | eac666de0313a555b3df26d5b091072296033db8 | [
"Apache-2.0"
] | permissive | uniqueg/scripts | bbb42d455196f8e047df2681661a02d38e4a762f | 9fdcb93f740c0d353b8f9c0fe3ceab6a941af87d | refs/heads/master | 2023-04-08T17:08:00.911197 | 2023-03-16T08:46:40 | 2023-03-16T08:46:40 | 211,389,152 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 11,633 | r | regionAnalysis.R | #!/usr/bin/env Rscript
##############################
### GLOBAL PARAMETERS // ###
##############################
# All region types tracked per nucleotide ("none" = no/partial annotation).
region_types <- c("three_prime_utr", "five_prime_utr", "CDS", "none")
# Defined region types only (used for pie charts and the chi-squared test).
def_region_types <- c("three_prime_utr", "five_prime_utr", "CDS")
# Plot colors, one per entry of `def_region_types` (same order).
reg_colors_def <- c("#33ccff", "#666666", "#ff8000")
##############################
### // GLOBAL PARAMETERS ###
##############################
######################
### FUNCTIONS // ###
######################
formatTypes <- function(chr) {
  # Map internal region-type tokens to their human-readable display labels.
  # Substitutions are applied in a fixed order, exactly as before.
  labels <- c(
    five_prime_utr  = "5'UTR",
    three_prime_utr = "3'UTR",
    none            = "Undefined"
  )
  for (token in names(labels)) {
    chr <- gsub(token, labels[[token]], chr)
  }
  return(chr)
}
#-----------------------#
pieChart <- function(x, col, legend, main=NULL, cex.main=1.6, legend.pos="topright", cex.leg=1.4) {
  # Draw a clockwise, unlabeled pie chart of `x` and add a boxless legend.
  # Called purely for its plotting side effect; always returns NULL.
  pie(
    x,
    clockwise = TRUE,
    col       = col,
    labels    = NA,
    main      = main,
    cex.main  = cex.main
  )
  legend(x = legend.pos, legend = legend, fill = col, bty = "n", cex = cex.leg)
  return(NULL)
}
#-----------------------#
chiSquare <- function(x, p) {
  # Run a goodness-of-fit chi-squared test of observed counts `x` against
  # expected proportions `p`, and attach a multi-line human-readable
  # summary to the result under `$print`.
  chsq <- chisq.test(x = x, p = p)
  join <- function(v) paste(v, collapse = ", ")
  summary_lines <- c(
    chsq$method,
    paste(rep("-", nchar(chsq$method)), collapse = ""),
    paste("Statistic:", chsq$statistic),
    paste("Degrees of freedom:", chsq$parameter),
    paste("P-value:", chsq$p.value),
    paste("Observed:", join(chsq$observed)),
    paste("Expected:", join(chsq$expected)),
    paste("Pearson residuals:", join(chsq$residuals)),
    paste("Standardized residuals:", join(chsq$stdres))
  )
  chsq$print <- paste(summary_lines, collapse = "\n")
  return(chsq)
}
#-----------------------#
categoryVectorListToBED <- function(vectorList, nms, categories, start, prefix) {
  # Collapse per-nucleotide region-type vectors into contiguous blocks and
  # export them as a BED file ("<prefix>.regions.bed").
  # Args:
  #   vectorList: named list of character vectors (one region type per nt)
  #   nms:        names of the entries of `vectorList` to process
  #   categories: region types to keep
  #   start:      1-based start coordinate per entry (recycled by mapply)
  #   prefix:     output file prefix
  # Returns the unlisted GRanges object that was exported.
  # NOTE(review): requires Bioconductor (GenomicRanges/IRanges/rtracklayer)
  # to be attached by the caller. Block starts are computed assuming no
  # block is dropped between kept ones; with `categories` covering all
  # types (as in this script) that holds, otherwise coordinates would shift.
  grl <- GRangesList(mapply(function(name, start) {
    site <- vectorList[[name]]
    # Run-length encode the per-nucleotide types into contiguous blocks,
    # then keep only the requested categories.
    site_coll <- rle(site)
    type <- site_coll$values %in% categories
    site_coll <- setNames(site_coll$lengths[type], site_coll$values[type])
    # Start of each block = entry start + cumulative width of predecessors.
    start <- start + c(0, cumsum(site_coll[-length(site_coll)]))
    ranges <- IRanges(start=start, width=site_coll)
    seqnames <- Rle(rep(name, length(ranges)))
    gr <- GRanges(seqnames=seqnames, ranges=ranges, name=names(site_coll))
  }, nms, start))
  gr <- unlist(grl)
  names(gr) <- NULL
  outFile <- paste(prefix, "regions.bed", sep=".")
  write(paste("Writing region types to BED file ", outFile, "...", sep="'"), stdout())
  export(gr, outFile, format="bed")
  return(gr)
}
#-----------------------#
plotFormats <- function(FUN, formats, prefix) {
  # Render the same plotting expression into one file per requested format.
  #
  # `FUN` arrives as an unevaluated promise. Forcing it directly (as the
  # original `dump <- FUN` did) draws the plot only once, so any second
  # device would receive nothing -- which is why the png/svg branches had
  # been commented out. Capturing the expression with substitute() lets us
  # re-evaluate it once per device, restoring multi-format output.
  #
  # Args:
  #   FUN:     a plotting expression, e.g. pieChart(...)
  #   formats: character vector; any of "pdf", "png", "svg"
  #   prefix:  output path prefix; ".<format>" is appended
  # Returns NULL invisibly (called for its file-writing side effect).
  expr <- substitute(FUN)
  env <- parent.frame()
  render <- function(device, suffix) {
    outFile <- paste(prefix, suffix, sep = ".")
    # BUGFIX: the original write() omitted stdout() and therefore wrote
    # the status message to a file literally named "data" (base::write's
    # default `file` argument).
    write(paste("Plotting pie chart to file ", outFile, "...", sep = "'"), stdout())
    device(outFile)
    on.exit(dev.off(), add = TRUE)  # close the device even on error
    eval(expr, env)
  }
  if ("pdf" %in% formats) render(pdf, "pdf")
  if ("png" %in% formats) render(png, "png")
  if ("svg" %in% formats) render(svg, "svg")
  invisible(NULL)
}
#-----------------------#
processAnnotations <- function(gtf, outDir) {
  # Parse a GTF annotation file into per-nucleotide region-type data.
  # Side effects under `outDir`: the imported GTF, per-nucleotide region
  # vectors and region counts as .R files, a BED file of collapsed region
  # blocks, and a pie chart of the defined-region composition.
  # Args:
  #   gtf:    path to a GTF annotation file
  #   outDir: output directory (created if missing)
  # Returns a list of all intermediate objects.
  # NOTE(review): requires Bioconductor (rtracklayer/GenomicRanges) and the
  # sibling helpers/globals defined in this file.
  # Load packages
  write("Loading package 'rtracklayer'...", stdout())
  library("rtracklayer")
  # Make output directory & prepare output filenames
  write("Generating output directory...", stdout())
  dir.create(outDir, recursive=TRUE, showWarnings = FALSE)
  # ".gtf" is a regex here ("." matches any char) -- acceptable for typical
  # filenames, but note it would also split on e.g. "Xgtf".
  gtfBase <- unlist(strsplit(basename(gtf), ".gtf"))[1]
  out_prefix <- file.path(outDir, gtfBase)
  # Import GTF annotations (the `gtf` variable is reassigned from the path
  # to the imported GRanges object from here on).
  write(paste("Importing GTF annotation data from file ", gtf, "...", sep="'"), stdout())
  gtf <- import(gtf, format="gtf", asRangedData=FALSE)
  outFile <- paste(out_prefix, "GRanges.R", sep=".")
  write(paste("Saving GTF object in R file ", outFile, "...", sep="'"), stdout())
  save(gtf, file=outFile)
  # Subset exons and split by transcript identifier
  write("Subsetting exons...", stdout())
  exons_gr <- gtf[gtf$type == "exon"]
  mcols(exons_gr) <- list(transcript_id=factor(exons_gr$transcript_id))
  exons_grl <- split(exons_gr, exons_gr$transcript_id)
  # Subset & process region annotations (5' UTR, CDS, 3' UTR);
  # stop codons are folded into the CDS.
  write("Subsetting/processing region annotations...", stdout())
  regions_gr <- gtf[gtf$type %in% c("five_prime_utr", "CDS", "stop_codon", "three_prime_utr")]
  regions_gr$type[regions_gr$type == "stop_codon"] <- "CDS"
  mcols(regions_gr) <- list(transcript_id=factor(regions_gr$transcript_id), type=factor(regions_gr$type))
  regions_grl <- split(regions_gr, regions_gr$transcript_id)
  # Get exons with and without region annotation
  write("Identify exons with/without region annotations...", stdout())
  exons_w_regions_grl <- exons_grl[intersect(names(exons_grl), names(regions_grl))]
  exons_wo_regions_grl <- exons_grl[setdiff(names(exons_grl), names(exons_w_regions_grl))]
  exons_w_partial_regions_grl <- psetdiff(exons_w_regions_grl, regions_grl)
  # Update region annotations with type "none" for exonic stretches that
  # have no (or only partial) region annotation.
  write("Add annotation type 'none' to unavailable/undefined region annotations...", stdout())
  no_regions_gr <- c(unlist(exons_wo_regions_grl, use.names=FALSE), unlist(exons_w_partial_regions_grl, use.names=FALSE))
  mcols(no_regions_gr)$type <- factor(rep("none", length(no_regions_gr)))
  regions_gr <- sort(c(regions_gr, no_regions_gr))
  regions_grl <- split(regions_gr, regions_gr$transcript_id)
  # Get list of vectors of region types:
  # one vector per transcript, one region type per nucleotide position,
  # oriented 5'->3' (minus-strand transcripts are reversed).
  write("Obtaining region type information per nucleotide (this may take long)...", stdout())
  reg_vec_all_ls <- lapply(regions_grl, function(trx) {
    strand <- unique(strand(trx))
    if ( length(strand) != 1 | ! strand %in% c("+", "-") ) {
      write("[WARNING] Strand information unclear.", stderr())
      return(NULL)
    }
    vec <- as.character(unlist(mapply(rep, x=trx$type, each=width(trx))))
    if ( strand == "+" ) return(vec) else return(rev(vec))
  })
  outFile <- paste(out_prefix, "regionByNucleotide.R", sep=".")
  write(paste("Saving nucleotide-level composition information in file ", outFile, "...", sep="'"), stdout())
  save(reg_vec_all_ls, file=outFile)
  # Summarize region type nucleotide composition
  write("Counting nucleotides per region type...", stdout())
  reg_cts_all <- table(unlist(reg_vec_all_ls))
  reg_cts_all <- setNames(as.numeric(reg_cts_all), names(reg_cts_all))
  outFile <- paste(out_prefix, "regionCounts.R", sep=".")
  write(paste("Saving counts in file ", outFile, "...", sep="'"), stdout())
  save(reg_cts_all, file=outFile)
  # Generate BED file of regions (transcripts with unclear strand yielded
  # NULL above and are dropped here).
  names_found <- names(reg_vec_all_ls[! sapply(reg_vec_all_ls, is.null)])
  start <- rep(1, length(names_found))
  # BUGFIX: the original passed `reg_vec_sites_ls`, which is undefined in
  # this function; the per-nucleotide vectors computed above are
  # `reg_vec_all_ls`.
  gr <- categoryVectorListToBED(reg_vec_all_ls, names_found, region_types, start, out_prefix)
  # Generate pie chart of the defined region types only
  reg_cts_all_def <- reg_cts_all[def_region_types]
  plotFormats(pieChart(x=reg_cts_all_def[def_region_types], col=reg_colors_def, legend=formatTypes(def_region_types), main="control"), formats=c("pdf", "png", "svg"), prefix=paste(out_prefix, "regionCounts.pie", sep="."))
  # Return list of objects
  obj_ls <- list(gtf=gtf, regions_gr=regions_gr, reg_vec_all_ls=reg_vec_all_ls, reg_cts_all=reg_cts_all, gr=gr, reg_cts_all_def=reg_cts_all_def)
  return(obj_ls)
}
#-----------------------#
processSample <- function (csv, regionPerNt, reg_cts_all, outDir) {
  # Analyze the region-type composition of a set of sites against the
  # transcriptome-wide background produced by processAnnotations().
  # Side effects under `outDir`: per-nucleotide region vectors and counts
  # as .R files, a BED file, a pie chart, and a chi-squared summary.
  # Args:
  #   csv:         tab-delimited file of sites (seqnames/start/end columns)
  #   regionPerNt: `reg_vec_all_ls` object, or path to the .R file
  #                written by processAnnotations()
  #   reg_cts_all: `reg_cts_all` object, or path to the .R file
  #                written by processAnnotations()
  #   outDir:      output directory (created if missing)
  # Returns a list of all intermediate objects.
  # NOTE(review): relies on the sibling helpers/globals defined in this file.
  # Make output directory & prepare output filenames
  write("Generating output directory...", stdout())
  dir.create(outDir, recursive=TRUE, showWarnings = FALSE)
  csvBase <- unlist(strsplit(basename(csv), ".csv"))[1]
  out_prefix <- file.path(outDir, csvBase)
  # Obtain annotation R objects: each argument may be the object itself or
  # a path; load() restores the stored object (named `reg_vec_all_ls` /
  # `reg_cts_all`) into this function's frame, replacing the path.
  write(paste("Obtaining annotation R objects...", sep="'"), stdout())
  if ( mode(regionPerNt) == "character" ) {
    load(regionPerNt)
  } else {
    reg_vec_all_ls <- regionPerNt
  }
  # BUGFIX: the original referenced an undefined variable `regionCounts`
  # here; the corresponding parameter is `reg_cts_all`.
  if ( mode(reg_cts_all) == "character" ) {
    load(reg_cts_all)
  }
  # Importing CSV file of sites
  write(paste("Importing sites from file ", csv, "...", sep="'"), stdout())
  sites <- read.delim(csv, stringsAsFactors=FALSE)
  # Get list of vectors of region types: one vector per site, holding the
  # region type of every nucleotide the site spans on its transcript.
  reg_vec_sites_ls <- apply(sites, 1, function(site) {
    reg_vec_all_ls[[site["seqnames"]]][site["start"]:site["end"]]
  })
  names(reg_vec_sites_ls) <- sites$seqnames
  outFile <- paste(out_prefix, "regionByNucleotide.R", sep=".")
  write(paste("Saving nucleotide-level composition information in file ", outFile, "...", sep="'"), stdout())
  save(reg_vec_sites_ls, file=outFile)
  # Summarize region type nucleotide composition
  write("Counting nucleotides per region type...", stdout())
  reg_cts_sites <- table(unlist(reg_vec_sites_ls))
  reg_cts_sites <- setNames(as.numeric(reg_cts_sites), names(reg_cts_sites))
  outFile <- paste(out_prefix, "regionCounts.R", sep=".")
  write(paste("Saving counts in file ", outFile, "...", sep="'"), stdout())
  save(reg_cts_sites, file=outFile)
  # Generate BED file of regions
  names_found <- names(reg_vec_sites_ls[! sapply(reg_vec_sites_ls, is.null)])
  start <- sites$start[sites$seqnames %in% names_found]
  gr <- categoryVectorListToBED(reg_vec_sites_ls, names_found, region_types, start, out_prefix)
  # Generate pie chart of the defined region types only
  reg_cts_sites_def <- reg_cts_sites[def_region_types]
  plotFormats(pieChart(x=reg_cts_sites_def, col=reg_colors_def, legend=formatTypes(def_region_types), main="sample"), formats=c("pdf", "png", "svg"), prefix=paste(out_prefix, "regionCounts.pie", sep="."))
  # Run Pearson's Chi-squared test of the sites' composition against the
  # transcriptome-wide background proportions.
  write("Running Pearson's Chi-squared test...", stdout())
  reg_cts_all_def <- reg_cts_all[def_region_types]
  chsq <- chiSquare(x=reg_cts_sites_def, p=reg_cts_all_def/sum(reg_cts_all_def))
  outFile <- paste(out_prefix, "chiSquare.txt", sep=".")
  write(paste("Writing Chi-square summary to file", outFile, "...", sep="'"), stdout())
  write(chsq$print, file=outFile)
  # Return list of objects
  obj_ls <- list(reg_vec_all_ls=reg_vec_all_ls, reg_cts_all=reg_cts_all, sites=sites, reg_vec_sites_ls=reg_vec_sites_ls, reg_cts_sites=reg_cts_sites, reg_cts_sites_def=reg_cts_sites_def, reg_cts_all_def=reg_cts_all_def, gr=gr, chsq=chsq)
  return(obj_ls)
}
######################
### // FUNCTIONS ###
######################
#################
### MAIN // ###
#################
# Initiate objects
obj_ls <- NULL
# Process annotations when a GTF was supplied.  Either derived object
# (per-nucleotide region vectors, region counts) that the user did not
# provide explicitly falls back to the freshly computed one.
if ( ! is.null(gtf) ) {
  annot_obj_ls <- processAnnotations(gtf, outDir)
  if ( is.null(regionPerNt) ) regionPerNt <- annot_obj_ls$reg_vec_all_ls
  if ( is.null(regionCounts) ) regionCounts <- annot_obj_ls$reg_cts_all
}
# Process sample
# NOTE(review): c() drops NULL elements, so is.null(c(...)) is TRUE only when
# ALL three inputs are NULL -- this guard does not skip when just one of
# csv/regionPerNt/regionCounts is missing.  Confirm the intended behavior.
if ( ! any(is.null(c(csv, regionPerNt, regionCounts))) ) {
  sample_obj_ls <- processSample(csv, regionPerNt, regionCounts, outDir)
}
# Save session (full workspace image, so all intermediates are recoverable)
outFile <- file.path(outDir, "session.R")
write(paste("Saving R session in file ", outFile, "...", sep="'"), stdout())
save.image(file=outFile)
#################
### // MAIN ###
#################
|
f3839bb2d1033962fb2109b54a723286636dbe23 | 7f9c3c40ec2ab8938e0482b2f07e2ceb1301b0be | /R/functions-simulation.R | c43047abdf5880303984426909224855623cb6ed | [] | no_license | graveja0/modeling-health-insurance | c7cfa157c4d15dfda76e39d5366e473042cf108a | ce1c10e205e0d5d026010b19b527af0d0749b89d | refs/heads/master | 2021-07-10T09:04:41.948224 | 2020-07-21T14:31:58 | 2020-07-21T14:31:58 | 177,799,095 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 29,958 | r | functions-simulation.R | theme_tufte_revised <- function(base_size = 11, base_family = "Gill Sans", ticks = TRUE) {
ret <- ggplot2::theme_bw(base_family = base_family, base_size = base_size) +
ggplot2::theme(
axis.line = ggplot2::element_line(color = 'black'),
axis.title.x = ggplot2::element_text(vjust = -0.3),
axis.title.y = ggplot2::element_text(vjust = 0.8),
legend.background = ggplot2::element_blank(),
legend.key = ggplot2::element_blank(),
legend.title = ggplot2::element_text(face="plain"),
panel.background = ggplot2::element_blank(),
panel.border = ggplot2::element_blank(),
panel.grid = ggplot2::element_blank(),
plot.background = ggplot2::element_blank(),
strip.background = ggplot2::element_blank()
)
if (!ticks) {
ret <- ret + ggplot2::theme(axis.ticks = ggplot2::element_blank())
}
ret
}
get_params <- function(x, params) {
  # Draw one value from each parameter's sampling function.
  # `params` is a list of quantile-style functions and `x` the matching list
  # of inputs (e.g. random probabilities); each function is applied to its
  # corresponding input.  Returns a list named like `params`.
  map2(params, x, function(draw_fn, draw_input) draw_fn(draw_input))
}
# Degenerate "quantile" function: every probability maps to the same fixed
# value, letting a parameter be held constant inside a quantile-based
# sampling scheme (the probability argument `x` is deliberately ignored).
qfixed <- function(x, value) {
  value
}
get_takeup_coef <- function(df = df_wtp_and_costs,params) {
  # Draw one PSA iteration's take-up ("s") model coefficients for the
  # population / plan described in `params` (pop_fpl, plan_type).
  #
  # Args:
  #   df     - long data frame of fitted coefficients with columns
  #            iteration, fpl, type, outcome plus one column per coefficient.
  #   params - list holding pop_fpl and plan_type.
  # Returns: a named numeric vector of polynomial coefficients
  #   (intercept, wtp, i_wtp_2, i_wtp_3) suitable for get_takeup().
  #
  # BUG FIX: `sample(x, 1)` silently expands to `sample(1:x, 1)` when `x`
  # has length one, so draw a row index explicitly instead of sampling the
  # iteration values directly.
  sampled <- df$iteration[sample.int(length(df$iteration), 1)]
  tmp <-
    df %>%
    filter(iteration == sampled & fpl == params$pop_fpl & type == params$plan_type & outcome=="s") %>%
    select(-type,-outcome,-fpl,-iteration)
  # Flatten the single coefficient row into a named numeric vector.
  tmp_coef <-
    tmp %>%
    gather(coef,value) %>%
    mutate(value = as.numeric(paste0(value))) %>%
    pull(value)
  names(tmp_coef) <- names(tmp)
  return(tmp_coef)
}
get_cost_coef <- function(df = df_wtp_and_costs, params) {
  # Draw one PSA iteration's cost-model coefficients for the population /
  # plan described in `params` (pop_fpl, plan_type).  Mirrors
  # get_takeup_coef() but selects the outcome == "cost" rows.
  #
  # Returns: a named numeric vector of polynomial coefficients
  #   (intercept, wtp, i_wtp_2, i_wtp_3) suitable for get_cost().
  #
  # BUG FIX: `sample(x, 1)` silently expands to `sample(1:x, 1)` when `x`
  # has length one, so draw a row index explicitly instead of sampling the
  # iteration values directly.
  sampled <- df$iteration[sample.int(length(df$iteration), 1)]
  tmp <-
    df %>%
    filter(iteration == sampled & fpl == params$pop_fpl & type == params$plan_type & outcome=="cost") %>%
    select(-type,-outcome,-fpl,-iteration)
  # Flatten the single coefficient row into a named numeric vector.
  tmp_coef <-
    tmp %>%
    gather(coef,value) %>%
    mutate(value = as.numeric(paste0(value))) %>%
    pull(value)
  names(tmp_coef) <- names(tmp)
  return(tmp_coef)
}
get_takeup <- function(params, premium ) {
  # Predicted take-up share at a given premium, from the cubic model held in
  # params$takeup_coef (intercept, wtp, i_wtp_2, i_wtp_3).  A NULL `premium`
  # falls back to the plan premium stored in `params`.  The prediction is
  # clamped to the unit interval [0, 1].
  prem <- if (is.null(premium)) params$plan_premium else premium
  cf <- params$takeup_coef
  predicted <- unname(cf['intercept'] + cf['wtp'] * prem +
                        cf['i_wtp_2'] * prem^2 + cf['i_wtp_3'] * prem^3)
  pmax(0, pmin(1, predicted))
}
get_cost <- function(params, premium ) {
  # Predicted insurer cost at a given premium, from the cubic model held in
  # params$cost_coef (intercept, wtp, i_wtp_2, i_wtp_3).  A NULL `premium`
  # falls back to the plan premium stored in `params`.  Costs are floored at
  # zero (a negative prediction is not meaningful).
  prem <- if (is.null(premium)) params$plan_premium else premium
  cf <- params$cost_coef
  predicted <- unname(cf['intercept'] + cf['wtp'] * prem +
                        cf['i_wtp_2'] * prem^2 + cf['i_wtp_3'] * prem^3)
  pmax(0, predicted)
}
fn_uncomp <- function(cost, uninsured_oop_share , phi ) {
  # Uncompensated care generated by an uninsured person.
  # `uninsured_oop_share` is the share of the uninsured's total health care
  # costs paid out of pocket; `phi` is the percentage increase in costs that
  # results from insurance coverage (moral hazard).  Deflate insured cost by
  # (1 + phi) to get spending absent coverage, then take the share NOT paid
  # out of pocket -- the part passed on to external payers.
  cost_without_coverage <- cost / (1 + phi)
  share_unpaid <- 1 - uninsured_oop_share
  share_unpaid * cost_without_coverage
}
# Two-way deterministic sensitivity analysis on a PSA sample.
# Fits a multiple multivariate regression metamodel (each strategy's outcome
# regressed on all parameters, with degree-8 polynomials in parm1/parm2),
# predicts outcomes over a 301x301 grid of (parm1, parm2) holding all other
# parameters at their means, and tiles the grid by the optimal strategy.
#
# Args:
#   indata  - PSA draws: psa_id plus dQALY_<strategy>/dCOST_<strategy>
#             columns and one column per sampled parameter.
#   outcome - which net-benefit scale to optimize: "NHB" or "NMB".
#   parm1, parm2 - names of the two parameters to vary.
#   range1, range2 - optional c(lo, hi) ranges; each defaults to the
#             2.5th-97.5th percentile span of the parameter's PSA sample.
#   lambda  - willingness-to-pay threshold used to build NHB/NMB.
# Returns: a ggplot tile plot of the optimal strategy.
TwoWaySA<-function(indata,outcome="NHB",parm1,parm2,range1,range2,lambda){
  # Get Outcome: reshape to one row per (psa_id, strategy) and compute both
  # net-benefit scales from the incremental QALYs and costs.
  lhs <- indata %>% select(psa_id,contains("dQALY"),contains("dCOST")) %>%
    mutate(psa_id=row_number()) %>%
    reshape2::melt(id.vars='psa_id') %>%
    tidyr::separate(variable,c("outcome","strategy"),"_") %>%
    reshape2::dcast(psa_id+strategy~outcome) %>%
    mutate(NHB = dQALY-dCOST * lambda ,
           NMB = dQALY*lambda - dCOST)
  # Get Parameters (all non-outcome columns)
  rhs <- indata %>% select(-contains("dQALY"),-contains("dCOST"),
                           -contains("NMB"),-contains("NHB"),-psa_id)
  # Map to existing code inputs
  Strategies <- unique(lhs$strategy)
  Parms <- rhs %>% tbl_df() %>% data.frame()
  cat(outcome) # NOTE(review): looks like leftover debug output -- confirm
  lhs$Y <- lhs[,outcome]
  # One column per strategy of the chosen outcome, one row per PSA draw
  Outcomes <- lhs %>% select(strategy,psa_id,Y) %>%
    reshape2::dcast(psa_id~strategy,value.var="Y") %>%
    select(-psa_id)
  #Extract parameter column number in Parms matrix
  x1<-which(colnames(Parms)==parm1)
  x2<-which(colnames(Parms)==parm2)
  dep<-length(Strategies) #Number of dependent variables, i.e., strategies
  indep<-ncol(Parms) #Number of independent variables, i.e., parameters
  Sim <- data.frame(Outcomes,Parms)
  # With only the two plotted parameters present, add a constant column so
  # the regression formula below still has a term after excluding them.
  if (ncol(Parms)==2) {
    Parms$constant = 1
    Sim$constant = 1
  }
  #Determine range of of the parameer to be plotted
  if (!missing("range1")&!missing("range2")){ #If user defines a range
    vector1<-seq(from=range1[1],to=range1[2],length.out=301)
    vector2<-seq(from=range2[1],to=range2[2],length.out=301)
  } else if (!missing("range1")&missing("range2")){ #Default range given by the domanin of the parameter's sample
    #vector to define 400 samples between the 2.5th and 97.5th percentiles
    vector1<-seq(from=range1[1],to=range1[2],length.out=301)
    y2 = seq(2.5,97.5,length.out=301)
    j2 = round(y2*(length(Parms[,x2])/100)) #indexing vector;j=round(y*n/100) where n is the size of vector of interest
    vector2<-sort(Parms[j2,x2])
  } else if (missing("range1")&!missing("range2")){ #Default range given by the domanin of the parameter's sample
    #vector to define 400 samples between the 2.5th and 97.5th percentiles
    vector2<-seq(from=range2[1],to=range2[2],length.out=301)
    y1 = seq(2.5,97.5,length.out=301)
    j1 = round(y1*(length(Parms[,x1])/100)) #indexing vector;j=round(y*n/100) where n is the size of vector of interest
    vector1<-sort(Parms[j1,x1])
  } else{
    # Neither range given: take 301 evenly spaced percentile points of each
    # parameter's empirical distribution.
    y1 = seq(2.5,97.5,length.out=301)
    y2 = seq(2.5,97.5,length.out=301)
    j1 = round(y1*(length(Parms[,x1])/100)) #indexing vector;j=round(y*n/100) where n is the size of vector of interest
    j2 = round(y2*(length(Parms[,x2])/100))
    vector1<-sort(Parms[j1,x1])
    vector2<-sort(Parms[j2,x2])
  }
  #Generate a formula by pasting column names for both dependent and independent variables
  f <- as.formula(paste('cbind(',paste(colnames(Sim)[1:dep],collapse=','),
                        ') ~ (','poly(',parm1,',8)+','poly(',parm2,',8)+' ,
                        paste(colnames(Parms)[c(-x1,-x2)], collapse='+'),')'))
  #Run Multiple Multivariate Regression (MMR) Metamodel
  Tway.mlm = lm(f,data=Sim)
  # Full grid of the two varied parameters
  TWSA <- expand.grid(parm1=vector1,parm2=vector2)
  #Generate matrix to use for prediction
  # All parameters sit at their PSA means except the two being varied.
  Sim.fit<-matrix(rep(colMeans(Parms)),nrow=nrow(TWSA),ncol=ncol(Parms), byrow=T)
  Sim.fit[,x1]<-TWSA[,1]
  Sim.fit[,x2]<-TWSA[,2]
  Sim.fit<-data.frame(Sim.fit) #Transform to data frame, the format required for predict
  colnames(Sim.fit)<-colnames(Parms) #Name data frame's columns with parameters' names
  #Predict Outcomes using MMMR Metamodel fit
  Sim.TW = data.frame(predict(Tway.mlm, newdata = Sim.fit))
  #Find optimal strategy in terms of maximum Outcome
  Optimal <- max.col(Sim.TW)
  #Get Outcome of Optimal strategy
  OptimalOut<-apply(Sim.TW,1,max)
  plotdata = Sim.fit #Append parameter's dataframe to predicted outcomes dataframe
  #A simple trick to define my variables in my functions environment
  plotdata$parm1<-plotdata[,parm1];
  plotdata$parm2<-plotdata[,parm2];
  plotdata$Strategy<-factor(Optimal,labels=Strategies[as.numeric(names(table(Optimal)))])
  plotdata$value<-OptimalOut
  txtsize<-12
  p <- ggplot(plotdata, aes(x=parm1,y=parm2))+
    geom_tile(aes(fill=Strategy)) +
    theme_bw() +
    #ggtitle(expression(atop("Two-way sensitivity analysis",
    #                        atop("Net Health Benefit")))) +
    scale_fill_discrete("Strategy: ", l=50)+
    xlab(parm1)+
    ylab(parm2)+
    theme(legend.position="bottom",legend.title=element_text(size = txtsize),
          legend.key = element_rect(colour = "black"),
          legend.text = element_text(size = txtsize),
          title = element_text(face="bold", size=15),
          axis.title.x = element_text(face="bold", size=txtsize),
          axis.title.y = element_text(face="bold", size=txtsize),
          axis.text.y = element_text(size=txtsize),
          axis.text.x = element_text(size=txtsize))+
    scale_fill_grey(start = 0, end = 1)
  return(p)
}
# One-way deterministic sensitivity analysis on a PSA sample.
# Fits a regression metamodel of each strategy's outcome on all parameters
# (quadratic in `parm`), then plots the predicted outcome of every strategy
# as `parm` sweeps its range with all other parameters held at their means.
#
# Args:
#   indata  - PSA draws: psa_id plus dQALY_<strategy>/dCOST_<strategy>
#             columns and one column per sampled parameter.
#   outcome - which net-benefit scale to plot: "NHB" or "NMB".
#   lambda  - willingness-to-pay threshold used to build NHB/NMB.
#   parm    - name of the parameter to vary.
#   range   - optional c(lo, hi); defaults to the 2.5th-97.5th percentile
#             span of the parameter's PSA sample.
# Returns: a ggplot line plot (one line per strategy, direct-labeled).
OneWaySA<-function(indata,outcome="NHB",lambda,parm,range){
  # Get Outcome: reshape to one row per (psa_id, strategy) and compute both
  # net-benefit scales from the incremental QALYs and costs.
  lhs <- indata %>% select(psa_id,contains("dQALY"),contains("dCOST")) %>%
    mutate(psa_id = row_number()) %>%
    reshape2::melt(id.vars='psa_id') %>%
    tidyr::separate(variable,c("outcome","strategy"),"_") %>%
    reshape2::dcast(psa_id+strategy~outcome) %>%
    mutate(NHB = dQALY-dCOST * lambda ,
           NMB = dQALY*lambda - dCOST)
  # Get Parameters (all non-outcome columns)
  rhs <- indata %>% select(-contains("dQALY"),-contains("dCOST"),
                           -contains("NMB"),-contains("NHB"),-psa_id)
  # Map to existing code inputs
  Strategies <- unique(lhs$strategy)
  Parms <- rhs %>% tbl_df() %>% data.frame()
  lhs$Y <- lhs[,outcome]
  # One column per strategy of the chosen outcome, one row per PSA draw
  Outcomes <- lhs %>% select(strategy,psa_id,Y) %>%
    reshape2::dcast(psa_id~strategy,value.var="Y") %>%
    select(-psa_id)
  #Extract parameter column number in Parms matrix
  x<-which(colnames(Parms)==parm)
  dep<-length(Strategies) #Number of dependent variables, i.e., strategies outcomes
  indep<-ncol(Parms) #Number of independent variables, i.e., parameters
  Sim <- data.frame(Outcomes,Parms)
  #Determine range of of the parameer to be plotted
  if (!missing("range")){ #If user defines a range
    vector<-seq(range[1],range[2],length.out=400)
  }
  else{ #Default range given by the domanin of the parameter's sample
    #vector to define 400 samples between the 2.5th and 97.5th percentiles
    y = seq(2.5,97.5,length=400)
    j = round(y*(length(Parms[,x])/100)) #indexing vector;j=round(y*n/100) where n is the size of vector of interest
    vector<-sort(as.data.frame(Parms)[j,x])
  }
  #Generate a formula by pasting column names for both dependent and independent variables. Imposes a 1 level interaction
  f <- as.formula(paste('cbind(',paste(colnames(Sim)[1:dep],collapse=','), ') ~ (','poly(',parm,',2)+' ,paste(colnames(Parms)[-x], collapse='+'),')'))
  #Run Multiple Multivariate Regression (MMR) Metamodel
  Oway.mlm = lm(f,data=Sim)
  #Generate matrix to use for prediction
  # All parameters sit at their PSA means except the one being varied.
  Sim.fit<-matrix(rep(colMeans(Parms)),nrow=length(vector),ncol=ncol(Parms), byrow=T)
  Sim.fit[,x]<-vector
  Sim.fit<-data.frame(Sim.fit) #Transform to data frame, the format required for predict
  colnames(Sim.fit)<-colnames(Parms) #Name data frame's columns with parameters' names
  #Predict Outcomes using MMMR Metamodel fit
  plotdata = data.frame(predict(Oway.mlm, newdata = Sim.fit))
  colnames(plotdata) <- Strategies #Name the predicted outcomes columns with strategies names
  #Reshape dataframe for ggplot
  plotdata = stack(plotdata, select=Strategies) #
  plotdata = cbind(Sim.fit, plotdata) #Append parameter's dataframe to predicted outcomes dataframe
  #A simple trick to define my variables in my functions environment
  plotdata$parm<-plotdata[,parm];
  library(directlabels) # NOTE(review): library() call inside a function -- attaches globally
  txtsize<-12 #Text size for the graphs
  ggplot(data = plotdata, aes(x = parm, y = values, lty = ind)) +
    geom_line() +
    #ggtitle("One-way sensitivity analysis \n Net Health Benefit") +
    xlab(parm) +
    ylab("E[NHB]") +
    scale_colour_hue("Strategy", l=50) +
    #scale_x_continuous(breaks=number_ticks(6)) + #Adjust for number of ticks in x axis
    #scale_y_continuous(breaks=number_ticks(6)) +
    theme_bw() +
    theme(legend.position="bottom",legend.title=element_text(size = txtsize),
          legend.key = element_rect(colour = "black"),
          legend.text = element_text(size = txtsize),
          title = element_text(face="bold", size=15),
          axis.title.x = element_text(face="bold", size=txtsize),
          axis.title.y = element_text(face="bold", size=txtsize),
          axis.text.y = element_text(size=txtsize),
          axis.text.x = element_text(size=txtsize))+
    geom_dl(aes(label = ind), method = list(dl.combine("last.bumpup"), cex = 0.8))
}
# Cost-effectiveness acceptability curves.
# For each threshold in `lambda_range`, computes each strategy's net health
# benefit on every PSA draw, finds the probability that each strategy has the
# maximum NHB, and plots those probabilities against the threshold.
#
# Args:
#   lambda_range - numeric vector of willingness-to-pay thresholds.
#   indata       - PSA draws with dQALY_<strategy>/dCOST_<strategy> columns.
# Returns: a ggplot of Pr(cost-effective) by strategy across thresholds.
CEAC<-function(lambda_range,indata){
  # Get Outcome: one row per (psa_id, strategy) with dCOST and dQALY columns
  lhs <- indata %>% select(contains("dQALY"),contains("dCOST")) %>%
    mutate(psa_id = row_number()) %>%
    reshape2::melt(id.vars='psa_id') %>%
    tidyr::separate(variable,c("outcome","strategy"),"_") %>%
    reshape2::dcast(psa_id+strategy~outcome)
  # Get Parameters
  rhs <- indata %>% select(-contains("dQALY"),-contains("dCOST"),-psa_id)
  # Map to existing code inputs
  Strategies <- unique(lhs$strategy)
  Parms <- rhs %>% tbl_df()
  Outcomes <- lhs %>% select(strategy,psa_id,contains("dCOST"),contains("dQALY"))
  # Outcomes must be ordered in a way that for each strategy the cost must appear first then the effectiveness
  lambda<- lambda_range
  NHB <- array(0, dim=c(dim(Outcomes)[1],length(Strategies))) # Matrix to store NHB for each strategy
  colnames(NHB)<-Strategies
  CEA<-array(0,dim=c(length(lambda),length(Strategies)))
  #
  # One NHB column per threshold value, rows aligned with Outcomes
  NHB <- lambda %>% purrr::map(~(Outcomes$dQALY-Outcomes$dCOST * .x)) %>% do.call("cbind",.)
  colnames(NHB) <- paste0("lambda_",lambda)
  NHB <- data.frame(NHB)
  NHB$strategy <- Outcomes$strategy
  NHB$psa_id <- Outcomes$psa_id
  # Split into one wide data frame per threshold: psa_id ~ NHB_<strategy>
  NHB2 <- NHB %>% reshape2::melt(id.vars=c("strategy","psa_id"))
  NHB2 <- NHB2 %>% split(NHB2$variable)
  foo <- NHB2 %>% map2(.,names(.),~select(.x,-variable)) %>%
    map2(.,names(.),~mutate(.x,lambda=as.numeric(gsub("lambda_","",.y)))) %>%
    map2(.,names(.),~mutate(.x,NHB="NHB")) %>%
    map2(.,names(.),~reshape2::dcast(.x,psa_id~NHB+strategy))
  # For each threshold, flag the NHB-maximizing strategy per draw and average
  # the indicator across draws to get Pr(cost-effective).
  Optimal <- CEA <- list()
  for (i in names(foo)) {
    max.temp <- foo[[i]][,-1] %>% apply(.,1,max)
    # NOTE(review): funs() is deprecated in dplyr >= 0.8; list(~ ...) is the replacement
    Optimal[[i]] <- foo[[i]][,-1] %>% tbl_df() %>% mutate_all(funs(as.integer(.==max.temp)))
    CEA[[i]] <- colMeans(Optimal[[i]])
  }
  CEA <- do.call("rbind",CEA) %>% tbl_df() %>% mutate(lambda=as.numeric(gsub("lambda_","",names(foo))))
  colnames(CEA)<- gsub("NHB_","",colnames(CEA))
  CEAC<-reshape2::melt(CEA, id.vars = "lambda")
  library(directlabels) # NOTE(review): library() call inside a function -- attaches globally
  txtsize<-12
  CEAC <- CEAC %>% mutate(variable = paste0(" ",variable," "))
  p <- ggplot(data = CEAC, aes(x = lambda, y = value, color = variable)) +
    geom_point() +
    geom_line() +
    #ggtitle("Cost-Effectiveness Acceptability Curves") +
    scale_colour_hue("Strategies: ",l=50) +
    #scale_x_continuous(breaks=number_ticks(6))+
    xlab(expression("Policy Adoption Threshold "(lambda))) +
    ylab("Pr Cost-Effective") +
    theme_bw() +
    theme(legend.position="bottom",legend.title=element_text(size = txtsize),
          legend.key = element_rect(colour = "black"),
          legend.text = element_text(size = txtsize),
          title = element_text(face="bold", size=15),
          axis.title.x = element_text(face="bold", size=txtsize),
          axis.title.y = element_text(face="bold", size=txtsize),
          axis.text.y = element_text(size=txtsize),
          axis.text.x = element_text(size=txtsize))+scale_colour_grey(start = .5, end = 1)+
    geom_dl(aes(label = variable), method = list(dl.combine( "last.points"), cex = 0.8))
  return(p)
}
# Tornado diagram of one-way parameter influence on the optimal strategy.
# Fits linear regressions of every strategy's outcome on all parameters,
# evaluates the optimal outcome when each parameter moves alone to its
# 2.5th / 97.5th percentile (others at their means), ranks parameters by the
# size of that swing, and draws stacked bars colored by which strategy is
# optimal along each sweep.
#
# Args:
#   indata  - PSA draws: psa_id plus dQALY_<strategy>/dCOST_<strategy>
#             columns and one column per sampled parameter.
#   outcome - which net-benefit scale to analyze: "NHB" or "NMB".
#   lambda  - willingness-to-pay threshold used to build NHB/NMB.
# Returns: a ggplot tornado diagram.
TornadoDiag <- function(indata,outcome,lambda) {
  # Get Outcome: reshape to one row per (psa_id, strategy), both NB scales
  lhs <- indata %>% select(psa_id,contains("dQALY"),contains("dCOST")) %>%
    mutate(psa_id = row_number()) %>%
    reshape2::melt(id.vars='psa_id') %>%
    tidyr::separate(variable,c("outcome","strategy"),"_") %>%
    reshape2::dcast(psa_id+strategy~outcome) %>%
    mutate(NHB = dQALY-dCOST * lambda ,
           NMB = dQALY*lambda - dCOST)
  # Get Parameters (all non-outcome columns)
  rhs <- indata %>% select(-contains("dQALY"),-contains("dCOST"),
                           -contains("NMB"),-contains("NHB"),-psa_id)
  # Map to existing code inputs
  Strategies <- unique(lhs$strategy)
  Parms <- rhs %>% tbl_df()
  lhs$Y <- lhs[,outcome]
  Outcomes <- lhs %>% select(strategy,psa_id,Y) %>%
    reshape2::dcast(psa_id~strategy,value.var="Y") %>%
    select(-psa_id)
  # Find the Optimal strategy on average
  opt<-which.max(colMeans(Outcomes)); opt
  # calculate min and max vectors of the parameters (e.g., lower 2.5% and 97.5%)
  X <- as.matrix(Parms)
  y <- as.matrix(Outcomes[,opt])
  Y <- as.matrix(Outcomes)
  ymean <- mean(y)
  n <- nrow(Parms)
  nParams <- ncol(Parms)
  paramNames <- colnames(Parms)
  Parms.sorted <- apply(Parms,2,sort,decreasing=F) #Sort in increasing order each column of Parms
  lb <- 2.5
  ub <- 97.5
  # One design row per parameter: all parameters at their means, then the
  # diagonal element replaced with that parameter's lower/upper percentile.
  Xmean <- rep(1,nParams) %*% t(colMeans(X))
  XMin <- Xmean
  XMax <- Xmean
  paramMin <- as.vector(Parms.sorted[round(lb*n/100),])
  paramMax <- as.vector(Parms.sorted[round(ub*n/100),])
  diag(XMin) <- paramMin
  diag(XMax) <- paramMax
  # Prepend the intercept column
  XMin <- cbind(1, XMin)
  XMax <- cbind(1, XMax)
  X <- cbind(1,X)
  # Ordinary least squares via the normal equations
  B <- solve(t(X) %*% X) %*% t(X) %*% y # Regression for optimal strategy
  library(matrixStats) # NOTE(review): library() call inside a function -- attaches globally
  bigBeta <- solve(t(X) %*% X) %*% t(X) %*% Y # Regression for all strategies
  # Best achievable outcome (over strategies), centered at the mean outcome
  yMin <- rowMaxs(XMin %*% bigBeta - ymean)
  yMax <- rowMaxs(XMax %*% bigBeta - ymean)
  ySize <- abs(yMax - yMin)
  rankY<- order(ySize) # parameters ordered by the size of their swing
  xmin <- min(c(yMin, yMax)) + ymean
  xmax <- max(c(yMin, yMax)) + ymean
  paramNames2 <- paste(paramNames, "[", round(paramMin,2), ",", round(paramMax,2), "]")
  strategyNames<-Strategies
  colfunc <- colorRampPalette(c("black", "white"))
  strategyColors <- colfunc(length(Strategies))
  ## Polygon graphs: sweep each parameter across its range and record a
  ## rectangle every time the optimal strategy changes (or the sweep ends).
  nRect <- 0
  x1Rect <- NULL
  x2Rect <- NULL
  ylevel <- NULL
  colRect <- NULL
  for (p in 1:nParams){
    xMean <- colMeans(X)
    xStart = paramMin[rankY[p]]
    xEnd = paramMax[rankY[p]]
    xStep = (xEnd-xStart)/1000
    for (x in seq(xStart,xEnd, by = xStep)){
      #for each point determine which one is the optimal strategy
      xMean[rankY[p] + 1] <- x # +1 moves beyond the constant
      yOutcomes <- xMean %*% bigBeta
      yOptOutcomes <- max(yOutcomes)
      yOpt <- strategyNames[which.max(yOutcomes)]
      if (x == xStart){
        yOptOld <- strategyNames[which.max(yOutcomes)]
        y1 <- yOptOutcomes
      }
      #if yOpt changes, then plot a rectangle for that region
      if (yOpt != yOptOld | x == xEnd){
        nRect <- nRect + 1
        x1Rect[nRect] <- y1
        x2Rect[nRect] <- yOptOutcomes
        ylevel[nRect] <- p
        colRect[nRect] <- yOptOld
        yOptOld <- yOpt
        y1 <- yOptOutcomes
      }
    }
  }
  txtsize <-8
  # One horizontal bar segment per rectangle, stacked by parameter rank
  d=data.frame(x1=x2Rect, x2=x1Rect, y1=ylevel-0.4, y2=ylevel+0.4, t=colRect, r = ylevel)
  p <- ggplot(d, aes(xmin = x1, xmax = x2, ymin = y1, ymax = y2, fill = t)) +
    xlab(paste0("Expected ",outcome)) +
    ylab("Parameters") +
    geom_rect()+
    theme_bw() +
    scale_y_continuous(limits = c(0.5, nParams + 0.5),breaks=seq(1:ncol(Parms)), labels=paramNames2[rankY]) +
    scale_fill_grey(start = 0, end = .9)+
    geom_vline(xintercept=ymean, linetype="dotted") +
    theme(legend.position="bottom",legend.title=element_text(size = txtsize),
          legend.key = element_rect(colour = "black"),
          legend.text = element_text(size = txtsize),
          title = element_text(face="bold", size=1),
          axis.title.x = element_text(face="bold", size=txtsize),
          axis.title.y = element_text(face="bold", size=txtsize),
          axis.text.y = element_text(size=txtsize),
          axis.text.x = element_text(size=txtsize))+ labs(fill="")
  return(p)
}
predict.ga <- function(object, n, n0, verbose = T){
  #### Function to compute the preposterior for each of the
  #### basis functions of the GAM model.
  #### Inputs:
  #### - object: gam object
  #### - n: scalar or vector of new sample size to compute evsi on
  #### - n0: scalar or vector of effective prior sample size
  #### - verbose: Prints the variance reduction factor for each parameter
  #### Returns: the conditional loss/outcome predicted from the shrunken
  #### ("preposterior") basis functions, as a column vector with one row per
  #### observation in the fitted model.
  ### Name of parameters
  names.data <- colnames(object$model)
  ### Create dataframe with parameter values
  data <- data.frame(object$model[,-1])
  ## Name columns of dataframe
  colnames(data) <- names.data[-1]
  ### Number of parameters
  n.params <- ncol(data)
  ### Sanity checks
  if(!(length(n)==1 | length(n)==n.params)){
    stop("Variable 'n' should be either a scalar or a vector
         the same size as the number of parameters")
  }
  if(!(length(n0)==1 | length(n0)==n.params)){
    stop("Variable 'n0' should be either a scalar or a vector
         the same size as the number of parameters")
  }
  ### Make n & n0 consistent with the number of parameters
  if(length(n) == 1){
    n <- rep(n, n.params)
  }
  if(length(n0) == 1){
    n0 <- rep(n0, n.params)
  }
  ### Compute variance reduction factor (shrinks basis functions toward
  ### their means; approaches 1 as the new sample size n dominates n0)
  v.ga <- sqrt(n/(n+n0))
  if (verbose){
    print(paste("Variance reduction factor =", round(v.ga, 3)))
  }
  ### Number of smoothers
  n.smooth <- length(object$smooth)
  ### Number of total basis functions
  n.colX <- length(object$coefficients)
  ### Number of observations
  n.rowX <- nrow(object$model)
  ### Initialize matrix for preposterior of total basis functions
  X <- matrix(NA, n.rowX, n.colX)
  X[, 1] <- 1
  ### Fill in each smooth's block of columns; a label starting with "s" is a
  ### single-parameter smooth, anything else is treated as a tensor smooth
  for (k in 1:n.smooth) { # k <- 1
    klab <- substr(object$smooth[[k]]$label, 1, 1)
    if (klab == "s"){
      Xfrag <- Predict.smooth.ga(object$smooth[[k]], data, v.ga[k])
    } else {
      Xfrag <- Predict.matrix.tensor.smooth.ga(object$smooth[[k]], data, v.ga)
    }
    X[, object$smooth[[k]]$first.para:object$smooth[[k]]$last.para] <- Xfrag
  }
  ### Coefficients of GAM model
  Beta <- coef(object)
  ### Compute conditional Loss
  Ltilde <- X %*% Beta
  return(Ltilde)
}
Predict.smooth.ga <- function (object, data, v.ga = 1) {
  #### Preposterior basis matrix for a single-parameter smooth.
  #### Each basis-function column is shrunk toward its sample mean by the
  #### variance-reduction factor v.ga (v.ga = 1 leaves it unchanged).
  basis <- PredictMat(object, data) # 'mgcv' version 1.8-17
  # Matrix of per-column means, same shape as the basis matrix
  mean_mat <- matrix(colMeans(basis), nrow = nrow(basis),
                     ncol = ncol(basis), byrow = TRUE)
  v.ga * basis + (1 - v.ga) * mean_mat
}
Predict.matrix.tensor.smooth.ga <- function (object,
                                             data,
                                             v.ga = rep(1, ncol(data))){
  #### Function to compute the preposterior for each of the
  #### basis functions for one or more parameters and calculates
  #### the tensor product if more than one parameter is selected
  #### (Heavily based on function Predict.matrix.tensor.smooth from
  #### mgcv package)
  #### Inputs:
  #### - object: a single mgcv tensor-smooth object (object$margin holds
  ####   one marginal smooth per parameter)
  #### - data: data frame of parameter values
  #### - v.ga: per-parameter variance reduction factors
  m <- length(object$margin)
  X <- list()
  # Build the marginal basis matrix for each parameter in the tensor smooth
  for (i in 1:m) { # i <- 1
    term <- object$margin[[i]]$term
    dat <- list()
    for (j in 1:length(term)) { # j <- 1
      dat[[term[j]]] <- data[[term[j]]]
    }
    X[[i]] <- if (!is.null(object$mc[i])) # before: object$mc[i]
      PredictMat(object$margin[[i]], dat, n = length(dat[[1]])) # 'mgcv' version 1.8-17
    else Predict.matrix(object$margin[[i]], dat)
    n.obs <- nrow(X[[i]])
  } # end for 'i'
  # Apply any reparameterization matrices stored on the smooth
  mxp <- length(object$XP)
  if (mxp > 0)
    for (i in 1:mxp) if (!is.null(object$XP[[i]]))
      X[[i]] <- X[[i]] %*% object$XP[[i]]
  ### Apply variance reduction to compute the preposterior
  ### for each of the basis functions
  ## Vector of ones
  ones <- matrix(1, n.obs, 1)
  ## Initialize and fill list with preposterior of basis functions
  ## for each parameter (shrink each marginal basis toward its column means)
  X.ga <- list()
  for (i in 1:m) { # i <- 1
    X.ga[[i]] <- v.ga[i]*X[[i]] + (1-v.ga[i])*(ones %*% colMeans(X[[i]]))
  }
  ### Compute tensor product of the shrunken marginal bases
  T.ga <- tensor.prod.model.matrix(X.ga) # 'mgcv' version 1.8-17
  return(T.ga)
}
## For Simulating Medicaid
cov_sim <- function(params) {
  # Simulate insurance-coverage transitions under three scenarios --
  # baseline, Medicaid expansion, and a premium subsidy -- and attach the
  # MVPF components for the Medicaid and subsidy policies.
  #
  # `params` supplies:
  #   p_*  - baseline occupancy of each coverage category
  #   R_*_TO_* - baseline transition probabilities between categories
  #   DD_*_TO_* - difference-in-differences shifts under Medicaid expansion
  #   plus everything needed by calculate_wtp_public() / simulate_subsidy().
  # Returns a one-row data frame (iteration = 1) with scenario-by-coverage
  # shares and the MVPF numerators/denominators/ratios.

  # Baseline occupancy vector
  p <- params[grep("^p_",names(params))] %>% unlist()
  # Baseline transition matrix, reshaped from R_<from>_TO_<to> entries
  R <- params[grep("^R_",names(params))] %>% unlist() %>%
    data.frame() %>%
    rownames_to_column(var = "type") %>%
    separate(type,into= c("exa","exp"), sep ="_TO_") %>%
    set_names(c("exa","exp","value")) %>%
    spread(exp,value) %>%
    select(-exa) %>%
    as.matrix()
  # Difference-in-differences shifts under Medicaid expansion, same shape
  DD <- params[grep("^DD_",names(params))] %>% unlist() %>%
    data.frame() %>%
    rownames_to_column(var = "type") %>%
    separate(type,into= c("exa","exp"), sep ="_TO_") %>%
    set_names(c("exa","exp","value")) %>%
    spread(exp,value) %>%
    select(-exa) %>%
    as.matrix()
  # End-of-period coverage shares under each scenario
  baseline <- t(p) %*% R
  expmedicaid <- t(p) %*% (R + DD)
  mvpf_med <- calculate_wtp_public(params)
  mvpf_subsidy <- simulate_subsidy(params)
  # Subsidy scenario: move eligible uninsured into private coverage at the
  # simulated take-up rate, then rebalance the residual uninsured share.
  # NOTE(review): rows/columns 4 and 2 are hard-coded positions (presumably
  # uninsured and private coverage per insurance_sipp_lut) -- confirm the
  # ordering if the category set changes.
  R_subsidy <- R
  R_subsidy[4,2] <- R_subsidy[4,4] * params$frac_uninsured_elig * mvpf_subsidy$takeup
  R_subsidy[4,4] <- 1 - sum(R_subsidy[4,1:3])
  subsidy <- t(p) %*% R_subsidy
  # Now need to include estimation of MVPF cost and benefits.
  # Assemble one wide row: <scenario>_<coverage type> columns plus the
  # renamed MVPF components for each policy.
  out <-
    list(baseline = baseline, med = expmedicaid, subsidy = subsidy) %>%
    bind_rows() %>%
    tbl_df() %>%
    mutate(type = insurance_sipp_lut) %>%
    select(type,baseline,med, subsidy) %>%
    gather(key,value,-type) %>%
    mutate(iteration = 1) %>%
    unite("tmp",key,type) %>%
    spread(tmp,value) %>%
    bind_cols(mvpf_med %>% data.frame()) %>%
    rename(med_mvpf = mvpf,
           med_mvpf_num = mvpf_num,
           med_mvpf_denom = mvpf_denom,
           med_wtp = wtp,
           med_cost = cost,
           med_N = N) %>%
    bind_cols(mvpf_subsidy %>% data.frame()) %>%
    rename(subsidy_takeup = takeup,
           subsidy_C_H = C_H,
           subsidy_uncomp = uncomp,
           subsidy_mvpf_num = mvpf_num,
           subsidy_mvpf_denom = mvpf_denom,
           subsidy_mvpf = mvpf)
  return(out)
}
calculate_wtp_public <- function(params, scaling_factor = 1) {
  # MVPF of public insurance (Medicaid), following the Finkelstein-Hendren
  # decomposition: willingness to pay = transfer component + pure-insurance
  # component, with incidence split between the government and individuals.
  #
  # NOTE(review): the `scaling_factor` argument is effectively ignored -- it
  # is unconditionally overwritten with C below.  Confirm whether callers
  # should be able to override the scaling.
  #
  # Returns a list: mvpf, mvpf_num, mvpf_denom, wtp, cost (net cost C),
  # and N (transfer to external parties).

  #The net cost of Medicaid equals the average increase in medical spending due to Medicaid
  # plus the average decrease in out-of-pocket spenign due to Medicaid (see equation 22).
  # p_1 / p_0: out-of-pocket share of spending with / without Medicaid
  p_1 <- params$OOP_Tx / params$G
  p_0 <- params$OOP_Cx / params$G_Cx
  MCD_SPEND = params$G - params$G_Cx
  C = MCD_SPEND + params$OOP_Cx
  # The monetary transfer from Medicaid to external parties, N, is the difference between G and C.
  N <- params$G - C
  # Relative welfare weight of recipients (v_i) vs. external parties (v_j)
  welfare_weight <- params$v_i / params$v_j
  # We estimate the transfer component and pure-insurance component separately, and combine them
  # for our estimate of \gamma(1).
  # Transfer component (p.29)
  # using a linear approximation and the estimates of E[m(0,\theta)] and E[m(1,\theta)]
  Tr <- (p_0-p_1)*(0.5 * (params$G_Cx + params$G))
  net_cost_as_frac_gross <- C / params$G
  moral_hazard_cost <- params$G - Tr - N
  # wtp <- Tr + params$I
  #
  # mvpf_gov <- wtp / C
  # mvpf_indiv <- (wtp + params$G * welfare_weight * (N / params$G)) / params$G
  #
  # mvpf_num_gov <- wtp
  # mvpf_denom_gov <- C
  # mvpf_num_indiv <- (wtp + params$G * welfare_weight * (N / params$G))
  # mvpf_denom_indiv <- params$G
  #
  # mvpf_num <- params$gov_incidence * mvpf_num_gov + (1 - params$gov_incidence) * mvpf_num_indiv
  # mvpf_denom <- params$gov_incidence * mvpf_denom_gov + (1 - params$gov_incidence) * mvpf_denom_indiv
  #
  # mvpf <- mvpf_num / mvpf_denom
  #out <- list(mvpf = mvpf , mvpf_num = (mvpf_num / 12)/100 , mvpf_denom = (mvpf_denom / 12)/100, wtp = wtp , cost = C , N = N)
  ######################################################
  # Scale all relevant values by government cost of
  # Medicaid so it can be measured in terms of a single
  # dollar spent on Medicaid.
  ######################################################
  scaling_factor = C
  # WTP = transfer component + pure-insurance value I (+ calibration fudge)
  wtp <- Tr + params$I + params$fudge
  # Scaling cancels in each ratio; it only rescales the reported num/denom.
  mvpf_gov <- (wtp/scaling_factor ) / (C/scaling_factor )
  mvpf_indiv <- (wtp/scaling_factor + params$G/scaling_factor * welfare_weight * (N / params$G)) / (params$G/scaling_factor )
  mvpf_num_gov <- wtp / scaling_factor
  mvpf_denom_gov <- C /scaling_factor
  mvpf_num_indiv <- (wtp/scaling_factor + params$G/scaling_factor * welfare_weight * (N / params$G))
  mvpf_denom_indiv <- params$G/scaling_factor
  # Blend the government- and individual-incidence cases by gov_incidence
  mvpf_num <- params$gov_incidence * mvpf_num_gov + (1 - params$gov_incidence) * mvpf_num_indiv
  mvpf_denom <- params$gov_incidence * mvpf_denom_gov + (1 - params$gov_incidence) * mvpf_denom_indiv
  mvpf <- mvpf_num / mvpf_denom
  out <- list(mvpf = mvpf , mvpf_num = mvpf_num , mvpf_denom = mvpf_denom, wtp = wtp , cost = C , N = N)
  return(out)
}
simulate_subsidy <- function(params) {
  # MVPF of a premium subsidy for plan H, using the marginal-enrollee
  # formula: benefits are the inframarginal take-up share plus the
  # welfare-weighted uncompensated-care externality from marginal enrollees;
  # costs are the mechanical share plus the net fiscal cost of new enrollees.
  # Returns a list: takeup, C_H (plan cost), uncomp (uncompensated care),
  # mvpf_num, mvpf_denom, mvpf.
  base_premium <- params$plan_premium

  # Take-up at the current premium, and a one-unit finite-difference slope
  # (positive when a higher premium reduces enrollment).
  share_enrolled <- get_takeup(params, premium = base_premium)
  enroll_slope <- share_enrolled - get_takeup(params, premium = base_premium + 1)

  plan_cost <- get_cost(params, premium = base_premium)
  uncomp_care <- fn_uncomp(cost = plan_cost,
                           uninsured_oop_share = params$uninsured_oop_share,
                           phi = params$phi)

  # Relative welfare weight on subsidy recipients vs. taxpayers.
  weight <- params$v_i / params$v_j

  numerator <- share_enrolled + weight * uncomp_care * enroll_slope
  # New enrollees cost their plan cost net of the premium they pay and the
  # uncompensated care the government no longer bears for them.
  new_enrollee_cost <- enroll_slope *
    (plan_cost - params$gov_incidence * uncomp_care - base_premium)
  denominator <- share_enrolled + new_enrollee_cost

  list(
    takeup = share_enrolled,
    C_H = plan_cost,
    uncomp = uncomp_care,
    mvpf_num = numerator,
    mvpf_denom = denominator,
    mvpf = numerator / denominator
  )
}
|
0853dd67327248b42904eac0f4b478575d4818be | ce9f7ef440ba0a08d07e7a181fead89fd5874615 | /featuresToBed.R | 4088f58dd21559904d764ce40e253f4965d9bb1b | [
"CC0-1.0"
] | permissive | karakulahg/DTExpress | 706c7ce768a616fa92ad34cfe4499f860c4d907f | b344f752dd71ec3cf7df29644bfc05a4a67eea48 | refs/heads/main | 2023-06-14T04:16:36.577983 | 2023-04-25T09:41:30 | 2023-04-25T09:41:30 | 200,231,047 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 732 | r | featuresToBed.R | args = commandArgs(trailingOnly = TRUE)
# Convert an NCBI assembly feature table into a 6-column BED file of genes.
# `args` comes from commandArgs(trailingOnly = TRUE) above:
#   args[1] - input feature_table.txt (tab-separated)
#   args[2] - optional output BED path (defaults to "output.bed")
if (length(args) < 1) {
  # BUG FIX: the message previously ended in a literal ".n" (missing escape)
  stop("At least one argument must be supplied (input file).\n", call. = FALSE)
} else if (length(args) == 1) {
  # default output file
  args[2] <- "output.bed"
}
options(stringsAsFactors = FALSE)
# Example input: GCF_000001735.4_TAIR10.1_feature_table.txt
features <- read.csv(args[1], sep = "\t")
# Keep only gene rows; the first column is named "X..feature" because
# read.csv mangles the "# feature" header.
features.subset <- subset(features, X..feature == "gene")
# Fall back to the locus tag wherever the gene symbol is empty.
symbolMissing <- which(features.subset$symbol == "")
features.subset$symbol[symbolMissing] <-
  features.subset$locus_tag[symbolMissing]
# BED columns: chrom, start, end, name, score-like, strand.
# NOTE(review): positions c(7, 8, 9, 15, 16, 10) are hard-coded for the NCBI
# feature_table.txt layout -- confirm if the input format ever changes.
features.bed <- features.subset[, c(7, 8, 9, 15, 16, 10)]
write.table(
  features.bed,
  args[2],
  quote = FALSE,
  col.names = FALSE,
  row.names = FALSE,
  sep = "\t"
)
|
9e795760b7b95d970368689182bb6a68b82027c2 | 6840c1a88a8aa9bbe2723c9d424c3126751bca7f | /man/check_prior.Rd | 13599f097465632554f32e3965b2ba346af848ef | [] | no_license | brieuclehmann/multibergm | edd8170bc5dc7892254974d4aeb979f4c0fe5890 | a5f57abc8da185534f38eae9b84a0c58573931d1 | refs/heads/master | 2022-06-11T14:38:53.370619 | 2022-06-01T08:18:40 | 2022-06-01T08:18:40 | 206,797,419 | 3 | 0 | null | 2021-05-12T12:59:16 | 2019-09-06T13:15:28 | R | UTF-8 | R | false | true | 811 | rd | check_prior.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/set_priors.R
\name{check_prior}
\alias{check_prior}
\alias{check_prior_mean}
\alias{is_covmat}
\alias{check_prior_cov}
\alias{check_prior_scale}
\alias{check_prior_df}
\title{Check validity of multibergm prior}
\usage{
check_prior(prior, n_terms, n_groups)
check_prior_mean(x, n_terms)
is_covmat(x)
check_prior_cov(x, n_terms)
check_prior_scale(x, n_terms)
check_prior_df(x, n_terms)
}
\arguments{
\item{prior}{A list of explicit prior specifications.}
\item{n_terms}{Number of terms (summary statistics) in the exponential random
graph model}
\item{n_groups}{Number of distinct groups}
\item{x}{Prior mean or covariance to be checked}
}
\description{
Internal functions to check compatibility of the prior with the model.
}
|
1729bd0497be9add6ab34a0f7e9107414c6e47d0 | 7f72ac13d08fa64bfd8ac00f44784fef6060fec3 | /RGtk2/man/gtkFontButtonGetShowStyle.Rd | 112e19762307e992b097cffa28ee2ba0d06658e5 | [] | no_license | lawremi/RGtk2 | d2412ccedf2d2bc12888618b42486f7e9cceee43 | eb315232f75c3bed73bae9584510018293ba6b83 | refs/heads/master | 2023-03-05T01:13:14.484107 | 2023-02-25T15:19:06 | 2023-02-25T15:20:41 | 2,554,865 | 14 | 9 | null | 2023-02-06T21:28:56 | 2011-10-11T11:50:22 | R | UTF-8 | R | false | false | 452 | rd | gtkFontButtonGetShowStyle.Rd | \alias{gtkFontButtonGetShowStyle}
\name{gtkFontButtonGetShowStyle}
\title{gtkFontButtonGetShowStyle}
\description{Returns whether the name of the font style will be shown in the label.}
\usage{gtkFontButtonGetShowStyle(object)}
\arguments{\item{\verb{object}}{a \code{\link{GtkFontButton}}}}
\details{Since 2.4}
\value{[logical] whether the font style will be shown in the label.}
\author{Derived by RGtkGen from GTK+ documentation}
\keyword{internal}
|
086e77f7b14e6f82f64c3c0d2739e67dfba9e3c6 | 02f3c8b878eb6e8d2a99c547f251b3a4dd2805da | /man/grib_cube.Rd | 06a963303d59c88e146bea04350bf275ea69b9cc | [
"BSD-3-Clause"
] | permissive | nawendt/gribr | 86d4d7f228b12f4dd44653e8e06cdb1d42446d73 | 3d2a6f438a1b88c36cb5071906ab2a5ddbff1041 | refs/heads/main | 2023-07-19T15:06:39.424009 | 2023-07-10T14:13:44 | 2023-07-10T14:13:44 | 51,981,861 | 20 | 1 | BSD-3-Clause | 2023-04-03T14:22:37 | 2016-02-18T05:32:30 | R | UTF-8 | R | false | true | 1,621 | rd | grib_cube.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/grib_cube.R
\name{grib_cube}
\alias{grib_cube}
\title{Create 3D volume of a GRIB variable}
\usage{
grib_cube(gribObj, shortName, typeOfLevel, decreasing = FALSE)
}
\arguments{
\item{gribObj}{\code{GRIB} class object.}
\item{shortName}{The short name given in the GRIB file of the variable to
select.}
\item{typeOfLevel}{The vertical coordinate to use as given by the typeOfLevel
key in the GRIB file.}
\item{decreasing}{Parameter to tell the array's vertical coordinate to be
increasing or decreasing.}
}
\value{
Returns a three-dimensional array.
}
\description{
\code{grib_cube} creates a three-dimensional array from one variable along a
chosen vertical coordinate.
}
\details{
\code{grib_cube} is a wrapper function for \code{grib_select} to conveniently
create a three-dimensional cube. The user inputs a variable to search for and
the vertical coordinate to use when finding each level.
Because \code{grib_cube} uses \code{grib_select}, speed can become an issue.
This is meant as a convenience to "get the job done". If you want more speed,
it will always be better to know which message number you want, set up your
own loop, and use \code{grib_get_message} as that will avoid the overhead of
searching through the GRIB file.
}
\examples{
g <- grib_open(system.file("extdata", "lfpw.grib1", package = "gribr"))
cube <- grib_cube(g, 'u', 'isobaricInhPa', TRUE)
grib_close(g)
}
\seealso{
\code{\link{grib_get_message}} \code{\link{grib_list}}
\code{\link{grib_expand_grids}} \code{\link{grib_latlons}}
\code{\link{grib_select}}
}
|
94fec547bb361970a5b5c2177f32bf2eb9e725c8 | ae3173d4ffc3e1e9f45acd00772a30760ec2d187 | /mtDNAheatmap.R | 9b96ca648de0f19d2916c21d4c0e70fa3baec358 | [] | no_license | cmiciano/RNA-seq-FA18 | 41d854ae9fb8cf10cbaf40fbaca1521745088b81 | 2983653f50a60b82ef05e267e50c9fd2ff9eaa18 | refs/heads/master | 2022-02-21T19:57:32.081617 | 2019-10-04T18:31:58 | 2019-10-04T18:31:58 | 212,876,971 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,174 | r | mtDNAheatmap.R | library(gplots)
# Blue-white-red diverging palette used by all heatmaps
yb <- colorRampPalette(c("blue", "white", "red"))(100)

fpkmyeast <- read.delim("~/Documents/salk/fpkmyeast.txt")

# Change rownames of fpkm to standard gene symbol
names(fpkmyeast)[1] <- "Transcript"
fpkmname <- as.character(fpkmyeast$Annotation.Divergence)
# Gene symbol is the first "|"-delimited field of the annotation string
fpkmsym <- sapply(fpkmname, function(x) strsplit(x, "|", fixed = TRUE)[[1]][1])
rownames(fpkmyeast) <- fpkmsym

# Read in curated and high-throughput mitochondrial gene annotations
curatedgenes <- read.table("~/Downloads/mitochondrion_annotations_Manually Curated.txt",
                           sep = "\t", skip = 1, header = TRUE, comment.char = "!")
highthrugenes <- read.table("~/Downloads/mitochondrion_annotations_High-throughput.txt",
                            sep = "\t", skip = 1, header = TRUE, comment.char = "!")

# Build a log-transformed expression matrix (sample columns 9:14) for the given
# transcripts, drop genes with zero variance across samples (i.e. unchanged
# between WT and KO), and shorten column names to "<field2>_<field3>".
# Previously this pipeline was duplicated verbatim for each annotation set.
prepare_heatmap_matrix <- function(transcripts, fpkm) {
  infpkm <- subset(fpkm, fpkm$Transcript %in% transcripts)
  transf <- log(infpkm[, 9:14] + 5)  # pseudocount of 5 avoids log(0)
  varmat <- transf[apply(transf, 1, var, na.rm = TRUE) != 0, ]
  colnames(varmat) <- sapply(colnames(varmat), function(x) {
    paste(strsplit(x, "_", fixed = TRUE)[[1]][2],
          strsplit(x, "_", fixed = TRUE)[[1]][3],
          sep = "_")
  })
  varmat
}

# Draw a row-scaled, Ward-clustered (1 - correlation distance) heatmap to a PDF
plot_heatmap_pdf <- function(mat, file) {
  pdf(file, width = 7, height = 7)
  par(mar = c(2, 2, 2, 2), cex = 1.0)
  heatmap.2(as.matrix(mat), col = yb, scale = "row", dendrogram = "row", labRow = "",
            key = TRUE, symkey = FALSE, density.info = "none", trace = "none",
            cexRow = 0.3, lhei = c(0.20, 0.70), lwid = c(0.25, 0.5),
            reorderfun = function(d, w) reorder(d, w, agglo.FUN = mean),
            distfun = function(x) as.dist(1 - cor(t(x))),
            hclustfun = function(x) hclust(x, method = "ward.D2"))
  dev.off()
}

############# Curated heatmap ##############
curvar <- prepare_heatmap_matrix(as.character(curatedgenes$Gene.Systematic.Name), fpkmyeast)
plot_heatmap_pdf(curvar, "Curated.pdf")

############# High throughput heatmap ##############
highvar <- prepare_heatmap_matrix(as.character(highthrugenes$Gene.Systematic.Name), fpkmyeast)
plot_heatmap_pdf(highvar, "Highthroughput.pdf")
792936326bd5a3c84fb035aaa9ce78d9b4a591c7 | 091518481f7e2d022c98ea16a45eb3491953671e | /R/assignECMWFTDataToMonitor.R | e0d7f091ebeae2771ca94f765d6dd2319ae79af4 | [
"MIT"
] | permissive | stevenjoelbrey/SmokeInTheCity | 87625532edf1721a7deb5ab183d159b44d964e36 | 481a4d2b70fe380e14fe75e8e4fdecbf6b41001f | refs/heads/master | 2021-01-10T03:28:58.049008 | 2017-01-24T18:48:40 | 2017-01-24T18:48:40 | 50,605,479 | 0 | 2 | null | null | null | null | UTF-8 | R | false | false | 10,194 | r | assignECMWFTDataToMonitor.R | # assignReanalysisTDataToMonitor.R
# This script assigns 00Z ecmwf reanalysis data to monitors
# 2m - ecmwf data was downloaded
# http://apps.ecmwf.int/datasets/data/interim-full-daily/levtype=sfc/
library(fields)
library(maps)
library(geosphere)
################################################################################
# getStationWeatherData
#
# Assigns gridded ECMWF 00Z weather data (2-m temperature and total cloud
# cover) to each PM monitor location in workSpaceData.
#
# Args:
#   workSpaceData: list containing at least "lon", "lat" (monitor coordinates)
#                  and "PM_df" (dates x monitors dataframe whose shape and
#                  rownames are reused for the output weather dataframes).
#   sanityCheck:   currently unused — TODO(review): either wire it to the
#                  diagnostic plotting below or remove it.
#
# Returns: workSpaceData with "T_df" (temperature, deg F) and "CC_df"
#          (cloud-cover fraction) appended.
#
# Side effects: draws diagnostic maps on the active graphics device; reads
# "ecmwfData/t2m.RData" and "ecmwfData/tcc.RData" from the working directory.
################################################################################
getStationWeatherData <- function(workSpaceData=workSpaceData,
                                  sanityCheck=FALSE){

  # Load the ecmwf Temperature data.
  # NOTE: load() injects a list named "t2m" into this function's scope.
  load("ecmwfData/t2m.RData")
  gridLon <- t2m[["gridLon"]]
  gridLat <- t2m[["gridLat"]]
  ecmwfTime <- t2m[["ecmwfTime"]]
  t2m <- t2m[["t2m"]] # this will erase the list (keeps only the data array)

  # Plot the data on a map, with monitor locations, to make sure everything
  # lines up and makes sense (visual sanity check only)
  map('world')
  image.plot(gridLon, gridLat,t2m[,,1], add=TRUE)
  map('world', add=TRUE)

  # Load the monitor locations to plot with this data, do they overlap?
  lon <- workSpaceData[['lon']]
  lat <- workSpaceData[['lat']]
  points(lon,lat, pch=19, cex=0.5)

  # Second, zoomed diagnostic plot restricted to the ECMWF grid extent
  image.plot(gridLon, gridLat,t2m[,,1])
  map("world", xlim=c(min(gridLon), max(gridLon)),
      ylim=c(min(gridLat), max(gridLat)), add=TRUE)
  map('state', add=TRUE)
  points(lon,lat, pch=19)
  title(paste("showing the ecmwf data grid for ", ecmwfTime[1]))

  # Load the cloud cover data (exact same grid, same lon lat and time).
  # NOTE: load() injects a list named "tcc" into this function's scope.
  load("ecmwfData/tcc.RData")
  tcc <- tcc[["tcc"]] # this will erase the list (keeps only the data array)

  ##############################################################################
  # NOTE ON ECMWFTime:
  ##############################################################################
  # This 00Z time is 6:00 PM MDT 8PM EDT and 5 PM PDT of the PREVIOUS day.
  # 00Z is the date of the day just starting, still previous date in US at this
  # hour.
  # AKA: 00Z is 7 hours ahead of time here in Fort Collins MST.
  # http://scc-ares-races.org/generalinfo/utcchart.html
  # http://www.timeanddate.com/worldclock/timezone/zulu
  # So the date in North America is not the same as that of 00Z because they are
  # 7 hours ahead and 00Z has the date of the day just starting.
  # Take a day away to make this time more useful to the PM and ozone data
  # which is in US time and date. That daily data will most meaningfully be
  # matched with evening temperature data of the 00Z data.
  # NOTE: Evening temperature snapshot is not a perfect way to determine
  # NOTE: ozone relevant ozone production but should be decent at determining
  # NOTE: what days are generally warmer and cooler than others.
  secondsInDay <- 24 * 60^2
  ecmwfTimeModified <- ecmwfTime - secondsInDay

  # Load requested data packet
  Hybrid_mdf <- workSpaceData[["Hybrid_mdf"]] # NOTE(review): assigned but unused below
  PM_df <- workSpaceData[["PM_df"]]
  lon <- workSpaceData[["lon"]]
  lat <- workSpaceData[["lat"]]

  # Copies of PM_df: values will be replaced with T and cloud fraction values
  # from ecmwf; only the shape, row dates and column labels are reused.
  T_df <- PM_df # temperature dataframe
  CC_df <- PM_df # cloud cover dataframe
  nMonitors <- dim(PM_df)[2] # columns of PM_df are PM Monitors at stations

  # These dates are made from local times in the US. which are a day behind 00Z
  # I am placing them in UTC time zone so that it will be possible to match
  # the modified ecmwf time series. In reality there is no actual time of day
  # associated with daily PM and ozone data.
  measuredDataDate <- rownames(T_df)
  PMTime <- as.POSIXct(measuredDataDate, tz="UTC") # SO THAT WE CAN MATCH!
  # NOTE: The time zone of the loaded ecmwf time is UTC.

  # Loop through monitors and assign the appropriate grid of ecmwf time series
  # data
  for (i in 1:nMonitors){

    # Clear out the dataframe of PM values resulting from copying
    T_df[,i] <- NA
    CC_df[,i] <- NA

    # Find the grid box the monitor falls inside of (nearest grid center)
    lonIndex <- which.min(abs(gridLon - lon[i]))
    latIndex <- which.min(abs(gridLat - lat[i]))

    # Compute the haversine distance of monitor to gridpoint center chosen
    meterPerkm <- 1000
    glon <- gridLon[lonIndex]
    glat <- gridLat[latIndex]
    greatCircleDistance <- distHaversine(c(glon,glat), c(lon[i], lat[i]))/meterPerkm
    #print(greatCircleDistance)

    # The maximum dy of this ecmwf data should be the hypotenuse of the
    # A=dy/2, B=dy/2 of ecmwf data (dy=0.75 degrees). C = (111.3195/2km^2 + 111.3195/2^2)^.5
    # so C=78.71477km — if the nearest grid center is farther than this, the
    # monitor is outside the grid and the match would be wrong.
    maxDistanceAccepted <- 64 #km
    if(greatCircleDistance > maxDistanceAccepted){
      stop("You are not pulling the correct temperature data for this monitor.")
    }

    # Plot the grid center and monitor location, contributes to general sanity
    points(lon[i],lat[i], col="white", pch=19)
    points(gridLon[lonIndex], gridLat[latIndex], col="pink", pch=19)
    points(gridLon[lonIndex], gridLat[latIndex], col="pink", pch=3)

    # Extract this monitor's temperature time series (all times at this cell)
    temp <- t2m[lonIndex, latIndex,]

    # Convert to degrees C
    temp <- temp - 273.15 # 0C == 273.15 K

    # Now convert the temperature from C to Fahrenheit
    temp <- temp * 9/5 + 32.0

    # Extract this monitor's cloud cover fraction time series
    CC <- tcc[lonIndex, latIndex,]

    # Where in ecmwfTimeModified do PMTimes land? match() aligns the ECMWF
    # series to the PM/ozone date axis; unmatched dates stay NA.
    matchecmwfTimeToPM <- match(PMTime, ecmwfTimeModified)
    T_df[,i] <- temp[matchecmwfTimeToPM]
    CC_df[,i] <- CC[matchecmwfTimeToPM]

    # Commented date assignment in lines below is the most effective way to make
    # sure that you are assigning the correct dates temperature data using
    # the match() function. This has caused major headaches before...
    ##T_df[,i] <- as.character(ecmwfTimeModified[matchecmwfTimeToPM])
    ##CC_df[,i] <- as.character(ecmwfTimeModified[matchecmwfTimeToPM])

  } # End of for loop looping through PM/ozone monitors

  # Place new variables into the workspace
  workSpaceData[["T_df"]] <- T_df
  workSpaceData[["CC_df"]] <- CC_df
  print("assigned temperature and cloud data based on ecmwf grid")
  return(workSpaceData)
}
# ################################################################################
# # Now use this ecmwf temperature data to create a clear T mask that ensures
# # smoke-free days are warmer than smoke-impacted days.
# ################################################################################
# createSmokeFreeTMask <- function(workSpaceData,
# TSdFactor = 1,
# applySkyMask=FALSE,
# maxCloudCoverPercent=10){
#
# # Get the required data
# T_df <- workSpaceData[["T_df"]]
# smokeImpactMask <- workSpaceData[["smokeImpactMask"]]
#
# # set dimensions for new temperature mask
# nMonitor <- dim(T_df)[2]
# meanSmokedT <- rep(NA, nMonitor)
# sdSmokedT <- rep(NA, nMonitor)
# smokeFreeMask <- smokeImpactMask # Copying for proper dimensions and labels
#
# # Loop through each monitor, figuring out the temperature threshold based
# # on arguments given to this function
# for (i in 1:nMonitor){
#
# # clear out smokeImpactMask data
# smokeFreeMask[,i] <- FALSE # Assume FALSE until proven otherwise
#
# # Which rows are smoke-impacted based on work so far?
# smokedRows <- smokeImpactMask[,i]
# smokedDaysT <- as.numeric(T_df[smokedRows,i])
#
# # Get the statistics on the smoke-impacted temperatures
# meanSmokedT[i] <- mean(smokedDaysT, na.rm=TRUE)
# sdSmokedT[i] <- sd(smokedDaysT, na.rm=TRUE)
#
# # Figure out where the temperature is greater than smoky day average
# TThresh <- meanSmokedT[i] + sdSmokedT[i] * TSdFactor
# TCuttoffMask <- T_df[,i] >= TThresh
#
#
# if(is.nan(TThresh) & sum(smokedRows)==0){
# # There are no smoke impacted days, so all rows are smoke free.
# # We know this because TThresh is not a number and there are zero smoked
# # rows.
# smokeFreeRows <- rep(TRUE, length(smokedRows))
# } else {
# # There are smoke impacted days so we need to choose smoke-free carefully
# # Also, we want to be sure that these warm days are not also smoke-impacted!
# # NOTE: smokedRows == TRUE where smoke-impacted. Use ! to change those to
# # NOTE: FALSE and smoke-free days to TRUE
# smokeFreeRows <- !smokedRows & TCuttoffMask
#
# # TODO: Could add PM mask as well to ensure that clear days are not high
# # TODO: PM days. Require PM measurement for smokeFreeDays
#
# }
# # Store the smokeFreeRows (days) information in TMask
# smokeFreeMask[,i] <- smokeFreeRows
#
# }
#
# # Include the smokeFreeMask in the workspace data
# workSpaceData[["smokeFreeMask"]] <- smokeFreeMask
# workSpaceData[["meanSmokedT"]] <- meanSmokedT
# workSpaceData[["sdSmokedT"]] <- sdSmokedT
#
# # For testing purposes give this information back to console
# print(paste("The sum of smokeFreeMask after T-Control is:",
# sum(smokeFreeMask,na.rm=TRUE)))
#
#
# # Now apply the skycondition mask if desired
# if(applySkyMask){
#
# # Get the Cloud Cover Dataframe
# CC_df <- workSpaceData[["CC_df"]] * 100 # to make %
#
# # Where are the skies more clear than specified %?
# cloudFreeMask <- CC_df <= maxCloudCoverPercent
#
# # Add this cloudMask to the workspace
# workSpaceData[["cloudFreeMask"]] <- cloudFreeMask
#
# # Modify smokeFreeMask based on this new cloud condition
# smokeFreeMaskNew <- smokeFreeMask & cloudFreeMask
#
# # Overwrite the original smokeFreeMask
# workSpaceData[["smokeFreeMask"]] <- smokeFreeMaskNew
#
#
# print(paste("The sum of smokeFreeMask after sky-control is:",
# sum(smokeFreeMaskNew,na.rm=TRUE)))
# print("If the later is not small than the former, you have a problem.")
# }
#
# # Return the appended workSpaceData
# return(workSpaceData)
#
#
# }
|
7b6c98a65a6f7db03608d035b4b91063e086ab54 | 109734b597c2d760725a1a050174a5d11b3c1a9b | /man/Extract.anylist.Rd | ebeb85fb7fc71c3890426e92727c8354d4e15828 | [] | no_license | rubak/spatstat | c293e16b17cfeba3e1a24cd971b313c47ad89906 | 93e54a8fd8276c9a17123466638c271a8690d12c | refs/heads/master | 2020-12-07T00:54:32.178710 | 2020-11-06T22:51:20 | 2020-11-06T22:51:20 | 44,497,738 | 2 | 0 | null | 2020-11-06T22:51:21 | 2015-10-18T21:40:26 | R | UTF-8 | R | false | false | 1,206 | rd | Extract.anylist.Rd | \name{Extract.anylist}
\alias{[.anylist}
\alias{[<-.anylist}
\title{Extract or Replace Subset of a List of Things}
\description{
Extract or replace a subset of a list of things.
}
\usage{
\method{[}{anylist}(x, i, \dots)
\method{[}{anylist}(x, i) <- value
}
\arguments{
\item{x}{
An object of class \code{"anylist"} representing a list of things.
}
\item{i}{
Subset index. Any valid subset index in the usual \R sense.
}
\item{value}{
Replacement value for the subset.
}
\item{\dots}{Ignored.}
}
\value{
Another object of class \code{"anylist"}.
}
\details{
These are the methods for extracting and replacing subsets
for the class \code{"anylist"}.
The argument \code{x} should be an object of class \code{"anylist"}
representing a list of things. See \code{\link{anylist}}.
The method replaces a designated
subset of \code{x}, and returns an object of class \code{"anylist"}.
}
\seealso{
\code{\link{anylist}},
\code{\link{plot.anylist}},
\code{\link{summary.anylist}}
}
\examples{
x <- anylist(A=runif(10), B=runif(10), C=runif(10))
x[1] <- list(A=rnorm(10))
}
\author{
\spatstatAuthors
}
\keyword{spatial}
\keyword{list}
\keyword{manip}
|
052ad634c60461990965f8ea791d9f2cc29572a3 | ae38e72efce94388334551b1864c297a3824ec3a | /man/create_airly_api_response.Rd | 687d6da73408e67cad860cf65460864fcda16a47 | [] | no_license | cran/aiRly | dde7a8a722247b69fd3ad1b5b4d08e0d45614692 | a950aff2eab1389e7424dacee07e6a499746e77c | refs/heads/master | 2021-04-05T11:17:24.630452 | 2020-03-19T13:00:02 | 2020-03-19T13:00:02 | 248,550,766 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 697 | rd | create_airly_api_response.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/airly_api_response.R
\name{create_airly_api_response}
\alias{create_airly_api_response}
\title{Creates an object representing a response from the
Airly API. Every API call also returns information about current limits,
which is used to assign variables in pkg.env}
\usage{
create_airly_api_response(response)
}
\arguments{
\item{response}{response object}
}
\value{
object representing a response from the
Airly API
}
\description{
Creates an object representing a response from the
Airly API. Every API call also returns information about current limits,
which is used to assign variables in pkg.env.
}
|
b8940ad2b088b249bce9bce1105ea1cc936b35fa | f7114c3a1ee36fee6fefebeb468c7ad0131cac46 | /fire.pcvalues.R | 2a9eff0dab1e5a2b0844a09d8e30462b9eaac203 | [] | no_license | HaireLab/Fuego-en-la-Frontera | 6058639da782349536a98723b41e3d6a8b2a37f4 | 15b34fef4779c549b9ca86475e1a59c349670456 | refs/heads/master | 2023-09-03T12:39:57.239902 | 2021-10-27T13:56:09 | 2021-10-27T13:56:09 | 279,133,955 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,293 | r | fire.pcvalues.R | ##----------------------------------------------------------------
##
## fire.polygon.pcvalues.R
##
## Purpose: append principal component values (mean within fire perimeter)
## to fire polygon shp to use in analysis of high severity metrics
##
## Author: S. Haire, @HaireLab
##
##
## Date: 3 july 2020
##
##--------------------------------------------------------------
##
## Notes:
## Before running this script, download the fire perimeters
## < https://doi.org/10.5066/P9BB5TIO >
##
## PC1 and PC2 rasters are available in the repository data folder
##
##---------------------------------------------------------------

library(raster)
library(rgdal)
library(landscapemetrics)
library(plyr)
library(dplyr)
library(geosphere)

## Read in the data and project the fire perimeter polygons to match the principal component layers
## paths to input data
perimpath<-'./data/sp'# fire perimeters...put the shp with new attributes here too
pcpath<-'./data/PCA/' ## PCA rasters

## data
## pc's have bioclim data projection (CRS string below is applied, not reprojected)
bioclim.prj<-"+proj=lcc +lat_1=49 +lat_2=77 +lat_0=0 +lon_0=-95 +x_0=0 +y_0=0 +datum=WGS84 +units=m +no_defs" # from metadata lambert conformal conic
## read in pc's and assign prj
pc1<-raster(paste(pcpath, "PC1b.rot.tif", sep="")); crs(pc1)<-bioclim.prj
pc2<-raster(paste(pcpath, "PC2b.rot.tif", sep="")); crs(pc2)<-bioclim.prj
## read in perimeters/sp polys and project the perimeters to match the pc's
perims<-readOGR(perimpath, "Sky_Island_Fire_Polys_1985_2017")
perims.lcc<-spTransform(perims, bioclim.prj)

## Extract pc 1 & pc2 values and output the mean w/in polygon (fire perimeter). Save appended shp.
## stack the pc's and extract values within the polygons (bilinear interpolation,
## long-format dataframe with one row per cell per polygon ID)
s<-stack(pc1,pc2)
pc.ex<-extract(s, perims.lcc, method="bilinear",df=TRUE)
## calculate the mean values within the fire perimeters
mean.pc<-ddply(pc.ex,~ID, summarise, mean.pc1 = mean(PC1b.rot), mean.pc2=mean(PC2b.rot))
## add pc mean values to the spatial polygons (row order of mean.pc matches polygon order)
perims$pc1<-mean.pc[,2]; perims$pc2<-mean.pc[,3]
perims<-perims[,c(1:5,15,16)] ## just save the year, name & id, country, sky island and pc values
## save the perim shp
writeOGR(perims, "./data/sp", "fire.polys.pcvalues", driver="ESRI Shapefile", overwrite=TRUE)
|
36a3356a3fb5056285f0c46868f1d82e88c68f2b | 7a031aad2eba871dd062de1b5a1b790c5cb64ea5 | /R/fwiGrid.R | 6de984e871556f4de611e2061fcdf2aa2c80102a | [] | no_license | SantanderMetGroup/fireDanger | 7554a254dbbbe51f813f012beb184b95ef4815ee | a2cbeb780184b695d58f37565258c423f2249931 | refs/heads/master | 2020-12-25T03:00:52.506116 | 2018-10-04T15:04:21 | 2018-10-04T15:04:21 | 61,045,602 | 1 | 1 | null | 2016-06-13T15:02:27 | 2016-06-13T15:02:27 | null | UTF-8 | R | false | false | 14,069 | r | fwiGrid.R | #' @title Fire Weather Index applied to multigrids
#'
#' @description Implementation of the Canadian Fire Weather Index System for multigrids
#'
#' @param multigrid containing Tm (temperature records in deg. Celsius); H (relative humidity records in \%);
#' r (last 24-h accumulated precipitation in mm); W (wind velocity records in Km/h). See details.
#' @param mask Optional. Binary grid (0 and 1, 0 for sea areas) with \code{dimensions} attribute \code{c("lat", "lon")}.
#' @param what Character string. What component of the FWI system is computed?. Default to \code{"FWI"}.
#' Note that unlike \code{\link{fwi1D}}, only one single component choice is possible in \code{fwiGrid}.
#' See \code{\link{fwi1D}} for details and possible values.
#' @param nlat.chunks For efficient memory usage, the computation of FWI can be split into
#' latitudinal bands (chunks) that are processed separately. The number of chunks is controlled here.
#' Default to \code{NULL} (i.e., no chunking applied).
#' @param restart.annual Logical. Should the calculation be restarted at the beginning of every year?
#' If the grid encompasses just one season (e.g. JJA...), this is the recommended option. Default to \code{TRUE}.
#' @param ... Further arguments passed to \code{\link{fwi1D}}.
#' @template templateParallelParams
#'
#' @return A grid corresponding to the variable defined in argument \code{what}.
#'
#' @details
#'
#' \strong{Variable names}
#'
#' The variables composing the input multigrid are expected to have standard names, as defined by the dictionary
#' (their names are stored in the \code{multigrid$Variable$varName} component).
#' These are: \code{"tas"} for temperature, \code{"tp"} for precipitation, \code{"wss"} for windspeed. In the case of relative humidity,
#' either \code{"hurs"} or \code{"hursmin"} are accepted, the latter in case of FWI calculations according to the \dQuote{proxy} version
#' described in Bedia \emph{et al} 2014.
#'
#' Note that the order of the variables within the multigrid is not relevant. These are indexed by variable names.
#'
#' \strong{Landmask definition}
#'
#' The use of a landmask is highly recommended when using RCM/GCM data because (i) there is no point in calculating
#' FWI over sea areas and (ii) for computational efficiency, as sea gridboxes will be skipped before calculations.
#'
#' The landmask must be a grid spatially consistent with the input multigrid. You can use
#' \code{\link[transformeR]{interpGrid}} in combination with the \code{getGrid} method to ensure this condition is fulfilled. Its \code{data} component can be either a 2D or 3D array with the \code{dimensions}
#' attribute \code{c("lat","lon")} or \code{c("time","lat","lon")} respectively. In the latter case, the length of the time
#' dimension should be 1. Note that values of 0 correspond to sea areas (thus discarded for FWI calculation), while land areas are any
#' values different from 0 (typically 1 or 100, corresponding to the land/sea area fraction).
#'
#' \strong{Latitudinal chunking}
#'
#' Splitting the calculation in latitudinal chunks is highly advisable, and absolutely necessary when
#' considering large spatial domains, otherwise running out of memory during the computation. The number
#' of latitudinal chunks need to be estimated on a case-by-case basis, but in general there are no restrictions in the
#' number of chunks that can be used, as long as it does not exceed the number of actual latitudes
#' in the model grid.
#'
#' @template templateParallel
#'
#' @references
#' \itemize{
#' \item Lawson, B.D. & Armitage, O.B., 2008. Weather guide for the Canadian Forest Fire Danger Rating System. Northern Forestry Centre, Edmonton (Canada).
#'
#' \item van Wagner, C.E., 1987. Development and structure of the Canadian Forest Fire Weather Index (Forestry Tech. Rep. No. 35). Canadian Forestry Service, Ottawa, Canada.
#'
#' \item van Wagner, C.E., Pickett, T.L., 1985. Equations and FORTRAN program for the Canadian forest fire weather index system (Forestry Tech. Rep. No. 33). Canadian Forestry Service, Ottawa, Canada.
#' }
#'
#' @author J. Bedia \& M. Iturbide
#'
#' @export
#'
#' @importFrom abind abind asub
#' @importFrom parallel parLapply splitIndices
#' @importFrom transformeR redim getDim getShape parallelCheck getYearsAsINDEX subsetGrid array3Dto2Dmat mat2Dto3Darray
fwiGrid <- function(multigrid,
mask = NULL,
what = "FWI",
nlat.chunks = NULL,
restart.annual = TRUE,
parallel = FALSE,
ncores = NULL,
max.ncores = 16,
...) {
what <- match.arg(what,
choices = c("FFMC", "DMC", "DC", "ISI", "BUI", "FWI", "DSR"),
several.ok = FALSE)
fwi1D.opt.args <- list(...)
months <- as.integer(substr(multigrid$Dates[[1]]$start, start = 6, stop = 7))
fwi1D.opt.args <- c(fwi1D.opt.args, list("what" = what))
if ("lat" %in% names(fwi1D.opt.args)) {
message("NOTE: argument 'lat' will be overriden by the actual latitude of gridboxes\n(See help of fwi1D for details).")
fwi1D.opt.args[-grep("lat", names(names(fwi1D.opt.args)))]
}
varnames <- multigrid$Variable$varName
ycoords <- multigrid$xyCoords$y
xcoords <- multigrid$xyCoords$x
co <- expand.grid(ycoords, xcoords)[2:1]
dimNames.mg <- getDim(multigrid)
n.mem <- tryCatch(getShape(multigrid, "member"),
error = function(er) 1L)
## if (n.mem == 1L) multigrid <- redim(multigrid)
yrsindex <- getYearsAsINDEX(multigrid)
nyears <- length(unique(yrsindex))
if (!is.null(mask)) {
dimNames.mask <- getDim(mask)
}
if (is.null(nlat.chunks)) {
nlat.chunks <- 1L
}
if (nlat.chunks <= 0) {
nlat.chunks <- 1L
message("Invalid 'nlat.chunks' argument value. It was ignored")
}
idx.chunk.list <- parallel::splitIndices(length(ycoords), nlat.chunks)
if (any(vapply(idx.chunk.list, FUN = "length", FUN.VALUE = numeric(1)) < 2L)) {
stop("Too many latitudinal chunks. Reduce the value of 'nlat.chunks' to a maximum of ", length(ycoords) %/% 2)
}
message("[", Sys.time(), "] Calculating ", what)
aux.list <- lapply(1:nlat.chunks, function(k) {
## Lat chunking
ind.lat <- idx.chunk.list[[k]]
dims <- grep("lat", dimNames.mg)
multigrid_chunk <- multigrid
mask_chunk <- mask
if (nlat.chunks > 1) {
aux <- asub(multigrid$Data, idx = ind.lat, dims = dims)
attr(aux, "dimensions") <- dimNames.mg
multigrid_chunk$Data <- aux
multigrid_chunk$xyCoords$y <- multigrid$xyCoords$y[ind.lat]
## Mask chunking
if (!is.null(mask)) {
aux <- asub(mask$Data, idx = ind.lat, dims = grep("lat", dimNames.mask))
attr(aux, "dimensions") <- dimNames.mask
mask_chunk$Data <- aux
mask_chunk$xyCoords$y <- mask_chunk$xyCoords$y[ind.lat]
}
aux <- NULL
}
## Multigrid subsetting
Tm1 <- subsetGrid(multigrid_chunk, var = grep("tas", varnames, value = TRUE))
Tm1 <- redim(Tm1, drop = FALSE)
H1 <- subsetGrid(multigrid_chunk, var = grep("hurs", varnames, value = TRUE))
H1 <- redim(H1, drop = FALSE)
r1 <- subsetGrid(multigrid_chunk, var = "tp")
r1 <- redim(r1, drop = FALSE)
W1 <- subsetGrid(multigrid_chunk, var = "wss")
W1 <- redim(W1, drop = FALSE)
multigrid_chunk <- NULL
## Parallel checks
parallel.pars <- parallelCheck(parallel, max.ncores, ncores)
if (n.mem < 2 && isTRUE(parallel.pars$hasparallel)) {
parallel.pars$hasparallel <- FALSE
message("NOTE: parallel computing only applies to multimember grids. The option was ignored")
}
if (parallel.pars$hasparallel) {
apply_fun <- function(...) {
parallel::parLapply(cl = parallel.pars$cl, ...)
}
on.exit(parallel::stopCluster(parallel.pars$cl))
} else {
apply_fun <- lapply
}
## Landmask
if (!is.null(mask)) {
if (!("^time" %in% dimNames.mask)) {
aux <- unname(abind(mask_chunk$Data, along = 0L))
attr(aux, "dimensions") <- c("time", dimNames.mask)
} else {
aux <- mask_chunk$Data
}
msk <- array3Dto2Dmat(aux)[1,]
ind <- which(msk > 0)
msk <- NULL
} else {
aux <- suppressWarnings(subsetGrid(Tm1, members = 1))$Data
aux <- array3Dto2Dmat(aux)
ind <- which(apply(aux, MARGIN = 2, FUN = function(y) !all(is.na(y))))
}
aux <- NULL
## FWI calculation
message("[", Sys.time(), "] Processing chunk ", k, " out of ", nlat.chunks, "...")
a <- apply_fun(1:n.mem, function(x) {
Tm2 <- array3Dto2Dmat(subsetGrid(Tm1, members = x)$Data)
H2 <- array3Dto2Dmat(subsetGrid(H1, members = x)$Data)
r2 <- array3Dto2Dmat(subsetGrid(r1, members = x)$Data)
W2 <- array3Dto2Dmat(subsetGrid(W1, members = x)$Data)
b <- array(dim = dim(Tm2))
if (length(ind) > 0) {
for (i in 1:length(ind)) {
if (isTRUE(restart.annual)) {
## Iterate over years
annual.list <- lapply(1:nyears, function(j) {
idx <- which(yrsindex == unique(yrsindex)[j])
arg.list2 <- list("months" = months[idx],
"Tm" = Tm2[idx,ind[i]],
"H" = H2[idx,ind[i]],
"r" = r2[idx,ind[i]],
"W" = W2[idx,ind[i]],
"lat" = co[ind[i],2])
arg.list <- c(fwi1D.opt.args, arg.list2)
z <- tryCatch({suppressWarnings(drop(do.call("fwi1D",
args = arg.list)))},
error = function(err) {rep(NA, length(idx))})
## if (length(z) < length(idx)) z <- rep(NA, length(idx))
return(z)
})
b[,ind[i]] <- do.call("c", annual.list)
} else {
arg.list2 <- list("months" = months,
"Tm" = Tm2[,ind[i]],
"H" = H2[,ind[i]],
"r" = r2[,ind[i]],
"W" = W2[,ind[i]],
"lat" = co[ind[i],2])
arg.list <- c(fwi1D.opt.args, arg.list2)
z <- tryCatch({suppressWarnings(drop(do.call("fwi1D",
args = arg.list)))},
error = function(err) {rep(NA, length(months))})
## if (length(z) < nrow(b)) z <- rep(NA, nrow(b))
b[,ind[i]] <- z
}
}
out <- mat2Dto3Darray(mat2D = b,
x = Tm1$xyCoords$x,
y = Tm1$xyCoords$y)
return(out)
}
})
Tm1 <- r1 <- H1 <- W1 <- NULL
unname(do.call("abind", list(a, along = 0)))
})
message("[", Sys.time(), "] Done.")
## Final grid and metadata
fwigrid <- redim(subsetGrid(multigrid, var = varnames[1]), drop = FALSE)
multigrid <- NULL
dimNames <- getDim(fwigrid)
fwigrid$Data <- unname(do.call("abind", c(aux.list, along = grep("lat", dimNames))))
aux.list <- NULL
attr(fwigrid$Data, "dimensions") <- dimNames
fwigrid$Variable <- list()
fwigrid$Variable$varName <- what
fwigrid$Variable$level <- NA
desc <- switch(what,
"FFMC" = "Fine Fuel Moisture Code",
"DMC" = "Duff Moisture Code",
"DC" = "Drought Code",
"ISI" = "Initial Spread Index",
"BUI" = "Builtup Index",
"FWI" = "Fire Weather Index",
"DSR" = "Daily Severity Rating")
attr(fwigrid$Variable, "use_dictionary") <- FALSE
attr(fwigrid$Variable, "description") <- desc
attr(fwigrid$Variable, "units") <- "adimensional"
attr(fwigrid$Variable, "longname") <- paste(desc, "component of the Canadian Fire Weather Index System")
attr(fwigrid, "calculation") <- "Calculated with the fireDanger package (https://github.com/SantanderMetGroup/fireDanger)"
return(fwigrid)
}
|
aa50d822c149a368b699b8800e38a7fe889ba0bd | b75ec7c515a9fa9f7f988577d20e602e9c839240 | /letters-learn-then-test/modeling/run-models.R | 7f61e05ebef6b6a5031014134a4e5a5da9b2988a | [] | no_license | jodeleeuw/dynamic-constraints | 1c6af747551945db71c1fe9fa83681cc687b2ea4 | ffbde719a8df4e71713b3189851748f0748f1720 | refs/heads/master | 2021-01-21T04:47:19.711750 | 2016-06-30T18:31:42 | 2016-06-30T18:31:42 | 49,590,213 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 5,146 | r | run-models.R | #### package requirements ####
## Environment setup for the PARSER / MDLChunker simulation runs.
## NOTE(review): require() returns FALSE instead of erroring when a package
## is missing; library() would fail fast here.
require(V8) # to run JS code
require(jsonlite) # to parse JSON output from JavaScript
require(plyr) # data storage manipulation
#### load script that can generate sequences ####
source('modeling/sequence-generators.R')
#### set parameters for model testing ####
# Number of simulated runs ("subjects") per condition.
reps_per_condition <- 1000
reps_per_item_in_seq <- 25 # behavioral experiment was 25
# PARSER parameters
# These are interpolated verbatim into the JS setup call in run_PARSER().
PARSER_maximum_percept_size <- 3
PARSER_initial_lexicon_weight <- 1
PARSER_shaping_weight_threshold <- 1
PARSER_reinforcement_rate <- 0.5
PARSER_forgetting_rate <- 0.05
PARSER_interference_rate <- 0.005
PARSER_logging <- "false" # JS boolean literal, hence a string on the R side
# MDLChunker parameters
MDL_perceptual_span <- 25
MDL_memory_span <- 150
MDL_logging <- "false"
# TRACX parameters
#### functions to run each model once ####
# Run one PARSER simulation on `seq` inside a fresh V8 JavaScript context.
#
# model     : path to the PARSER JS implementation sourced into the context.
# seq       : the training letter sequence, spliced directly into a JS string.
# condition : 'seeded' pre-loads the three distractor words D-E-F, G-H-I,
#             J-K-L with high weight; anything else starts with an empty
#             lexicon seed.
#
# Returns a list(model, condition, target, foil) where target is the summed
# lexicon strength of the target word ABC plus its letters, and foil is the
# summed strength of the single letters alone; either is zeroed when it falls
# below the 1.0 reporting threshold.
# NOTE(review): the parameter name `seq` shadows base::seq inside this
# function; harmless here but worth renaming eventually.
run_PARSER <- function(model, seq, condition) {
  if(condition=='seeded'){
    # JS fragment appended as an extra argument to PARSER.setup().
    seed_str <- ",[{word:['D','E','F'], weight: 100.0},{word:['G','H','I'], weight: 100.0},{word:['J','K','L'], weight: 100.0}]"
  } else {
    seed_str <- ""
  }
  ct <- new_context();
  ct$source(model)
  # Build the PARSER.setup() call from the global parameter constants.
  ct$eval(paste0("PARSER.setup('",seq,"',{",
                 "maximum_percept_size:",PARSER_maximum_percept_size,",",
                 "initial_lexicon_weight:",PARSER_initial_lexicon_weight,",",
                 "shaping_weight_threshold:",PARSER_shaping_weight_threshold,",",
                 "reinforcement_rate:",PARSER_reinforcement_rate,",",
                 "forgetting_rate:",PARSER_forgetting_rate,",",
                 "interference_rate:",PARSER_interference_rate,",",
                 "logging:",PARSER_logging,
                 "}",seed_str,");"))
  ct$eval("PARSER.run()")
  #lexicon <- fromJSON(ct$eval("JSON.stringify(PARSER.getLexicon())"))
  # Sum of the single-letter strengths; serves as the baseline for both
  # the target and the foil scores.
  base_weight <- as.numeric(ct$eval("PARSER.getWordStrength('A')"))+as.numeric(ct$eval("PARSER.getWordStrength('B')"))+as.numeric(ct$eval("PARSER.getWordStrength('C')"))
  target_weight <- as.numeric(ct$eval("PARSER.getWordStrength('ABC')")) + base_weight
  foil_weight <- base_weight
  # filter weights below 1.0
  if(target_weight < 1.0){ target_weight <- 0 }
  if(foil_weight < 1.0){ foil_weight <- 0 }
  return(list(model="PARSER", condition=condition, target=target_weight, foil=foil_weight))
}
# Run one MDLChunker simulation on `seq` inside a fresh V8 JavaScript context.
#
# model     : path to the MDLChunker JS implementation.
# seq       : training letter sequence, spliced directly into a JS string.
# condition : 'seeded' prepends a 100-item seed sequence (seed_sequence() is
#             defined in modeling/sequence-generators.R, sourced above).
#
# Returns a list(model, condition, target, foil): target is the code length
# the model assigns to the target word ABC; foil is the mean code length of
# the five letter permutations of ABC (lower code length = better chunked).
run_MDLChunker <- function(model, seq, condition) {
  if(condition=='seeded'){
    seq <- paste0(seed_sequence(100), seq)
  }
  ct <- new_context();
  ct$source(model)
  # Build the MDLChunker.setup() call from the global parameter constants.
  ct$eval(paste0("MDLChunker.setup('",seq,"',{",
                 "memory_span:",MDL_memory_span,",",
                 "perceptual_span:",MDL_perceptual_span,",",
                 "logging:",MDL_logging,
                 "});"))
  ct$eval("MDLChunker.run()")
  #lexicon <- fromJSON(ct$eval("JSON.stringify(MDLChunker.getLexicon())"))
  target_weight <- as.numeric(ct$eval("MDLChunker.getCodeLengthForString('ABC')"))
  # Average code length over the five non-target orderings of A, B, C.
  foil_weight <- as.numeric(ct$eval("MDLChunker.getCodeLengthForString('ACB')")) +
    as.numeric(ct$eval("MDLChunker.getCodeLengthForString('BAC')")) +
    as.numeric(ct$eval("MDLChunker.getCodeLengthForString('BCA')")) +
    as.numeric(ct$eval("MDLChunker.getCodeLengthForString('CBA')")) +
    as.numeric(ct$eval("MDLChunker.getCodeLengthForString('CAB')"))
  foil_weight <- foil_weight / 5
  #memory <- fromJSON(ct$eval("MDLChunker.getMemory()"))
  #print(memory)
  return(list(model="MDLChunker",condition=condition,target=target_weight, foil=foil_weight))
}
# run_TRACX <- function(model, seq, condition) {
#
# # if(condition=='seeded'){
# # seq.prepend <-
# # }
#
# ct <- new_context();
# ct$source('modeling/models/TRACX-dependencies/sylvester.js')
# ct$source('modeling/models/TRACX-dependencies/seedrandom-min.js')
# ct$source(model)
# ct$eval(paste0("TRACX.setTrainingData('",seq,"');"))
# ct$eval('TRACX.getInputEncodings();')
# ct$eval('TRACX.setTestData({Words:"ABC", PartWords: "", NonWords: "DHL"})')
# ct$eval('TRACX.setSingleParameter("randomSeed","")')
# ct$eval('TRACX.reset()')
# lexicon <- fromJSON(ct$eval('JSON.stringify(TRACX.runFullSimulation(function(i,m){}))'))
# target_weight <- as.numeric(lexicon$Words$mean)
# foil_weight <- as.numeric(lexicon$NonWords$mean)
# return(list(model="TRACX",condition=condition,target=target_weight, foil=foil_weight))
# }
#### run all models ####
# Preallocate the results list: one entry per (rep x model x condition).
runs <- list()
length(runs) <- reps_per_condition * 2 * 2 # 2 models (PARSER, MDLChunker) x 2 conditions (seeded/unseeded)
counter <- 1
# run models
# Each rep generates one fresh training sequence and runs all four
# model/condition combinations on it.
for(i in 1:reps_per_condition){
  cat(paste('\r',i,'of',reps_per_condition))
  four_seq <- four_triple_sequence(reps_per_item_in_seq)
  runs[[counter]] <- run_PARSER('modeling/models/parser.js',four_seq, 'unseeded')
  counter <- counter + 1
  runs[[counter]] <- run_PARSER('modeling/models/parser.js',four_seq, 'seeded')
  counter <- counter + 1
  runs[[counter]] <- run_MDLChunker('modeling/models/mdlchunker.js', four_seq, 'unseeded')
  counter <- counter + 1
  runs[[counter]] <- run_MDLChunker('modeling/models/mdlchunker.js', four_seq, 'seeded')
  counter <- counter + 1
}
# Flatten the list of result lists into one data frame and save it.
model_run_data <- ldply(runs, data.frame)
save(model_run_data, file='modeling/output/model_run_data.Rdata')
c18a5ab9e8b20890259a1ade1ff84d21974e6edc | 71abde1c9025f7ab6d13f074192101dbab41c32d | /R Codes/r-tutorial-src/rtutor-pt1-c02.R | f1dada8b7131e368ee1c983eabfd19d7795c9496 | [
"MIT"
] | permissive | hejibo/Psychological-Statistics | 844ce22f8b70a860ba033b279b2f8f1823459062 | 2e245228f0e9d599ffaa50d01f41e3cdfbd3b17a | refs/heads/master | 2021-05-09T10:14:53.475249 | 2018-05-01T01:27:00 | 2018-05-01T01:27:00 | 118,957,025 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 1,240 | r | rtutor-pt1-c02.R | ###########################################################
#
# Copyright (C) 2012 by Chi Yau
# All rights reserved
#
# http://www.r-tutor.com
#
################################
# c02
# Basic vector creation with c() and length().
c(2, 3, 5)
c(TRUE, FALSE, TRUE, FALSE, FALSE)
c("aa", "bb", "cc", "dd", "ee")
length(c("aa", "bb", "cc", "dd", "ee"))
################################
# c02-s01
# Combining vectors: mixing numeric and character coerces everything
# to character.
n = c(2, 3, 5)
s = c("aa", "bb", "cc", "dd", "ee")
c(n, s)
################################
# c02-s02
# Vector arithmetic is element-wise.
a = c(1, 3, 5, 7)
b = c(1, 2, 4, 8)
5 * a
a + b
a - b
a * b
a / b
# Recycling: the shorter vector is repeated; length 3 recycles evenly
# into length 9.
u = c(10, 20, 30)
v = c(1, 2, 3, 4, 5, 6, 7, 8, 9)
u + v
# Lengths 4 and 9 are not multiples, so this still recycles but emits
# a warning (deliberate demonstration).
w = c(10, 20, 30, 40)
w + v
################################
# c02-s03
# Indexing: single position, negative index (exclusion), and an
# out-of-range index (yields NA).
s = c("aa", "bb", "cc", "dd", "ee")
s[3]
s[-3]
s[10]
################################
# c02-s04
# Index vectors: duplicates and arbitrary order are allowed; 2:4 is a range.
s = c("aa", "bb", "cc", "dd", "ee")
s[c(2, 3)]
s[c(2, 3, 3)]
s[c(2, 1, 3)]
s[2:4]
################################
# c02-s05
# Logical indexing keeps elements where the mask is TRUE.
s = c("aa", "bb", "cc", "dd", "ee")
L = c(FALSE, TRUE, FALSE, TRUE, FALSE)
s[L]
s[c(FALSE, TRUE, FALSE, TRUE, FALSE)]
################################
# c02-s06
# Named vectors: assign names with names(), then index by name.
v = c("Mary", "Sue")
v
names(v) = c("First", "Last")
v
v["First"]
v[c("Last", "First")]
|
3bbeb7f598690ebc091e06b29e2831a11b725aff | dd04fe1a24ad591212149ce4bd00f12c19a589db | /R/generics.R | f977f0c5d264e41b3f66f46929521dc8ad393565 | [] | no_license | akgold/ynabR | 87d6876b3daca41f6621df12dde222dbcdf4ee85 | 5c1f28df57293bf54184f079d8b52e0dcc1f92c4 | refs/heads/master | 2020-04-12T15:39:36.074593 | 2018-12-23T22:08:18 | 2018-12-23T22:08:18 | 162,588,489 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 215 | r | generics.R | set_token <- function(x, token) {
UseMethod("set_token")
}
get_token <- function(x) {
UseMethod("get_token")
}
download <- function(x) {
UseMethod("download")
}
parse <- function(x) {
UseMethod("parse")
} |
7ee2f3b83fee5c2f435edc3f9cc9d94d838587a2 | fe9d04e42bf96f0b490d1a0f5e94889b20e8420d | /R/load.NOAA.OISST.V2.R | d5424643882f01770aa4fff944b56805e51ed510 | [] | no_license | mdsumner/probGLS | b8172865a6a497849bbb7a1d3edcda2a0245c815 | b3808f6037dedacb48afa6c4155c87c06aa5bc2c | refs/heads/master | 2020-12-31T22:11:07.263901 | 2018-11-29T07:43:37 | 2018-11-29T07:43:37 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,839 | r | load.NOAA.OISST.V2.R | #' load NOAA OISST V2
#'
#' This function takes 1-year-long NetCDF files from the
#' ftp://ftp.cdc.noaa.gov/Datasets/noaa.oisst.v2.highres/ directory
#' @param fname full path to NetCDF data file
#' @param lsmask full path to land-sea mask NetCDF file
#' @param lonW western-most longitude of search area, must be smaller than lonE
#' @param lonE eastern-most longitude of search area, must be larger than lonW
#' @param latS southern-most latitude of search area, must be smaller than latN
#' @param latN northern-most latitude of search area, must be larger than latS
#' @param date1 first date in file to extract, must be Date class
#' @param date2 last date in file to extract, must be Date class
#' @param use.landmask use land mask TRUE or FALSE
#' @param extract.value which data to extract: "sst" - SST, "err" - SST error, "icec" - sea ice concentration
#' @return A 3-dimensional array with latitudes in rows, longitudes in columns, and dates along the 3rd dimension. The value [1,1,1] is the northernmost, westernmost lat/long location on the 1st date. The value [1,1,2] is the 2nd date at the same lat/long location (if more than 1 date is requested).
#' @return To extract lat/lon/date values from the output array, use the dimnames() function:
#' @return lats = as.numeric(dimnames(sst2)$Lat)
#' @return lons = as.numeric(dimnames(sst2)$Long)
#' @return dates = as.Date(dimnames(sst2)$Date)
#' @return
#' @return NetCDF files should be downloaded from the links on:
#' @return http://www.esrl.noaa.gov/psd/data/gridded/data.noaa.oisst.v2.highres.html
#' @return In addition to the temperature data files, also download a copy of the landmask file lsmask.oisst.v2.nc from the same page. Inside the NetCDF files, data are available on a 0.25 degree latitude x 0.25 degree longitude global grid (720x1440 cells) From -89.875N to 89.875N, 0.125E to 359.875E. Locations are at the CENTER of a grid cell.
#' @return modified after Luke Miller accessed Nov 25, 2014; https://github.com/millerlp/Misc_R_scripts/blob/master/NOAA_OISST_ncdf4.R
#' @export
# Extract a lat/lon/time block from a NOAA OISST V2 high-res NetCDF file.
# See the roxygen header above for argument details. Returns a 3D array
# [Lat (north->south), Long (west->east), Date].
#
# Fixes over the original:
# * class(x) == 'Date' comparisons replaced by inherits()/is.character();
#   previously an unsupported date type silently left the time index
#   undefined and failed later with a cryptic error — now it stops with a
#   clear message;
# * the dimnames attribute was re-assigned (identically) inside a loop over
#   dates; it is now set once;
# * 1:dims[3] loops replaced by seq_len(); use.landmask default F -> FALSE.
load.NOAA.OISST.V2 = function(fname,lsmask,lonW,lonE,latS,latN,
                              date1, date2, use.landmask = FALSE,
                              extract.value = 'sst'){
  # Grid cell centres of the global 0.25-degree OISST grid.
  lats = seq(-89.875,89.875,0.25) # south to north
  lons = seq(0.125,359.875,0.25)  # 0.125E eastwards
  # Create connection to NetCDF data file
  nc = nc_open(fname)
  # Indices of the grid cells nearest to the requested bounding box.
  lonWindx = which.min(abs(lonW - lons)) # nearest longitude to lonW
  lonEindx = which.min(abs(lonE - lons)) # nearest longitude to lonE
  latSindx = which.min(abs(latS - lats)) # nearest latitude to latS
  latNindx = which.min(abs(latN - lats)) # nearest latitude to latN
  nlon = (lonEindx - lonWindx) + 1 # number of longitudes to extract
  nlat = (latNindx - latSindx) + 1 # number of latitudes to extract
  # Available time points in the file, stored as days since 1800-01-01.
  ncdates = nc$dim$time$vals
  ncdates = as.Date(ncdates,origin = '1800-1-1')
  # Resolve a Date or character date to the index of the nearest time point.
  to_date_index = function(d) {
    if (is.character(d)) d = as.Date(d)
    if (!inherits(d, 'Date')) {
      stop("dates must be Date or character, got class: ",
           paste(class(d), collapse = "/"), call. = FALSE)
    }
    which.min(abs(d - ncdates))
  }
  date1indx = to_date_index(date1)
  if (missing(date2)) {
    # If date2 isn't specified, reuse date1 (single-day extraction).
    date2indx = date1indx
    cat('Only 1 date specified\n')
  } else {
    date2indx = to_date_index(date2)
  }
  ndates = (date2indx - date1indx) + 1 # number of time steps to extract
  # Define the output array and pull the requested block in one call.
  sstout = array(data = NA, dim = c(nlon,nlat,ndates))
  sstout[,,] = ncvar_get(nc, varid = extract.value,
                         start = c(lonWindx,latSindx,date1indx),
                         count = c(nlon,nlat,ndates))
  # close SST ncdf
  nc_close(nc)
  # Missing data appear as 32767 in the file; replace with NA.
  # (ifelse keeps the dim attribute of the logical test array.)
  sstout = ifelse(sstout == 32767, NA, sstout)
  dims = dim(sstout)
  if (isTRUE(use.landmask)) {
    # Multiply every time slice by the land-sea mask (1 = ocean, 0 = land),
    # turning land cells into NA.
    nc2 = nc_open(lsmask)
    mask = array(data = NA, dim = c(nlon,nlat,1))
    mask[,,] = ncvar_get(nc2, varid = "lsmask",
                         start = c(lonWindx,latSindx,1), count = c(nlon,nlat,1))
    nc_close(nc2)
    mask = ifelse(mask == 0, NA, 1) # 0 (land) -> NA, 1 (ocean) stays 1
    for (i in seq_len(dims[3])) sstout[,,i] = sstout[,,i] * mask[,,1]
  }
  # Date labels shared by both output arrays.
  date_labels = as.character(seq(ncdates[date1indx], ncdates[date2indx], by = 1))
  # Dimension names for the raw (lon x lat x date) array — set once.
  attr(sstout,'dimnames') = list(Long = seq(lons[lonWindx], lons[lonEindx],
                                            by = 0.25),
                                 Lat = seq(lats[latSindx], lats[latNindx],
                                           by = 0.25),
                                 Date = date_labels)
  # Rearrange the output array so latitudes run from north to south down the
  # rows and longitudes run from west to east across the columns.
  sst2 = array(data = NA, dim = c(dims[2],dims[1],dims[3]),
               dimnames = list(Lat = rev(seq(lats[latSindx], lats[latNindx],
                                             by = 0.25)),
                               Long = seq(lons[lonWindx], lons[lonEindx], by = 0.25),
                               Date = date_labels))
  # Step through each date slice and rearrange lat/lon values.
  for (i in seq_len(dims[3])) {
    temp = as.matrix(sstout[,,i]) # one day's worth of lon x lat values
    temp = t(temp)                # transpose lon/lat to lat/lon
    temp = temp[nrow(temp):1,]    # reverse row order to reverse latitudes
    sst2[,,i] = temp
  }
  ##########################
  sst2 # return sst2 array
  ##########################
} # end of function
|
f975c1c9b36013432baf837ff37caa63ea9f64af | 48cf04931d5a0adbae40837f1322a689722e50a9 | /man/pcg.Rd | 36a3c0e53ea5698257e74d916d0e9d80725e83d7 | [
"MIT"
] | permissive | natbprice/pcg2 | 93c3e9160261ac652943562516f48ab68a6bf108 | 15e884c008a83d024fb9fac449aaf366c8c5fc08 | refs/heads/master | 2020-05-04T18:37:58.024373 | 2019-04-03T20:05:40 | 2019-04-03T20:05:40 | 179,360,915 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 776 | rd | pcg.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/pcg.R
\name{pcg}
\alias{pcg}
\title{Preconditioned conjugate gradient method solver}
\usage{
pcg(Ax, b, M, x0, maxiter = 1000, tol = 1e-06)
}
\arguments{
\item{Ax}{function that takes argument x and returns matrix product A*x}
\item{b}{right hand side of the linear system}
\item{M}{preconditioner for A}
\item{x0}{starting guess for solution}
\item{maxiter}{maximum number of iterations}
\item{tol}{tolerance for convergence on norm(residual, "2")}
}
\description{
Solve system of linear equations $Ax = b$
}
\examples{
A <- matrix(c(4, 1, 1, 3), nrow = 2)
b <- c(1, 2)
x0 <- c(2, 1)
Ax <- function(x) {
A \%*\% x
}
M <- matrix(c(4, 0, 0, 3), nrow = nrow(A))
opt <- pcg(Ax, b, M, x0)
}
|
37a5f94dc1b8a275654d2179647e7d33e0d9da92 | 8f9d93dc6c224cfc6f49cf82c7c650c80581032b | /PA_curves.R | 7b2082f3de00b7dbb61b09e72a429367788c7201 | [] | no_license | JackieOuzman/book_chap_weeds | 57655b2e5cbb2c462b67ff1e7a75a6d79583efb5 | 9a7d40e609c5fc854e685d97eddca4e6b1d03d76 | refs/heads/master | 2020-05-05T12:27:35.865941 | 2019-04-09T05:19:11 | 2019-04-09T05:19:11 | 180,029,719 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 32,100 | r | PA_curves.R |
library(tidyverse)
library(dplyr)
library(ggplot2)
#install.packages('DT')
library(DT)
library(readxl)
#test
#setwd("W:/Weeds/book_chapter_data")
setwd("C:/Users/ouz001/book_chapter_data")
#postcodes_with_coord <- read.csv("../Australian_Post_Codes_Lat_Lon.csv")
#postcodes_with_coord_dist <- distinct(postcodes_with_coord, postcode, .keep_all = TRUE)
#write.csv(postcodes_with_coord_dist, "postcodes_with_coord_dist.csv")
#bring in the PA study
advisors_PA <- read.csv("C:/Users/ouz001/book_chapter_data/PA/PA_advisors_adoption.csv")
advisors_PA <- select(advisors_PA,
farmers = Respondents,
GRDC_RegionQ3,
state,
region_ReZoned,
X12postcodes)
glimpse(advisors_PA)
#Try bring in more data from orginal study
PA_survey <- read_excel("C:/Users/ouz001/book_chapter_data/PA/PA_data_rawish.xlsx")
#SELECT JUST A FEW VARAIABLES FROM SURVEY
PA_survey <- select(PA_survey,
farmers = Respondents,
Agro_Yr_StartQ34,
Yr_No_Till_PA = NoTill_YrQ20,
Yr_AutoSteer = Asteer_YrQ46,
Yr_yld_map = Ymap_StartYearQ54,
Yr_soil_testing = SoilTest_StartYearQ75)
#JOIN THESE TWO TOGETHER
glimpse(advisors_PA)
glimpse(PA_survey)
PA_survey = left_join(advisors_PA, PA_survey, by = "farmers")
glimpse(PA_survey)
#NOW GET THE ZONE AND REGIONS - so I need a file that has farmer
XYadvisors_postcodes_join <- read.csv("C:/Users/ouz001/book_chapter_data/adoption_data/XYadvisors_postcodes_join.csv") %>%
select(farmers = farmer,
postcode,
study,
AGROECOLOG,
REGIONS,
state
)
glimpse(XYadvisors_postcodes_join)
##JOIN TO THE PA SURVEY ####
glimpse(XYadvisors_postcodes_join)
glimpse(PA_survey)
PA_survey_zone = left_join(XYadvisors_postcodes_join, PA_survey, by = "farmers") %>%
select(farmers,
postcode,
study,
state = state.x,
AGROECOLOG,
REGIONS,
Agro_Yr_StartQ34,
Yr_No_Till_PA,
Yr_AutoSteer,
Yr_yld_map,
Yr_soil_testing)
glimpse(PA_survey_zone)
PA_survey_zone <- mutate(PA_survey_zone,
Yr_Agro =as.factor(Agro_Yr_StartQ34),
Yr_No_Till_PA=as.factor(Yr_No_Till_PA),
Yr_AutoSteer_PA=as.factor(Yr_AutoSteer),
Yr_yld_map_PA=as.factor(Yr_yld_map),
Yr_soil_testing_PA=as.factor(Yr_soil_testing))
###I THINK THIS IS THE DATA SET I NEED TO USE#####
####PA_survey_zone#####
###NOW I WANT TO CHECK OUT HOW THIS THESE adoption curves compare###
#use data clm called PA_survey_zone
# use zone as states
#number of farmers in state
# year_as_factor is the adoption clm I want to use
#glimpse(PA_survey_zone)
#test <- count(PA_survey_zone, Yr_Agro)
# Cumulative adoption curve (1950-2014) of advisor use for one state.
#
# df           : survey data with a `state` column and a first-adoption-year
#                column (factor/character of 4-digit years).
# zone         : state to subset on (e.g. "SA").
# numb_farmers : total respondents surveyed in that state (denominator for
#                the percentage).
# year_col     : name of the adoption-year column (default keeps the old
#                hard-coded behaviour).
#
# Returns a data.frame with one row per year 1950..2014, in year order, and
# columns year, id, zone, freq, cummulative, cumm_percent.
#
# Fixes over the original:
# * the original used plyr's count(df, "col") syntax, but this script never
#   attaches plyr, so with only dplyr loaded the call misbehaved; base
#   table()/merge() removes that dependency;
# * `year` is now returned as an integer calendar year instead of a factor,
#   so the downstream as.integer(year) no longer collapses it to level codes
#   1..65 (which fell outside the plots' xlim(1980, 2015)).
fun_test2 <- function(df, zone, numb_farmers, year_col = "Yr_Agro") {
  df_subset <- df[!is.na(df$state) & df$state == zone, , drop = FALSE]
  # Frequency of each observed first-adoption year within the state.
  year_counts <- table(as.character(df_subset[[year_col]]))
  count_adoption_year <- data.frame(year = names(year_counts),
                                    freq = as.integer(year_counts),
                                    stringsAsFactors = FALSE)
  # One row per study year, so zero-adoption years appear explicitly.
  years_of_study <- data.frame(year = as.character(1950:2014),
                               id = seq_len(65),
                               zone = zone,
                               stringsAsFactors = FALSE)
  adoption_df <- merge(years_of_study, count_adoption_year,
                       by = "year", all.x = TRUE)
  adoption_df <- adoption_df[order(adoption_df$id), ]
  adoption_df$freq[is.na(adoption_df$freq)] <- 0L
  adoption_df$cummulative <- cumsum(adoption_df$freq)
  adoption_df$cumm_percent <- adoption_df$cummulative / numb_farmers * 100
  adoption_df$year <- as.integer(adoption_df$year)
  rownames(adoption_df) <- NULL
  adoption_df
}
###PA Advisors BY state######
###PA data for agro use - chcek to see if it looks the same as other work - yes so far so good.
PA_AgroNSW <- fun_test2(PA_survey_zone, "NSW", 105)
PA_AgroSA <- fun_test2(PA_survey_zone, "SA", 186)
PA_AgroVIC <- fun_test2(PA_survey_zone, "VIC", 150)
PA_AgroWA <- fun_test2(PA_survey_zone, "WA", 128)
PA_Agro_state <- rbind(PA_AgroNSW,
PA_AgroSA,
PA_AgroVIC,
PA_AgroWA) %>%
mutate(year = as.integer(year))
glimpse(PA_Agro_state)
ggplot(PA_Agro_state, aes(year, cumm_percent))+
geom_line()+
theme_classic()+
theme(legend.position = "bottom")+
xlim(1980, 2015)+
ylim(0,100)+
facet_wrap(.~zone)+
labs(x = "Years",
y = "percenatge of farmers",
title = "Adoption of advisor use per states",
subtitle = "This study has no farmers in QLD")
###PA Yr_No_Till_PA BY state######
# Cumulative adoption curve (1950-2014) of no-till (PA survey) for one state.
# Same contract as the other fun_test* helpers: returns a data.frame with
# columns year, id, zone, freq, cummulative, cumm_percent, one row per year
# 1950..2014. `numb_farmers` is the state's respondent total (denominator).
# Fixes: drops the plyr::count(df, "col") dependency (plyr is never attached
# here) in favour of base table()/merge(), and returns `year` as an integer
# calendar year rather than a factor (as.integer on the factor previously
# yielded level codes 1..65, breaking the plots' xlim(1980, 2015)).
fun_test3 <- function(df, zone, numb_farmers, year_col = "Yr_No_Till_PA") {
  df_subset <- df[!is.na(df$state) & df$state == zone, , drop = FALSE]
  # Frequency of each observed first-adoption year within the state.
  year_counts <- table(as.character(df_subset[[year_col]]))
  count_adoption_year <- data.frame(year = names(year_counts),
                                    freq = as.integer(year_counts),
                                    stringsAsFactors = FALSE)
  # One row per study year, so zero-adoption years appear explicitly.
  years_of_study <- data.frame(year = as.character(1950:2014),
                               id = seq_len(65),
                               zone = zone,
                               stringsAsFactors = FALSE)
  adoption_df <- merge(years_of_study, count_adoption_year,
                       by = "year", all.x = TRUE)
  adoption_df <- adoption_df[order(adoption_df$id), ]
  adoption_df$freq[is.na(adoption_df$freq)] <- 0L
  adoption_df$cummulative <- cumsum(adoption_df$freq)
  adoption_df$cumm_percent <- adoption_df$cummulative / numb_farmers * 100
  adoption_df$year <- as.integer(adoption_df$year)
  rownames(adoption_df) <- NULL
  adoption_df
}
Yr_No_Till_PANSW <- fun_test3(PA_survey_zone, "NSW", 105)
Yr_No_Till_PASA <- fun_test3(PA_survey_zone, "SA", 186)
Yr_No_Till_PAVIC <- fun_test3(PA_survey_zone, "VIC", 150)
Yr_No_Till_PAWA <- fun_test3(PA_survey_zone, "WA", 128)
Yr_No_Till_PA_state <- rbind(Yr_No_Till_PANSW,
Yr_No_Till_PASA,
Yr_No_Till_PAVIC,
Yr_No_Till_PAWA) %>%
mutate(year = as.integer(year))
glimpse(Yr_No_Till_PA_state)
ggplot(Yr_No_Till_PA_state, aes(year, cumm_percent))+
geom_line()+
theme_classic()+
theme(legend.position = "bottom")+
xlim(1980, 2015)+
ylim(0,100)+
facet_wrap(.~zone)
###put the no till data from PA survey over the advisor PA data###
glimpse(PA_Agro_state)
glimpse(Yr_No_Till_PA_state)
PA_Agro_state <- mutate(PA_Agro_state, adoption = "advisors")
Yr_No_Till_PA_state <- mutate(Yr_No_Till_PA_state, adoption = "No_till_PA")
PA_no_till_advisors <- rbind(PA_Agro_state, Yr_No_Till_PA_state )
ggplot(PA_no_till_advisors, aes(year, cumm_percent, group = adoption))+
geom_line(aes(linetype=adoption))+
theme_classic()+
theme(legend.position = "bottom")+
xlim(1980, 2015)+
ylim(0,100)+
facet_wrap(.~zone)
##this says its the same as the no till study in 2014
###OK for the 2014 weeds adoption data### no till
XYNoTill_postcodes_join <- read.csv("C:/Users/ouz001/book_chapter_data/adoption_data/XYNoTill_postcodes_join_GRDC_SLA.csv")
#change the year from interger to factor
XYNoTill_postcodes_join <- mutate(XYNoTill_postcodes_join, year_as_factor = as.factor(Q15_year_f))
glimpse(XYNoTill_postcodes_join)
# Cumulative adoption curve (1950-2014) of no-till (weeds survey) per state.
# Returns a data.frame with columns year, id, zone, freq, cummulative,
# cumm_percent — one row per year 1950..2014 in year order. `numb_farmers`
# is the state's respondent total (denominator of cumm_percent).
# Fixes: drops the plyr::count(df, "col") dependency (plyr is never attached
# in this script) in favour of base table()/merge(); and the final year
# conversion — previously as.double() on a factor, which returned level
# codes 1..65 rather than calendar years, so every point fell outside the
# plots' xlim(1980, 2015) — now yields the actual years.
adoption_curve_function_NoTill_S <- function(df, zone, numb_farmers,
                                             year_col = "year_as_factor") {
  df_subset <- df[!is.na(df$state) & df$state == zone, , drop = FALSE]
  # Frequency of each observed first-adoption year within the state.
  year_counts <- table(as.character(df_subset[[year_col]]))
  count_adoption_year <- data.frame(year = names(year_counts),
                                    freq = as.integer(year_counts),
                                    stringsAsFactors = FALSE)
  # One row per study year, so zero-adoption years appear explicitly.
  years_of_study <- data.frame(year = as.character(1950:2014),
                               id = seq_len(65),
                               zone = zone,
                               stringsAsFactors = FALSE)
  adoption_df <- merge(years_of_study, count_adoption_year,
                       by = "year", all.x = TRUE)
  adoption_df <- adoption_df[order(adoption_df$id), ]
  adoption_df$freq[is.na(adoption_df$freq)] <- 0L
  adoption_df$cummulative <- cumsum(adoption_df$freq)
  adoption_df$cumm_percent <- adoption_df$cummulative / numb_farmers * 100
  adoption_df$year <- as.numeric(adoption_df$year) # real calendar years
  rownames(adoption_df) <- NULL
  return(adoption_df)
}
QLD_NT_weeds <- adoption_curve_function_NoTill_S(XYNoTill_postcodes_join, "QLD", 59)
NSW_NT_weeds <- adoption_curve_function_NoTill_S(XYNoTill_postcodes_join, "NSW", 153)
SA_NT_weeds <- adoption_curve_function_NoTill_S(XYNoTill_postcodes_join, "SA", 65)
VIC_NT_weeds <- adoption_curve_function_NoTill_S(XYNoTill_postcodes_join, "VIC", 141)
WA_NT_weeds <- adoption_curve_function_NoTill_S(XYNoTill_postcodes_join, "WA", 179)
NoTill_adoption_state_NT_weeds <- rbind(QLD_NT_weeds, NSW_NT_weeds, SA_NT_weeds,VIC_NT_weeds, WA_NT_weeds )
NoTill_adoption_state_NT_weeds <- mutate(NoTill_adoption_state_NT_weeds, adoption = "No_till_Weeds")
####Join adoption data for weeds - no till , PA no till and advisors
glimpse(PA_no_till_advisors)
glimpse(NoTill_adoption_state_NT_weeds)
PA_no_till_advisors_weeds_noTill <- rbind(PA_no_till_advisors, NoTill_adoption_state_NT_weeds)
ggplot(PA_no_till_advisors_weeds_noTill, aes(year, cumm_percent, group = adoption))+
geom_line(aes(linetype=adoption))+
theme_classic()+
theme(legend.position = "bottom")+
xlim(1980, 2015)+
ylim(0,100)+
facet_wrap(.~zone)
ggsave("1Adoption_no_till_and_advisor_PA_No_till_weeds.png", width = 9.8, height = 5.6, units = "in")
glimpse(PA_survey_zone)
###PA Autosteer Yr_AutoSteer_PA
# Cumulative adoption curve (1950-2014) of autosteer (PA survey) per state.
# Same contract as the other fun_test* helpers: returns a data.frame with
# columns year, id, zone, freq, cummulative, cumm_percent, one row per year
# 1950..2014. `numb_farmers` is the state's respondent total (denominator).
# Fixes: drops the plyr::count(df, "col") dependency (plyr is never attached
# here) in favour of base table()/merge(), and returns `year` as an integer
# calendar year rather than a factor (as.integer on the factor previously
# yielded level codes 1..65, breaking the plots' xlim(1980, 2015)).
fun_test4 <- function(df, zone, numb_farmers, year_col = "Yr_AutoSteer_PA") {
  df_subset <- df[!is.na(df$state) & df$state == zone, , drop = FALSE]
  # Frequency of each observed first-adoption year within the state.
  year_counts <- table(as.character(df_subset[[year_col]]))
  count_adoption_year <- data.frame(year = names(year_counts),
                                    freq = as.integer(year_counts),
                                    stringsAsFactors = FALSE)
  # One row per study year, so zero-adoption years appear explicitly.
  years_of_study <- data.frame(year = as.character(1950:2014),
                               id = seq_len(65),
                               zone = zone,
                               stringsAsFactors = FALSE)
  adoption_df <- merge(years_of_study, count_adoption_year,
                       by = "year", all.x = TRUE)
  adoption_df <- adoption_df[order(adoption_df$id), ]
  adoption_df$freq[is.na(adoption_df$freq)] <- 0L
  adoption_df$cummulative <- cumsum(adoption_df$freq)
  adoption_df$cumm_percent <- adoption_df$cummulative / numb_farmers * 100
  adoption_df$year <- as.integer(adoption_df$year)
  rownames(adoption_df) <- NULL
  adoption_df
}
###PA Autosteer
Yr_AutoSteer_PANSW <- fun_test4(PA_survey_zone, "NSW", 105)
Yr_AutoSteer_PASA <- fun_test4(PA_survey_zone, "SA", 186)
Yr_AutoSteer_PAVIC <- fun_test4(PA_survey_zone, "VIC", 150)
Yr_AutoSteer_PAWA <- fun_test4(PA_survey_zone, "WA", 128)
Yr_AutoSteer_PA_state <- rbind(Yr_AutoSteer_PANSW,
Yr_AutoSteer_PASA,
Yr_AutoSteer_PAVIC,
Yr_AutoSteer_PAWA) %>%
mutate(year = as.integer(year))
glimpse(Yr_No_Till_PA_state)
Yr_AutoSteer_PA_state <- mutate(Yr_AutoSteer_PA_state, adoption = "PA_Auto_steer")
glimpse(Yr_AutoSteer_PA_state)
glimpse(NoTill_adoption_state_NT_weeds)
Yr_AutoSteer_PA_weeds_noTill <- rbind(Yr_AutoSteer_PA_state, NoTill_adoption_state_NT_weeds, PA_Agro_state)
ggplot(Yr_AutoSteer_PA_weeds_noTill, aes(year, cumm_percent, group = adoption))+
geom_line(aes(linetype=adoption))+
theme_classic()+
theme(legend.position = "bottom")+
xlim(1980, 2015)+
ylim(0,100)+
facet_wrap(.~zone)
ggsave("Yr_AutoSteer_PA_weeds_noTill.png", width = 9.8, height = 5.6, units = "in")
Yr_AutoSteer_PA_Agro <- rbind(Yr_AutoSteer_PA_state, PA_Agro_state)
ggplot(Yr_AutoSteer_PA_Agro, aes(year, cumm_percent, group = adoption))+
geom_line(aes(linetype=adoption))+
theme_classic()+
theme(legend.position = "bottom")+
xlim(1980, 2015)+
ylim(0,100)+
facet_wrap(.~zone)
ggsave("Yr_AutoSteer_PA_Agro.png", width = 9.8, height = 5.6, units = "in")
###Yr_yld_map
glimpse(PA_survey_zone)
# Cumulative adoption curve (1950-2014) of yield mapping (PA survey) per
# state. Same contract as the other fun_test* helpers: returns a data.frame
# with columns year, id, zone, freq, cummulative, cumm_percent, one row per
# year 1950..2014. `numb_farmers` is the state's respondent total.
# Fixes: drops the plyr::count(df, "col") dependency (plyr is never attached
# here) in favour of base table()/merge(), and returns `year` as an integer
# calendar year rather than a factor (as.integer on the factor previously
# yielded level codes 1..65, breaking the plots' xlim(1980, 2015)).
fun_test5 <- function(df, zone, numb_farmers, year_col = "Yr_yld_map_PA") {
  df_subset <- df[!is.na(df$state) & df$state == zone, , drop = FALSE]
  # Frequency of each observed first-adoption year within the state.
  year_counts <- table(as.character(df_subset[[year_col]]))
  count_adoption_year <- data.frame(year = names(year_counts),
                                    freq = as.integer(year_counts),
                                    stringsAsFactors = FALSE)
  # One row per study year, so zero-adoption years appear explicitly.
  years_of_study <- data.frame(year = as.character(1950:2014),
                               id = seq_len(65),
                               zone = zone,
                               stringsAsFactors = FALSE)
  adoption_df <- merge(years_of_study, count_adoption_year,
                       by = "year", all.x = TRUE)
  adoption_df <- adoption_df[order(adoption_df$id), ]
  adoption_df$freq[is.na(adoption_df$freq)] <- 0L
  adoption_df$cummulative <- cumsum(adoption_df$freq)
  adoption_df$cumm_percent <- adoption_df$cummulative / numb_farmers * 100
  adoption_df$year <- as.integer(adoption_df$year)
  rownames(adoption_df) <- NULL
  adoption_df
}
###PA Yr_yld_map
Yr_yld_map_PANSW <- fun_test5(PA_survey_zone, "NSW", 105)
Yr_yld_map_PASA <- fun_test5(PA_survey_zone, "SA", 186)
Yr_yld_map_PAVIC <- fun_test5(PA_survey_zone, "VIC", 150)
Yr_yld_map_PAWA <- fun_test5(PA_survey_zone, "WA", 128)
Yr_yld_map_PA_state <- rbind(Yr_yld_map_PANSW,
Yr_yld_map_PASA,
Yr_yld_map_PAVIC,
Yr_yld_map_PAWA) %>%
mutate(year = as.integer(year))
glimpse(Yr_yld_map_PA_state)
Yr_yld_map_PA_state <- mutate(Yr_yld_map_PA_state, adoption = "PA_Yld_map")
glimpse(Yr_yld_map_PA_state)
glimpse(NoTill_adoption_state_NT_weeds)
Yr_yld_map_PA_weeds_noTill <- rbind(Yr_yld_map_PA_state, NoTill_adoption_state_NT_weeds)
ggplot(Yr_yld_map_PA_weeds_noTill, aes(year, cumm_percent, group = adoption))+
geom_line(aes(linetype=adoption))+
theme_classic()+
theme(legend.position = "bottom")+
xlim(1980, 2015)+
ylim(0,100)+
facet_wrap(.~zone)
ggsave("Yr_yld_map_PA_weeds_noTill.png", width = 9.8, height = 5.6, units = "in")
Yr_yld_map_PA_Agro_state <- rbind(Yr_yld_map_PA_state, PA_Agro_state)
ggplot(Yr_yld_map_PA_Agro_state, aes(year, cumm_percent, group = adoption))+
geom_line(aes(linetype=adoption))+
theme_classic()+
theme(legend.position = "bottom")+
xlim(1980, 2015)+
ylim(0,100)+
facet_wrap(.~zone)
ggsave("Yr_yld_map_PA_Agro_state.png", width = 9.8, height = 5.6, units = "in")
###Yr_soil_testing_PA
glimpse(PA_survey_zone)
# Cumulative adoption curve (1950-2014) of soil testing (PA survey) per
# state. Same contract as the other fun_test* helpers: returns a data.frame
# with columns year, id, zone, freq, cummulative, cumm_percent, one row per
# year 1950..2014. `numb_farmers` is the state's respondent total.
# Fixes: drops the plyr::count(df, "col") dependency (plyr is never attached
# here) in favour of base table()/merge(), and returns `year` as an integer
# calendar year rather than a factor (as.integer on the factor previously
# yielded level codes 1..65, breaking the plots' xlim(1980, 2015)).
fun_test6 <- function(df, zone, numb_farmers, year_col = "Yr_soil_testing_PA") {
  df_subset <- df[!is.na(df$state) & df$state == zone, , drop = FALSE]
  # Frequency of each observed first-adoption year within the state.
  year_counts <- table(as.character(df_subset[[year_col]]))
  count_adoption_year <- data.frame(year = names(year_counts),
                                    freq = as.integer(year_counts),
                                    stringsAsFactors = FALSE)
  # One row per study year, so zero-adoption years appear explicitly.
  years_of_study <- data.frame(year = as.character(1950:2014),
                               id = seq_len(65),
                               zone = zone,
                               stringsAsFactors = FALSE)
  adoption_df <- merge(years_of_study, count_adoption_year,
                       by = "year", all.x = TRUE)
  adoption_df <- adoption_df[order(adoption_df$id), ]
  adoption_df$freq[is.na(adoption_df$freq)] <- 0L
  adoption_df$cummulative <- cumsum(adoption_df$freq)
  adoption_df$cumm_percent <- adoption_df$cummulative / numb_farmers * 100
  adoption_df$year <- as.integer(adoption_df$year)
  rownames(adoption_df) <- NULL
  adoption_df
}
###PA Yr_soil_testing_PA
Yr_soil_testing_PANSW <- fun_test6(PA_survey_zone, "NSW", 105)
Yr_soil_testing_PASA <- fun_test6(PA_survey_zone, "SA", 186)
Yr_soil_testing_PAVIC <- fun_test6(PA_survey_zone, "VIC", 150)
Yr_soil_testing_PAWA <- fun_test6(PA_survey_zone, "WA", 128)
Yr_soil_testing_PA_state <- rbind(Yr_soil_testing_PANSW,
Yr_soil_testing_PASA,
Yr_soil_testing_PAVIC,
Yr_soil_testing_PAWA) %>%
mutate(year = as.integer(year))
glimpse(Yr_soil_testing_PA_state)
Yr_soil_testing_PA_state <- mutate(Yr_soil_testing_PA_state, adoption = "PA_soil_test")
glimpse(Yr_soil_testing_PA_state)
glimpse(NoTill_adoption_state_NT_weeds)
Yr_soil_testing_PA_state_weeds_noTill <- rbind(Yr_soil_testing_PA_state, NoTill_adoption_state_NT_weeds)
ggplot(Yr_soil_testing_PA_state_weeds_noTill, aes(year, cumm_percent, group = adoption))+
geom_line(aes(linetype=adoption))+
theme_classic()+
theme(legend.position = "bottom")+
xlim(1980, 2015)+
ylim(0,100)+
facet_wrap(.~zone)
ggsave("Yr_soil_testing_PA_weeds_noTill.png", width = 9.8, height = 5.6, units = "in")
Yr_soil_testing_PA_Agro_state <- rbind(Yr_soil_testing_PA_state, PA_Agro_state)
ggplot(Yr_soil_testing_PA_Agro_state, aes(year, cumm_percent, group = adoption))+
geom_line(aes(linetype=adoption))+
theme_classic()+
theme(legend.position = "bottom")+
xlim(1980, 2015)+
ylim(0,100)+
facet_wrap(.~zone)
ggsave("Yr_soil_testing_PA_Agro_state.png", width = 9.8, height = 5.6, units = "in")
### 2014 weeds-survey adoption data: crop topping ----
# Farmer-to-location lookup (postcode, GRDC agro-ecological zone, region,
# state) shared by the weeds-survey adoption analyses below.
XYNoTill_postcodes_join <- read.csv("C:/Users/ouz001/book_chapter_data/adoption_data/XYNoTill_postcodes_join_GRDC_SLA.csv") %>%
  select(farmer,
         postcode,
         study,
         AGROECOLOG,
         REGIONS,
         state)
glimpse(XYNoTill_postcodes_join)
# Q20l1 = year the farmer first adopted crop topping. -99 (presumably the
# survey's missing/never code — confirm against the codebook) is recoded
# to 0 so it never matches the 1950-2014 year scaffold built later.
crop_top <- read_excel("C:/Users/ouz001/book_chapter_data/Weeds/Raw_data_Weeds_with_postcodes.xlsx")%>%
  select(KEY,
         Q20l1)
crop_top <- mutate(crop_top, Yr_crop_top = if_else(Q20l1 ==-99, 0, Q20l1))
glimpse(crop_top)
# Year becomes a factor so it can be joined against a factor scaffold;
# KEY is duplicated as `farmer` to match the lookup table's key column.
crop_top <- mutate(crop_top, Yr_crop_top = as.factor(Yr_crop_top),
                   farmer = KEY)
# Join the data together to get columns for crop topping and for states etc.
crop_top1 <- left_join(crop_top, XYNoTill_postcodes_join, by = "farmer")
glimpse(crop_top1)
#' Cumulative crop-topping adoption curve for one state.
#'
#' Filters the joined weeds-survey data to a single state, counts first
#' adoptions of crop topping per year, and converts the counts to a
#' cumulative percentage of that state's surveyed farmers over 1950-2014.
#' Years with no adoptions are kept with a count of zero.
#'
#' @param df Joined survey data with columns `state` and `Yr_crop_top`
#'   (a factor of first-adoption years; level "0" means never adopted).
#' @param zone State code to keep, e.g. "NSW".
#' @param numb_farmers Number of surveyed farmers in that state; the
#'   denominator of the cumulative percentage.
#' @return Data frame with columns year (numeric calendar year), id, zone,
#'   freq, cummulative and cumm_percent.
adoption_curve_fun7 <- function(df, zone, numb_farmers) {
  df_subset <- filter(df, state == zone)
  # NOTE(review): the string-quoted column relies on plyr::count(), which
  # returns a `freq` column; dplyr::count() behaves differently — confirm
  # plyr is attached when this runs.
  count_adoption_year <- count(df_subset, "Yr_crop_top")
  count_adoption_year <- select(count_adoption_year, year = Yr_crop_top, freq)
  # Full 1950-2014 scaffold so zero-adoption years survive the join.
  years_of_study <- data.frame(year = 1950:2014,
                               id = 1:65,
                               zone = zone)
  years_of_study <- mutate(years_of_study, year = as.factor(year))
  adoption_df <- left_join(years_of_study, count_adoption_year, by = "year")
  adoption_df <- mutate(adoption_df, freq = replace_na(adoption_df$freq, 0))
  adoption_df <- mutate(adoption_df, cummulative = cumsum(adoption_df$freq))
  adoption_df <- mutate(adoption_df, cumm_percent = (adoption_df$cummulative/numb_farmers)*100)
  # BUG FIX: as.double() on a factor returns the level codes (1..65), not
  # the calendar years, which pushed every curve outside the plots'
  # xlim(1980, 2015). Convert via character to recover the actual years.
  adoption_df$year <- as.double(as.character(adoption_df$year))
  return(adoption_df)
}
# Cumulative crop-topping adoption per state; third argument is the number
# of surveyed farmers in that state (the percentage denominator).
QLD_croptop <- adoption_curve_fun7(crop_top1, "QLD", 59)
NSW_croptop <- adoption_curve_fun7(crop_top1, "NSW", 153)
SA_croptop <- adoption_curve_fun7(crop_top1, "SA", 65)
VIC_croptop <- adoption_curve_fun7(crop_top1, "VIC", 141)
WA_croptop <- adoption_curve_fun7(crop_top1, "WA", 179)
croptop_states <- rbind(QLD_croptop, NSW_croptop, SA_croptop,VIC_croptop, WA_croptop )
croptop_states <- mutate(croptop_states, adoption = "crop top")
# Crop topping alone, one panel per state (displayed only — not saved).
ggplot(croptop_states, aes(year, cumm_percent))+
  geom_line(aes(linetype=adoption))+
  #scale_linetype_manual(values=c("dashed", "dotted", "solid"))+
  theme_classic()+
  theme(legend.position = "bottom")+
  xlim(1980, 2015)+
  ylim(0,100)+
  facet_wrap(.~zone)+
  labs(x = "Years",
       y = "Percentage of farmers")
glimpse(NoTill_adoption_state_NT_weeds)
# Crop topping vs. advisor adoption; QLD dropped (no advisor data there).
croptop_advisors <- rbind(PA_Agro_state, croptop_states)
croptop_advisors <- filter(croptop_advisors, zone != "QLD")
glimpse(croptop_advisors)
ggplot(croptop_advisors, aes(year, cumm_percent, group = adoption))+
  geom_line(aes(linetype=adoption))+
  #scale_linetype_manual(values=c("dashed", "dotted", "solid"))+
  theme_classic()+
  theme(legend.position = "bottom")+
  xlim(1980, 2015)+
  ylim(0,100)+
  facet_wrap(.~zone)+
  labs(x = "Years",
       y = "Percentage of farmers")
ggsave("croptop_advisors.png", width = 9.8, height = 5.6, units = "in")
#### Combined data set: advisor (PA study), no till, soil test, auto steer and crop topping ######
glimpse(PA_Agro_state)
glimpse(NoTill_adoption_state_NT_weeds)
glimpse(Yr_soil_testing_PA_state)
glimpse(Yr_AutoSteer_PA_state)
glimpse(croptop_states)
# All five adoption series stacked; they must share the year / cumm_percent /
# zone / adoption columns for rbind() and the faceted plot to work.
Agro_noTill_soil_test_autoSteer_crop_top <- rbind(PA_Agro_state,
                                                  NoTill_adoption_state_NT_weeds,
                                                  Yr_soil_testing_PA_state,
                                                  Yr_AutoSteer_PA_state,
                                                  croptop_states)
Agro_noTill_soil_test_autoSteer_crop_top<- filter(Agro_noTill_soil_test_autoSteer_crop_top, zone!= "QLD")
ggplot(Agro_noTill_soil_test_autoSteer_crop_top, aes(year, cumm_percent, group = adoption))+
  geom_line(aes(linetype=adoption))+
  #scale_linetype_manual(values=c("dashed", "dotted", "solid"))+
  theme_classic()+
  theme(legend.position = "bottom")+
  xlim(1980, 2015)+
  ylim(0,100)+
  facet_wrap(.~zone)+
  labs(x = "Years",
       y = "Percentage of farmers")
ggsave("Agro_noTill_soil_test_autoSteer_crop_top.png", width = 9.8, height = 5.6, units = "in")
######### Combined data set: advisor (PA study), no till, soil test, and auto steer ######
# Same as the previous combined plot but without the crop-topping series.
glimpse(PA_Agro_state)
glimpse(NoTill_adoption_state_NT_weeds)
glimpse(Yr_soil_testing_PA_state)
glimpse(Yr_AutoSteer_PA_state)
Agro_noTill_soil_test_autoSteer <- rbind(PA_Agro_state,
                                         NoTill_adoption_state_NT_weeds,
                                         Yr_soil_testing_PA_state,
                                         Yr_AutoSteer_PA_state)
# QLD dropped: not all series cover Queensland.
Agro_noTill_soil_test_autoSteer<- filter(Agro_noTill_soil_test_autoSteer, zone!= "QLD")
ggplot(Agro_noTill_soil_test_autoSteer, aes(year, cumm_percent, group = adoption))+
  geom_line(aes(linetype=adoption))+
  #scale_linetype_manual(values=c("dashed", "dotted", "solid"))+
  theme_classic()+
  theme(legend.position = "bottom")+
  xlim(1980, 2015)+
  ylim(0,100)+
  facet_wrap(.~zone)+
  labs(x = "Years",
       y = "Percentage of farmers")
ggsave("Agro_noTill_soil_test_autoSteer.png", width = 9.8, height = 5.6, units = "in")
####### Narrow windrow burning ###########
# Reload the farmer-to-location lookup (same file as for crop topping).
XYNoTill_postcodes_join <- read.csv("C:/Users/ouz001/book_chapter_data/adoption_data/XYNoTill_postcodes_join_GRDC_SLA.csv") %>%
  select(farmer,
         postcode,
         study,
         AGROECOLOG,
         REGIONS,
         state)
glimpse(XYNoTill_postcodes_join)
# Q21c1 = year the farmer first adopted narrow windrow burning; -99
# (presumably the survey's missing/never code — confirm) is recoded to 0.
narrow_windrow_burn <- read_excel("C:/Users/ouz001/book_chapter_data/Weeds/Raw_data_Weeds_with_postcodes.xlsx")%>%
  select(KEY,
         Q21c1 )
narrow_windrow_burn <- mutate(narrow_windrow_burn, Yr_narrow_windrow_burn = if_else(Q21c1 ==-99, 0, Q21c1))
glimpse(narrow_windrow_burn)
narrow_windrow_burn <- mutate(narrow_windrow_burn, Yr_narrow_windrow_burn = as.factor(Yr_narrow_windrow_burn),
                              farmer = KEY)
# Join the data together to get columns for narrow windrow and for states etc.
narrow_windrow_burn1 <- left_join(narrow_windrow_burn, XYNoTill_postcodes_join, by = "farmer")
glimpse(narrow_windrow_burn1)
#' Cumulative narrow-windrow-burning adoption curve for one state.
#'
#' Redefinition of adoption_curve_fun7 (shadowing the crop-topping version
#' above) that counts the `Yr_narrow_windrow_burn` column instead. Filters
#' to one state and returns cumulative adoption as a percentage of that
#' state's surveyed farmers over 1950-2014.
#'
#' @param df Joined survey data with columns `state` and
#'   `Yr_narrow_windrow_burn` (factor of first-adoption years; "0" = never).
#' @param zone State code to keep, e.g. "NSW".
#' @param numb_farmers Number of surveyed farmers in that state.
#' @return Data frame with year (numeric calendar year), id, zone, freq,
#'   cummulative and cumm_percent.
adoption_curve_fun7 <- function(df, zone, numb_farmers) {
  df_subset <- filter(df, state == zone)
  # NOTE(review): string-quoted column relies on plyr::count() (returns a
  # `freq` column) — confirm plyr is attached.
  count_adoption_year <- count(df_subset, "Yr_narrow_windrow_burn")
  count_adoption_year <- select(count_adoption_year, year = Yr_narrow_windrow_burn, freq)
  # Full 1950-2014 scaffold so zero-adoption years survive the join.
  years_of_study <- data.frame(year = 1950:2014,
                               id = 1:65,
                               zone = zone)
  years_of_study <- mutate(years_of_study, year = as.factor(year))
  adoption_df <- left_join(years_of_study, count_adoption_year, by = "year")
  adoption_df <- mutate(adoption_df, freq = replace_na(adoption_df$freq, 0))
  adoption_df <- mutate(adoption_df, cummulative = cumsum(adoption_df$freq))
  adoption_df <- mutate(adoption_df, cumm_percent = (adoption_df$cummulative/numb_farmers)*100)
  # BUG FIX: as.double() on a factor returns the level codes (1..65), not
  # calendar years, which left the curves outside the plots' xlim(1980, 2015).
  # Convert via character to recover the actual years.
  adoption_df$year <- as.double(as.character(adoption_df$year))
  return(adoption_df)
}
# Cumulative narrow-windrow-burning adoption per state.
QLD_narrow_burn <- adoption_curve_fun7(narrow_windrow_burn1, "QLD", 59)
NSW_narrow_burn <- adoption_curve_fun7(narrow_windrow_burn1, "NSW", 153)
SA_narrow_burn <- adoption_curve_fun7(narrow_windrow_burn1, "SA", 65)
VIC_narrow_burn <- adoption_curve_fun7(narrow_windrow_burn1, "VIC", 141)
WA_narrow_burn <- adoption_curve_fun7(narrow_windrow_burn1, "WA", 179)
narrow_burn_states <- rbind(QLD_narrow_burn, NSW_narrow_burn, SA_narrow_burn,VIC_narrow_burn, WA_narrow_burn )
narrow_burn_states <- mutate(narrow_burn_states, adoption = "narrow windrow burning")
####### Narrow windrow burning, crop top and advisor (PA) by states #######
glimpse(PA_Agro_state)
glimpse(narrow_burn_states)
Agro_advisor_crop_top_narrow_burn <- rbind(PA_Agro_state,
                                           narrow_burn_states,
                                           croptop_states)
# QLD dropped: the advisor series does not cover Queensland.
Agro_advisor_crop_top_narrow_burn <- filter(Agro_advisor_crop_top_narrow_burn, zone!= "QLD")
ggplot(Agro_advisor_crop_top_narrow_burn , aes(year, cumm_percent, group = adoption))+
  geom_line(aes(linetype=adoption))+
  #scale_linetype_manual(values=c("solid", "dashed", "dotted" ))+
  theme_classic()+
  theme(legend.position = "bottom")+
  # The second theme() call overrides the first: the legend is hidden.
  theme(legend.position = "")+
  xlim(1980, 2015)+
  ylim(0,100)+
  facet_wrap(.~zone)+
  labs(x = "Years",
       y = "Percentage of farmers")
ggsave("xxAgro_advisor_crop_top_narrow_burn.png", width = 9.8, height = 5.6, units = "in")
glimpse(Agro_advisor_crop_top_narrow_burn)
# Same plot restricted to SA and WA only.
Agro_advisor_crop_top_narrow_burn_not_NSW_VIC <- filter(Agro_advisor_crop_top_narrow_burn, zone == "SA" | zone == "WA")
glimpse(Agro_advisor_crop_top_narrow_burn_not_NSW_VIC)
ggplot(Agro_advisor_crop_top_narrow_burn_not_NSW_VIC , aes(year, cumm_percent, group = adoption))+
  geom_line(aes(linetype=adoption))+
  #scale_linetype_manual(values=c("solid", "dashed", "dotted" ))+
  theme_classic()+
  theme(legend.position = "bottom")+
  theme(legend.position = "")+
  xlim(1980, 2015)+
  ylim(0,100)+
  facet_wrap(.~zone)+
  labs(x = "Years",
       y = "Percentage of farmers")
ggsave("xxAgro_advisor_crop_top_narrow_burn_SA_WA.png", width = 9.8, height = 5.6, units = "in")
########### Burn, advisors and crop top by GRDC regions ########
### BURN
# Reload the lookup and the burning data (duplicates the state-level load
# above; kept so this section can be run independently).
XYNoTill_postcodes_join <- read.csv("C:/Users/ouz001/book_chapter_data/adoption_data/XYNoTill_postcodes_join_GRDC_SLA.csv") %>%
  select(farmer,
         postcode,
         study,
         AGROECOLOG,
         REGIONS,
         state)
glimpse(XYNoTill_postcodes_join)
narrow_windrow_burn <- read_excel("C:/Users/ouz001/book_chapter_data/Weeds/Raw_data_Weeds_with_postcodes.xlsx")%>%
  select(KEY,
         Q21c1 )
# -99 (presumably missing/never — confirm) recoded to 0.
narrow_windrow_burn <- mutate(narrow_windrow_burn, Yr_narrow_windrow_burn = if_else(Q21c1 ==-99, 0, Q21c1))
glimpse(narrow_windrow_burn)
narrow_windrow_burn <- mutate(narrow_windrow_burn, Yr_narrow_windrow_burn = as.factor(Yr_narrow_windrow_burn),
                              farmer = KEY)
# Join the data together to get columns for narrow windrow and for regions etc.
narrow_windrow_burn1 <- left_join(narrow_windrow_burn, XYNoTill_postcodes_join, by = "farmer")
glimpse(narrow_windrow_burn1)
# Farmer counts per region; these feed the numb_farmers arguments below.
count(narrow_windrow_burn1,"REGIONS")
#' Cumulative narrow-windrow-burning adoption curve for one GRDC region.
#'
#' Redefinition of adoption_curve_fun7 that filters on the `REGIONS`
#' column (Northern / Southern / Western) instead of `state`. Returns
#' cumulative adoption as a percentage of that region's surveyed farmers
#' over 1950-2014.
#'
#' @param df Joined survey data with columns `REGIONS` and
#'   `Yr_narrow_windrow_burn` (factor of first-adoption years; "0" = never).
#' @param zone Region name to keep, e.g. "Southern".
#' @param numb_farmers Number of surveyed farmers in that region.
#' @return Data frame with year (numeric calendar year), id, zone, freq,
#'   cummulative and cumm_percent.
adoption_curve_fun7 <- function(df, zone, numb_farmers) {
  df_subset <- filter(df, REGIONS == zone)
  # NOTE(review): string-quoted column relies on plyr::count() (returns a
  # `freq` column) — confirm plyr is attached.
  count_adoption_year <- count(df_subset, "Yr_narrow_windrow_burn")
  count_adoption_year <- select(count_adoption_year, year = Yr_narrow_windrow_burn, freq)
  # Full 1950-2014 scaffold so zero-adoption years survive the join.
  years_of_study <- data.frame(year = 1950:2014,
                               id = 1:65,
                               zone = zone)
  years_of_study <- mutate(years_of_study, year = as.factor(year))
  adoption_df <- left_join(years_of_study, count_adoption_year, by = "year")
  adoption_df <- mutate(adoption_df, freq = replace_na(adoption_df$freq, 0))
  adoption_df <- mutate(adoption_df, cummulative = cumsum(adoption_df$freq))
  adoption_df <- mutate(adoption_df, cumm_percent = (adoption_df$cummulative/numb_farmers)*100)
  # BUG FIX: as.double() on a factor returns level codes (1..65), not
  # calendar years. Convert via character to recover the actual years.
  adoption_df$year <- as.double(as.character(adoption_df$year))
  return(adoption_df)
}
glimpse(narrow_windrow_burn1)
# Cumulative narrow-windrow-burning adoption per GRDC region.
Northern_narrow_burn <- adoption_curve_fun7(narrow_windrow_burn1, "Northern", 118)
Southern_narrow_burn <- adoption_curve_fun7(narrow_windrow_burn1, "Southern", 298)
Western_narrow_burn <- adoption_curve_fun7(narrow_windrow_burn1, "Western", 172)
narrow_burn_Regions <- rbind(Northern_narrow_burn, Southern_narrow_burn, Western_narrow_burn )
narrow_burn_Regions <- mutate(narrow_burn_Regions, adoption = "narrow windrow burning")
### CROP TOPPING (region level)
# Reload lookup and crop-topping data (duplicates the state-level load).
XYNoTill_postcodes_join <- read.csv("C:/Users/ouz001/book_chapter_data/adoption_data/XYNoTill_postcodes_join_GRDC_SLA.csv") %>%
  select(farmer,
         postcode,
         study,
         AGROECOLOG,
         REGIONS,
         state)
glimpse(XYNoTill_postcodes_join)
crop_top <- read_excel("C:/Users/ouz001/book_chapter_data/Weeds/Raw_data_Weeds_with_postcodes.xlsx")%>%
  select(KEY,
         Q20l1)
# -99 (presumably missing/never — confirm) recoded to 0.
crop_top <- mutate(crop_top, Yr_crop_top = if_else(Q20l1 ==-99, 0, Q20l1))
glimpse(crop_top)
crop_top <- mutate(crop_top, Yr_crop_top = as.factor(Yr_crop_top),
                   farmer = KEY)
# Join the data together to get columns for crop topping and for regions etc.
crop_top1 <- left_join(crop_top, XYNoTill_postcodes_join, by = "farmer")
glimpse(crop_top1)
#' Cumulative crop-topping adoption curve for one GRDC region.
#'
#' Final redefinition of adoption_curve_fun7: filters on `REGIONS` and
#' counts `Yr_crop_top`. Returns cumulative adoption as a percentage of
#' that region's surveyed farmers over 1950-2014.
#'
#' @param df Joined survey data with columns `REGIONS` and `Yr_crop_top`
#'   (factor of first-adoption years; "0" = never adopted).
#' @param zone Region name to keep, e.g. "Southern".
#' @param numb_farmers Number of surveyed farmers in that region.
#' @return Data frame with year (numeric calendar year), id, zone, freq,
#'   cummulative and cumm_percent.
adoption_curve_fun7 <- function(df, zone, numb_farmers) {
  df_subset <- filter(df, REGIONS == zone)
  # NOTE(review): string-quoted column relies on plyr::count() (returns a
  # `freq` column) — confirm plyr is attached.
  count_adoption_year <- count(df_subset, "Yr_crop_top")
  count_adoption_year <- select(count_adoption_year, year = Yr_crop_top, freq)
  # Full 1950-2014 scaffold so zero-adoption years survive the join.
  years_of_study <- data.frame(year = 1950:2014,
                               id = 1:65,
                               zone = zone)
  years_of_study <- mutate(years_of_study, year = as.factor(year))
  adoption_df <- left_join(years_of_study, count_adoption_year, by = "year")
  adoption_df <- mutate(adoption_df, freq = replace_na(adoption_df$freq, 0))
  adoption_df <- mutate(adoption_df, cummulative = cumsum(adoption_df$freq))
  adoption_df <- mutate(adoption_df, cumm_percent = (adoption_df$cummulative/numb_farmers)*100)
  # BUG FIX: as.double() on a factor returns level codes (1..65), not
  # calendar years. Convert via character to recover the actual years.
  adoption_df$year <- as.double(as.character(adoption_df$year))
  return(adoption_df)
}
# Cumulative crop-topping adoption per GRDC region.
Northern_croptop <- adoption_curve_fun7(crop_top1, "Northern", 118)
Southern_croptop <- adoption_curve_fun7(crop_top1, "Southern", 298)
Western_croptop <- adoption_curve_fun7(crop_top1, "Western", 172)
croptop_Region <- rbind(Northern_croptop, Southern_croptop, Western_croptop )
croptop_Region <- mutate(croptop_Region, adoption = "crop top")
#### ADVISOR (region level)
glimpse(PA_survey_zone)
# Farmer counts per region; these feed the fun_test2() denominators below.
count(PA_survey_zone,"REGIONS")
# Cumulative advisor (agronomist) adoption curve for one GRDC region.
#
# Filters the PA survey to one region, counts first-adoption years of
# `Yr_Agro`, joins them onto a 1950-2014 scaffold, and returns cumulative
# adoption as a percentage of `numb_farmers`. The last assignment is the
# (invisible) return value; note `year` is left as a FACTOR here — callers
# must convert it to numeric themselves (see advisor_region below).
fun_test2 <- function(df, zone, numb_farmers) {
  df_subset <- filter(df, REGIONS == zone)
  # NOTE(review): string-quoted column relies on plyr::count() (returns a
  # `freq` column) — confirm plyr is attached.
  count_adoption_year <- count(df_subset,"Yr_Agro")
  count_adoption_year <- select(count_adoption_year, year = Yr_Agro, freq) #this clm year is a factor
  years_of_study <- data.frame(year = 1950:2014, #this is int
                               id = 1:65,
                               zone = zone)
  years_of_study <- mutate(years_of_study, year = as.factor(year))
  adoption_df <- left_join(years_of_study, count_adoption_year, by= "year" )
  # Years with no adoptions come through the join as NA -> set to 0.
  adoption_df1 <- mutate(adoption_df, freq = replace_na(adoption_df$freq, 0))
  adoption_df2 <- mutate(adoption_df1, cummulative = cumsum(adoption_df1$freq))
  adoption_df3 <- mutate(adoption_df2, cumm_percent = (adoption_df2$cummulative/numb_farmers)*100)
}
### PA advisors by region ######
# Cumulative advisor adoption for the two regions covered by the PA survey;
# the third argument is the number of surveyed farmers in that region.
Southern_advisor <- fun_test2(PA_survey_zone, "Southern", 441)
Western_advisor <- fun_test2(PA_survey_zone, "Western", 128)
advisor_region <- rbind(Southern_advisor, Western_advisor)
# BUG FIX: fun_test2() returns `year` as a factor; as.double() on a factor
# yields the level codes (1..65) rather than calendar years, so the advisor
# series could never appear inside the 1980-2015 x axis. Convert via
# character first to recover the actual years.
advisor_region <- mutate(advisor_region, adoption = "advisor",
                         year = as.double(as.character(year)))
glimpse(advisor_region)
glimpse(narrow_burn_Regions)
glimpse(croptop_Region)
glimpse(advisor_region)
# Advisor + narrow windrow burning + crop topping, stacked long.
advisor_narrow_crop_region <- rbind(advisor_region, narrow_burn_Regions, croptop_Region)
glimpse(advisor_narrow_crop_region)
# Drop Northern: the advisor data only covers Southern and Western.
advisor_narrow_crop_region_St_West <- filter(advisor_narrow_crop_region, zone != "Northern")
glimpse(advisor_narrow_crop_region_St_West)
ggplot(advisor_narrow_crop_region_St_West, aes(year, cumm_percent, group = adoption))+
  geom_line(aes(linetype=adoption))+
  #scale_linetype_manual(values=c("solid", "dashed", "dotted" ))+
  theme_classic()+
  #theme(legend.position = "bottom")+
  theme(legend.position = "")+
  xlim(1980, 2015)+
  ylim(0,100)+
  facet_wrap(.~zone)+
  labs(x = "Years",
       y = "Percentage of farmers")
ggsave("xxAgro_advisor_crop_top_narrow_burn_Sth_West.png", width = 9.8, height = 5.6, units = "in")
|
e50d4809b08ed0aee617b9d7e8100e04c35b72df | a159106592c73eef0699c2485ce781ef092c6022 | /04-limma-voom.R | 574bcf311cb7161eb600e377eeb44773fbb3d8fa | [
"MIT"
] | permissive | bobia9991/RNA-seq | 4a0f87be27fbff876899f393940c2500f21049a2 | c29df866ba949a3e6abe118ef4c21f34162628e0 | refs/heads/main | 2023-06-19T20:16:32.091529 | 2021-07-16T00:38:11 | 2021-07-16T00:38:11 | 560,504,965 | 1 | 0 | MIT | 2022-11-01T16:42:15 | 2022-11-01T16:42:14 | null | UTF-8 | R | false | false | 16,056 | r | 04-limma-voom.R | # RNA-seq pipeline
# Ben Laufer
# Modifies and expands on these references:
#https://ucdavis-bioinformatics-training.github.io/2018-June-RNA-Seq-Workshop/thursday/DE.html
#https://www.bioconductor.org/packages/devel/workflows/vignettes/RNAseq123/inst/doc/limmaWorkflow.html
# Load packages -----------------------------------------------------------
setwd("~/Box/PEBBLES/RNA")
packages <- c("edgeR", "tidyverse", "RColorBrewer", "org.Mm.eg.db", "AnnotationDbi", "EnhancedVolcano",
"enrichR", "openxlsx", "gt", "glue", "Glimma", "sva", "DMRichR")
enrichR:::.onAttach() # Needed or else "EnrichR website not responding"
stopifnot(suppressMessages(sapply(packages, require, character.only = TRUE)))
#BiocManager::install("ben-laufer/DMRichR")
# To test and develop, assign the variable tissue and then just run the main sections
sink("RNA-seq_log.txt", type = "output", append = FALSE, split = TRUE)
# Run the full differential-expression pipeline once per tissue x sex
# combination; pwalk() is used for its side effects only (files, plots,
# and tables written per combination).
tidyr::crossing(tissue = c("placenta", "brain"),
                sex = c("male", "female")) %>%
  purrr::pwalk(function(tissue, sex){
  dir.create(glue::glue("{tissue}_{sex}"))
  # Count Matrix ------------------------------------------------------------
  #name <- gsub( "(?:[^_]+_){4}([^_ ]+)*$","", files)
  # STAR quantMode geneCounts output:
  #column 1: gene ID
  #column 2: counts for unstranded RNA-seq
  #column 3: counts for the 1st read strand aligned with RNA (htseq-count option -s yes)
  #column 4: counts for the 2nd read strand aligned with RNA (htseq-count option -s reverse)
  # KAPA mRNA HyperPrep Kit reads are reverse stranded, so select column 4
  # Confirm by looking at the N_noFeature line for the 3rd and 4th column and pick the column with the lowest count.
  # Sample names = first two underscore-separated tokens of each GeneCounts
  # file name, joined with "-".
  sampleNames <- list.files(path = glue::glue(getwd(), "/GeneCounts"),
                            pattern = "*.ReadsPerGene.out.tab") %>%
    stringr::str_split_fixed("_", n = 3) %>%
    tibble::as_tibble() %>%
    tidyr::unite(Name, c(V1:V2), sep = "-") %>%
    dplyr::select(Name) %>%
    purrr::flatten_chr()
  # Could alternatively use edgeR::readDGE() but that calls to the slower read.delim()
  # Gene IDs come from column 1 of the first file (STAR writes the same
  # gene order into every ReadsPerGene file).
  ensemblIDs <- list.files(path = glue::glue(getwd(), "/GeneCounts"),
                           pattern = "*.ReadsPerGene.out.tab", full.names = TRUE)[1] %>%
    data.table::fread(select = 1) %>%
    purrr::flatten_chr()
  # One column of strand-specific counts (column 4) per sample.
  countMatrix <- list.files(path = glue::glue(getwd(), "/GeneCounts"),
                            pattern = "*.ReadsPerGene.out.tab", full.names = TRUE) %>%
    purrr::map_dfc(data.table::fread, select = 4, data.table = FALSE) %>%
    magrittr::set_colnames(sampleNames) %>%
    magrittr::set_rownames(ensemblIDs)
  # Remove meta info (the four N_unmapped/N_multimapping/... summary rows)
  countMatrix <- countMatrix[-c(1:4),]
  # Design Matrix -----------------------------------------------------------
  designMatrix <- readxl::read_xlsx("sample_info.xlsx") %>%
    dplyr::rename(group = Treatment) %>%
    dplyr::mutate_if(is.character, as.factor) %>%
    dplyr::mutate(Name = as.character(Name))
  # # Recode sex
  # designMatrix$Sex <- as.character(designMatrix$Sex)
  # designMatrix$Sex[designMatrix$Sex == "F"] <- "0"
  # designMatrix$Sex[designMatrix$Sex == "M"] <- "1"
  # designMatrix$Sex <- as.factor(designMatrix$Sex)
  # Align design-matrix rows with the count-matrix column order.
  samples.idx <- pmatch(designMatrix$Name, colnames(countMatrix))
  designMatrix <- designMatrix[order(samples.idx),]
  # Preprocessing -----------------------------------------------------------
  print(glue::glue("Preprocessing {sex} {tissue} samples"))
  # Select sample subset for the current tissue/sex combination
  designMatrix <- designMatrix %>%
    dplyr::filter(Tissue == tissue & Sex == sex)
  countMatrix <- countMatrix %>%
    dplyr::select(contains(designMatrix$Name)) %>%
    as.matrix
  # Create DGE list and calculate normalization factors (TMM by default)
  countMatrix <- countMatrix %>%
    DGEList() %>%
    calcNormFactors()
  # Reorder design matrix to match the DGEList's sample order, then assert
  # the two are aligned before attaching metadata.
  samples.idx <- pmatch(designMatrix$Name, rownames(countMatrix$samples))
  designMatrix <- designMatrix[order(samples.idx),]
  stopifnot(rownames(countMatrix$samples) == designMatrix$Name)
  # Add sample info from design matrix to DGE list
  countMatrix$samples <- countMatrix$samples %>%
    tibble::add_column(designMatrix %>% dplyr::select(-Name))
  # Add gene info: one annotation column per keytype, mapped from the
  # Ensembl row names via org.Mm.eg.db.
  countMatrix$genes <- purrr::map_dfc(c("SYMBOL", "GENENAME", "ENTREZID", "CHR"), function(column){
    rownames(countMatrix$counts) %>%
      AnnotationDbi::mapIds(org.Mm.eg.db,
                            keys = .,
                            column = column,
                            keytype = 'ENSEMBL') %>%
      as.data.frame() %>%
      tibble::remove_rownames() %>%
      purrr::set_names(column)
  })
  # Raw density of log-CPM values; the cutoff line mirrors filterByExpr's
  # ~10 counts in the median-size library heuristic.
  L <- mean(countMatrix$samples$lib.size) * 1e-6
  M <- median(countMatrix$samples$lib.size) * 1e-6
  logCPM <- cpm(countMatrix, log = TRUE)
  logCPM.cutoff <- log2(10/M + 2/L)
  nsamples <- ncol(countMatrix)
  # NOTE(review): brewer.pal() supports 3-12 colours; more samples than 12
  # per tissue/sex would warn and recycle.
  col <- brewer.pal(nsamples, "Paired")
  pdf(glue::glue("{tissue}_{sex}/{tissue}_{sex}_density_plot.pdf"), height = 8.5, width = 11)
  par(mfrow = c(1,2))
  plot(density(logCPM[,1]), col = col[1], lwd = 2, las = 2, main = "", xlab = "")
  title(main = "A. Raw data", xlab = "Log-cpm")
  abline(v = logCPM.cutoff, lty = 3)
  # Overlay one density curve per remaining sample
  for (i in 2:nsamples){
    den <- density(logCPM[,i])
    lines(den$x, den$y, col = col[i], lwd = 2)
  }
  legend("topright", designMatrix$Name, text.col = col, bty = "n", cex = 0.5)
  # Filter genes with low expression and recompute normalization factors
  rawCount <- dim(countMatrix)
  keep.exprs <- filterByExpr(countMatrix,
                             group = countMatrix$samples$group,
                             lib.size = countMatrix$samples$lib.size)
  countMatrix <- countMatrix[keep.exprs,, keep.lib.sizes = FALSE] %>%
    calcNormFactors()
  filterCount <- dim(countMatrix)
  print(glue::glue("{100 - round((filterCount[1]/rawCount[1])*100)}% of genes were filtered from {rawCount[2]} samples, \\
where there were {rawCount[1]} genes before filtering and {filterCount[1]} genes after filtering for {tissue}"))
  # Filtered density plot of log-CPM values (panel B of the same PDF)
  logCPM <- cpm(countMatrix, log = TRUE)
  plot(density(logCPM[,1]), col = col[1], lwd = 2, las =2 , main = "", xlab = "")
  title(main = "B. Filtered data", xlab = "Log-cpm")
  abline(v = logCPM.cutoff, lty = 3)
  for (i in 2:nsamples){
    den <- density(logCPM[,i])
    lines(den$x, den$y, col = col[i], lwd = 2)
  }
  legend("topright", designMatrix$Name, text.col = col, bty = "n", cex = 0.5)
  dev.off()
  # Interactive MDS plot written under interactivePlots/
  Glimma::glMDSPlot(countMatrix,
                    groups = designMatrix,
                    path = getwd(),
                    folder = "interactivePlots",
                    html = glue::glue("{tissue}_{sex}_MDS-Plot"),
                    launch = FALSE)
  # Surrogate variables analysis --------------------------------------------
  # (Disabled: svaseq could not be combined with litter as a random effect;
  # see the comments around duplicateCorrelation below.)
  # # Create model matrices, with null model for svaseq, and don't force a zero intercept
  # mm <- model.matrix(~group + Litter,
  #                    data = designMatrix)
  #
  # mm0 <- model.matrix(~1 + Litter,
  #                     data = designMatrix)
  #
  # # svaseq requires normalized data that isn't log transformed
  # cpm <- cpm(countMatrix, log = FALSE)
  #
  # # Calculate number of surrogate variables
  # nSv <- num.sv(cpm,
  #               mm,
  #               method = "leek")
  #
  # # Estimate surrogate variables
  # svObj <- svaseq(cpm,
  #                 mm,
  #                 mm0,
  #                 n.sv = nSv)
  #
  # # Update model to include surrogate variables
  # mm <- model.matrix(~Treatment + svObj$sv,
  #                    data = designMatrix)
  # Voom transformation and calculation of variance weights -----------------
  print(glue::glue("Normalizing {sex} {tissue} samples"))
  # Design: intercept + treatment group (coefficient 2 is the group effect)
  mm <- model.matrix(~group,
                     data = designMatrix)
  # Voom: estimate the mean-variance trend and per-observation weights
  pdf(glue::glue("{tissue}_{sex}/{tissue}_{sex}_voom_mean-variance_trend.pdf"), height = 8.5, width = 11)
  voomLogCPM <- voom(countMatrix,
                     mm,
                     plot = TRUE)
  dev.off()
  # Make litter a random effect, since limma warns "coefficients not estimable" for some litters
  # Ref: https://support.bioconductor.org/p/11956/
  # Obstacle: Cannot do this properly with surrogtate variables, since there's an error when including litter in null model
  # Duplicate correlations alternative for other scenarios:
  # https://support.bioconductor.org/p/68916/
  # https://support.bioconductor.org/p/110987/
  correlations <- duplicateCorrelation(voomLogCPM,
                                       mm,
                                       block = designMatrix$Litter)
  # Extract intraclass correlation within litters (a single scalar used by lmFit)
  correlations <- correlations$consensus.correlation
  # Boxplots of logCPM values before and after voom normalization
  pdf(glue::glue("{tissue}_{sex}/{tissue}_{sex}_normalization_boxplots.pdf"), height = 8.5, width = 11)
  par(mfrow=c(1,2))
  boxplot(logCPM, las = 2, col = col, main = "")
  title(main = "A. Unnormalised data", ylab = "Log-cpm")
  boxplot(voomLogCPM$E, las = 2, col = col, main = "")
  title(main = "B. Normalised data", ylab = "Log-cpm")
  dev.off()
  # Fitting linear models in limma ------------------------------------------
  print(glue::glue("Testing {sex} {tissue} samples for differential expression"))
  # Weight standard errors of log fold changes by within litter correlation
  fit <- lmFit(voomLogCPM,
               mm,
               correlation = correlations,
               block = designMatrix$Litter)
  head(coef(fit)) # NOTE(review): inside a function this value is discarded, not auto-printed
  # Save normalized expression values for WGCNA
  voomLogCPM$E %>%
    as.data.frame() %>%
    tibble::rownames_to_column(var = "Gene") %>%
    openxlsx::write.xlsx(glue::glue("{tissue}_{sex}/{tissue}_{sex}_voomLogCPMforWGCNA.xlsx"))
  # Create DEG tibble -------------------------------------------------------
  print(glue::glue("Creating DEG list of {sex} {tissue} samples"))
  # Coefficient 2 = the treatment-group effect in the ~group design
  efit <- fit %>%
    contrasts.fit(coef = 2) %>% # Change for different models
    eBayes()
  # Final model plot
  pdf(glue::glue("{tissue}_{sex}/{tissue}_{sex}_final_model_mean-variance_trend.pdf"),
      height = 8.5, width = 11)
  plotSA(efit, main = "Final model: Mean-variance trend")
  dev.off()
  # Interactive MA plot
  Glimma::glimmaMA(efit,
                   dge = countMatrix,
                   path = getwd(),
                   html = glue::glue("interactivePlots/{tissue}_{sex}_MDA-Plot.html"),
                   launch = FALSE)
  # Top differentially expressed genes, sorted by p-value; FC is the signed
  # linear fold change derived from logFC. %T>% writes the xlsx as a side
  # effect while passing the tibble through.
  DEGs <- efit %>%
    topTable(sort.by = "P", n = Inf) %>%
    rownames_to_column() %>%
    tibble::as_tibble() %>%
    dplyr::rename(ensgene = rowname) %>%
    dplyr::mutate(FC = dplyr::case_when(logFC > 0 ~ 2^logFC,
                                        logFC < 0 ~ -1/(2^logFC))) %>%
    dplyr::select(SYMBOL, GENENAME, FC, logFC, P.Value, adj.P.Val, AveExpr, t, B, ensgene) %T>%
    openxlsx::write.xlsx(file = glue::glue("{tissue}_{sex}/{tissue}_{sex}_DEGs.xlsx"))
  # For a continuous trait the FC is the change per each unit
  # Volcano Plot ------------------------------------------------------------
  # Thresholds on raw p-value (0.05); no fold-change cutoff.
  volcano <- DEGs %>%
    EnhancedVolcano::EnhancedVolcano(title = "",
                                     labSize = 5,
                                     lab = .$SYMBOL,
                                     x = 'logFC',
                                     y = 'P.Value', # P.Value 'adj.P.Val'
                                     col = c("grey30", "royalblue", "royalblue", "red2"),
                                     pCutoff = 0.05,
                                     FCcutoff = 0.0) +
    ggplot2::coord_cartesian(xlim = c(-3, 3),
                             ylim = c(0, 4))
  ggplot2::ggsave(glue::glue("{tissue}_{sex}/{tissue}_{sex}_volcano.pdf"),
                  plot = volcano,
                  device = NULL,
                  width = 11,
                  height = 8.5)
  # HTML report -------------------------------------------------------------
  print(glue::glue("Saving html report of {sex} {tissue} samples"))
  # From here on DEGs holds only the nominally significant genes (p < 0.05)
  DEGs <- DEGs %>%
    dplyr::filter(P.Value < 0.05) %T>%
    openxlsx::write.xlsx(file = glue::glue("{tissue}_{sex}/{tissue}_{sex}_filtered_DEGs.xlsx"))
  DEGs %>%
    dplyr::rename(Gene = SYMBOL,
                  "p-value" = P.Value,
                  "adjusted p-value" = adj.P.Val,
                  Description = GENENAME,
                  ensembl = ensgene) %>%
    gt() %>%
    tab_header(
      title = glue::glue("{nrow(DEGs)} Differentially Expressed Genes"),
      subtitle = glue::glue("{round(sum(DEGs$logFC > 0) / nrow(DEGs), digits = 2)*100}% up-regulated, \\
{round(sum(DEGs$logFC < 0) / nrow(DEGs), digits = 2)*100}% down-regulated")) %>%
    fmt_number(
      columns = vars("FC", "logFC", "AveExpr", "t", "B"),
      decimals = 2) %>%
    fmt_scientific(
      columns = vars("p-value", "adjusted p-value"),
      decimals = 2) %>%
    as_raw_html(inline_css = TRUE) %>%
    write(glue::glue("{tissue}_{sex}/{tissue}_{sex}_DEGs.html"))
  # Heatmap -----------------------------------------------------------------
  print(glue::glue("Plotting heatmap of {sex} {tissue} samples"))
  # Row-scaled (z-score) heatmap of the filtered DEGs, annotated by
  # treatment group and litter; written straight to PDF by pheatmap.
  voomLogCPM$E[which(rownames(voomLogCPM$E) %in% DEGs$ensgene),] %>%
    as.matrix() %>%
    pheatmap::pheatmap(.,
                       scale = "row",
                       annotation_col = designMatrix %>%
                         tibble::column_to_rownames(var = "Name") %>%
                         dplyr::select(Treatment = group, Litter),
                       color = RColorBrewer::brewer.pal(11, name = "RdBu") %>%
                         rev(),
                       show_colnames = FALSE,
                       show_rownames = F,
                       #angle_col = 45,
                       border_color = "grey",
                       main = glue::glue("Z-Scores of {nrow(DEGs)} Differentially Expressed Genes"),
                       fontsize = 16,
                       filename = glue::glue("{tissue}_{sex}/{tissue}_{sex}_heatmap.pdf"),
                       width = 11,
                       height = 8.5,
                       annotation_colors = list(Treatment = c("PCB" = "#F8766D",
                                                              "Control" = "#619CFF")))
  # Ontologies and Pathways -------------------------------------------------
  print(glue::glue("Performing GO and pathway analysis of {sex} {tissue} samples"))
  # enrichr() needs network access; the whole pipe is wrapped in tryCatch so
  # a web failure does not kill the remaining tissue/sex iterations.
  tryCatch({
    DEGs %>%
      dplyr::select(SYMBOL) %>%
      purrr::flatten() %>%
      enrichR::enrichr(c("GO_Biological_Process_2018",
                         "GO_Cellular_Component_2018",
                         "GO_Molecular_Function_2018",
                         "KEGG_2019_Mouse",
                         "Panther_2016",
                         "Reactome_2016",
                         "RNA-Seq_Disease_Gene_and_Drug_Signatures_from_GEO")) %T>% # %>%
      #purrr::map(~ dplyr::filter(., Adjusted.P.value < 0.05)) %>%
      #purrr::map(~ dplyr::filter(., stringr::str_detect(Genes, ";"))) %>%
      openxlsx::write.xlsx(file = glue::glue("{tissue}_{sex}/{tissue}_{sex}_enrichr.xlsx")) %>%
      DMRichR::slimGO(tool = "enrichR",
                      annoDb = "org.Mm.eg.db",
                      plots = FALSE) %T>%
      openxlsx::write.xlsx(file = glue::glue("{tissue}_{sex}/{tissue}_{sex}_rrvgo_enrichr.xlsx")) %>%
      DMRichR::GOplot() %>%
      ggplot2::ggsave(glue::glue("{tissue}_{sex}/{tissue}_{sex}_enrichr_plot.pdf"),
                      plot = .,
                      device = NULL,
                      height = 8.5,
                      width = 10) },
    error = function(error_condition) {
      print(glue::glue("ERROR: Gene Ontology pipe didn't finish for {sex} {tissue}"))
    })
  print(glue::glue("The pipeline has finished for {sex} {tissue} samples"))
})
# Close the log file opened by sink() at the top of the script.
sink()
|
ac607c1ae2ef2c2d97078b0b0d1da6f8f392c87e | ee54a85e446285fcc568caffe2dab934089e12e2 | /xp11/03_lme4_models_xp11.R | bcc00dd5f53b200f904f6b81238479bdcf90c1ef | [
"CC0-1.0"
] | permissive | bricebeffara/rwa_evaluative_conditioning_data_analysis | e8697aee9cb7097c19ec9207a092d547658246e2 | 81c35c84148f9579b68624e32c0efc0dbad43e3c | refs/heads/main | 2023-06-03T01:15:34.778559 | 2021-06-17T09:55:12 | 2021-06-17T09:55:12 | 375,408,897 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 6,006 | r | 03_lme4_models_xp11.R | # File name: brms_models_xp11.R
# Online archive: gitlab
# Authors: Brice Beffara & Amélie Bret
# Tue Jul 03 14:24:51 2018 ------------------------------
# Contact: brice.beffara@slowpen.science amelie.bret@univ-grenoble-alpes.fr http://slowpen.science
#
# This R script was used to build and compute brms models
# corresponding to the 10th experiment of Amelie Bret's doctoral work
#
# This R script defines and computes brms models
# main effects, interaction effects, and simple slopes of interest
#
# 3 posependent variables of interest :
# RWA (continuous, centered and scaled)
# usvalence : positive (0.5) vs. negative (-0.5)
# and warning : no warn (-0.5) vs. warn (0.5)
#
# and 1 ordinal dependent variables :
# Ratings of Greebles from 1 (very negative) to 9 (very positive)
#
# This program is believed to be free of errors, but it comes with no guarantee!
# The user bears all responsibility for interpreting the results.
#
# This preambule is largely inspired by John K. Kruschke's work at https://osf.io/wp2ry/
#
### To run this program, please do the following:
### 1. Install the general-purpose programming language R from
### http://www.r-project.org/
### Install the version of R appropriate for your computer's operating
### system (Wposows, MacOS, or Linux).
### 2. Install the R editor, RStudio, from
### http://rstudio.org/
### This editor is not necessary, but highly recommended.
### 3. After the above actions are accomplished, this program should
### run as-is in R. You may "source" it to run the whole thing at once,
### or, preferably, run lines consecutively from the beginning.
################################################################################
# Loading packages needed (and installing if necessary) for this part
p_load(lme4, # main package for models
htmlTable, # helps to extract results
xtable,
install = TRUE,
gridExtra,
sjstats,
sjmisc,
update = getOption("pac_update"),
character.only = FALSE)
# In case we want to save summaries
col2keep <- c("Estimate", "l-95% CI", "u-95% CI")
#------------------------------------------------------------------------------------
# We run our first model for fixed main and interaction effects
#------------------------------------------------------------------------------------
# model
warn_resp_lme4 <- lmer(response ~ usvalence * warn * RWAscore + (1|ppt) + (1|stim1),
data = warn_df)
# Save summary & confint
model_gen_xp11_lme4 <- round(cbind(summary(warn_resp_lme4)$coefficients,
confint(warn_resp_lme4)[c(4:11),]), 2)
# export output
png("tables/lme4/model_gen_xp11_lme4.png", height=480, width=720)
p<-tableGrob(model_gen_xp11_lme4)
grid.arrange(p)
dev.off()
#------------------------------------------------------------------------------------
# Second-step models: decompose the interaction effects.
# We look at the interaction between RWA and usvalence at each level of warning.
#------------------------------------------------------------------------------------
#-------------
### RWA * valence in the !!no warning!! condition
#-------------
# model — `no_warn` is a recoding of `warn` (presumably 0 at the
# no-warning level — confirm in the data-prep script), so the lower-order
# usvalence:RWAscore term is the simple interaction in that condition.
warn_resp_nowa_lme4 <- lmer(response ~ usvalence * no_warn * RWAscore + (1|ppt) + (1|stim1),
                            data = warn_df)
# Save summary & confint (rows 4:11 = the 8 fixed effects)
model_nowa_xp11_lme4 <- round(cbind(summary(warn_resp_nowa_lme4)$coefficients,
                                    confint(warn_resp_nowa_lme4)[c(4:11),]), 2)
# export output as a PNG table
png("tables/lme4/model_nowa_xp11_lme4.png", height=480, width=720)
p<-tableGrob(model_nowa_xp11_lme4)
grid.arrange(p)
dev.off()
#-------------
### RWA * usvalence in the !!warning!! condition
#-------------
# model — `ye_warn` recodes warning (presumably 0 at the warning level —
# confirm), giving the simple RWA x valence interaction in that condition.
warn_resp_yewa_lme4 <- lmer(response ~ usvalence * ye_warn * RWAscore + (1|ppt) + (1|stim1),
                            data = warn_df)
# Save summary & confint (rows 4:11 = the 8 fixed effects)
model_yewa_xp11_lme4 <- round(cbind(summary(warn_resp_yewa_lme4)$coefficients,
                                    confint(warn_resp_yewa_lme4)[c(4:11),]), 2)
# export output as a PNG table
png("tables/lme4/model_yewa_xp11_lme4.png", height=480, width=720)
p<-tableGrob(model_yewa_xp11_lme4)
grid.arrange(p)
dev.off()
#------------------------------------------------------------------------------------
# Third-step models: decompose the interaction between RWA and usvalence
# in the no-warning condition.
# (The RWA x usvalence interaction slope includes 0 in the warning
# condition, so only the no-warning side is decomposed further.)
#------------------------------------------------------------------------------------
#############################
##################### no warning
#############################
#-------------
### Simple slope of RWA in the !!negative!! valence & !!no warning!! condition
#-------------
# model — `usvalence_neg` recodes valence (presumably 0 at the negative
# level — confirm), so the RWAscore term is the simple slope of RWA there.
warn_resp_nowa_neg_lme4 <- lmer(response ~ usvalence_neg * no_warn * RWAscore + (1|ppt) + (1|stim1),
                                data = warn_df)
# Save summary & confint (rows 4:11 = the 8 fixed effects)
model_nowa_neg_xp11_lme4 <- round(cbind(summary(warn_resp_nowa_neg_lme4)$coefficients,
                                        confint(warn_resp_nowa_neg_lme4)[c(4:11),]), 2)
# export output as a PNG table
png("tables/lme4/model_nowa_neg_xp11_lme4.png", height=480, width=720)
p<-tableGrob(model_nowa_neg_xp11_lme4)
grid.arrange(p)
dev.off()
#-------------
### Simple slope of RWA in the !!positive!! valence & !!no warning!! condition
#-------------
#model
warn_resp_nowa_pos_lme4 <- lmer(response ~ usvalence_pos * no_warn * RWAscore + (1|ppt) + (1|stim1),
data = warn_df)
# Save summary & confint
model_nowa_pos_xp11_lme4 <- round(cbind(summary(warn_resp_nowa_pos_lme4)$coefficients,
confint(warn_resp_nowa_pos_lme4)[c(4:11),]), 2)
# export output
png("tables/lme4/model_nowa_pos_xp11_lme4.png", height=480, width=720)
p<-tableGrob(model_nowa_pos_xp11_lme4)
grid.arrange(p)
dev.off()
|
c26bcf647efddad8c4dee524523fa5d8652b3d59 | 9aafde089eb3d8bba05aec912e61fbd9fb84bd49 | /codeml_files/newick_trees_processed/3710_0/rinput.R | 8dc09fe6a3d6abe05808bd3269dee7312c1710b4 | [] | no_license | DaniBoo/cyanobacteria_project | 6a816bb0ccf285842b61bfd3612c176f5877a1fb | be08ff723284b0c38f9c758d3e250c664bbfbf3b | refs/heads/master | 2021-01-25T05:28:00.686474 | 2013-03-23T15:09:39 | 2013-03-23T15:09:39 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 135 | r | rinput.R | library(ape)
testtree <- read.tree("3710_0.txt")
unrooted_tr <- unroot(testtree)
write.tree(unrooted_tr, file="3710_0_unrooted.txt") |
df93f7eb0a1738ba9026c6fe3d67f295743bf4a1 | 4dcbd7bde2c131cb0d3c96990eb20df6e9ea22ed | /man/sub.obj.Rd | 24a5d3ea1079c59842b887720ed7a3145a06c48b | [] | no_license | LindaLuck/VoxR | e3428d0967a2d5b5b3470cdfe85df92d75e7dfb9 | 4ad74f91aa421881120fc387137861d57c92f790 | refs/heads/master | 2022-12-24T02:44:23.030784 | 2020-09-30T07:50:08 | 2020-09-30T07:50:08 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 242 | rd | sub.obj.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/subobj.R
\name{sub.obj}
\alias{sub.obj}
\title{Deprecated function}
\usage{
sub.obj(...)
}
\arguments{
\item{...}{parameters}
}
\description{
Deprecated function
}
|
42472f2d46987657ab4c973e7cfdc5a3450e836d | e434c57f4ca421735aaae1046f67c916e6b91bd4 | /man/captioner-package.Rd | 45bf6b77145d5107d62e7a5812a30c08ec7a8ac1 | [] | no_license | achetverikov/captioner | ac03bafe1334744cb2152bb93de1eb13ed084203 | ba2b2ceae048d8deafdf77d25752dab8cce81633 | refs/heads/master | 2021-05-30T02:48:12.645510 | 2015-02-10T22:52:00 | 2015-02-10T22:52:00 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 353 | rd | captioner-package.Rd | % Generated by roxygen2 (4.0.2): do not edit by hand
\docType{package}
\name{captioner-package}
\alias{captioner-package}
\title{captioner: A package for numbering figures and generating captions}
\description{
Contains the function \code{\link{captioner}} for generating numbered captions.
}
\author{
Alathea D Letaw, \email{alathea@zoology.ubc.ca}
}
|
a03d090f5041f16e66b5ad9ab6462396605118c9 | 7ae3533e4a28daa4f27f8a36ad71bb0a03cab159 | /MNN_clust.R | bfba82f3231c4c572947990e0d3884109993812e | [
"Apache-2.0"
] | permissive | Bionett/merge_ds_norm | 75dc51b92e6b718da7360e2c7b82e606457bcd52 | ab8eb23983e2280d5fabf5449949a8e854f10316 | refs/heads/master | 2020-06-04T01:49:13.565069 | 2019-06-13T19:37:34 | 2019-06-13T19:37:34 | 191,821,847 | 0 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,291 | r | MNN_clust.R | # unsupervised clustering methods
# semiunsupervised method
## Matching Mutual Nearest Neighbors
#BiocManager::install("scran")
library(scran)
library(data.table)
library(dplyr)
library(scater)
file2esets <- function(esetfile, inputfolder, sampattern, metafile, batch) {
eset <- fread(paste0(inputfolder, esetfile))
meta <- fread(paste0(inputfolder, metafile))
bs <- unique(meta[[batch]])
setNames(
lapply(bs, function(b) {
bsampls <- dplyr::filter(meta, batch == b)[["Sample_geo_accession"]]
select(eset, matches(paste(c("Symbol", bsampls), collapse = "|")))
}), bs)
}
#sapply(file2esets(fs[1], inputfolder, sampattern, metafile, batch), dim)
eset2obj <- function(eset, sampattern) {
emat <- data.matrix(select(eset, matches(sampattern)))
rownames(emat) <- eset$Symbol
sce <- SingleCellExperiment(list(counts = emat, logcounts = emat))
sce
}
do_mnn_corr <- function(esetfiles, inputfolder, sampattern, metafile, batch, ncores) {
#mclapply(esetfiles, function(esetfile) {
lapply(esetfiles, function(esetfile) {
if(esetfile %in% c("blood_sjia_pjia_UPC_eset_sjia_sex_difF_REM_deg.tsv",
"blood_sjia_pjia_yugene_eset_sjia_sex_difF_REM_deg.tsv",
"blood_sjia_pjia_zscore_eset_sjia_sex_difF_REM_deg.tsv")) {
NULL
} else {
print(esetfile)
# get merged eset file and split it in several tables based on the GSE accessions
print("starting...")
esets <- file2esets(esetfile, inputfolder, sampattern, metafile, batch)
print("esetfile splitted...")
# from tables to SingleCellExpression objects, deprecating the GSE58667 that contains just 4 samples
print(names(esets))
esetobjs <- lapply(esets[c("GSE55319", "GSE58667", "GSE80060_2011-03", "GSE80060", "GSE17590")],
function(e) eset2obj(e, sampattern))
# MNN batch correction
set.seed(224033911)
print("correcting...")
mnn.out <- do.call(fastMNN, c(esetobjs, list(k = 10, d = 50, approximate = TRUE, auto.order=TRUE)))
print("corrected...")
# Assembling original datset
omat <- Reduce(cbind, lapply(esetobjs, function(eo) logcounts(eo)))
# writing MNN corrected matrix
print("writing MNN corrected eset...")
cor.exp <- tcrossprod(mnn.out$rotation, mnn.out$corrected)
colnames(cor.exp) <- colnames(omat)
cor.exp <- cbind(data.frame("Symbol" = rownames(omat)), cor.exp)
write.table(cor.exp, paste0("../result/MNN_corrected_", gsub("\\.tsv", "", esetfile), ".tsv"),
sep = "\t", quote = FALSE, row.names = FALSE)
print("visualizing...")
# visualize cluster differences among the original and corrected datasets through tSNE plots
sce <- SingleCellExperiment(list(counts = omat, logcounts = omat))
reducedDim(sce, "MNN") <- mnn.out$corrected
sce$Batch <- as.character(mnn.out$batch)
# Including disease metadata
msce <- data.frame("Sample_geo_accession" = rownames(sce@colData))
# Reference metadata
meta <- read.delim(paste0(inputfolder, metafile))
rownames(meta) <- meta$Sample_geo_accession
msce <- merge(msce, meta, by = "Sample_geo_accession")
sce$Disease <- as.character(msce$Characteristics..disease.)
sce$scandate <- as.character(msce$scandate_yymm)
set.seed(100)
# Using irlba to set up the t-SNE, for speed.
# visualization of the original data
osce <- runPCA(sce, ntop=Inf, method="irlba")
osce <- runTSNE(osce, use_dimred="PCA")
ot <- plotTSNE(osce, colour_by="Batch") + ggtitle("Original")
dot <- plotTSNE(osce, colour_by="Disease") + ggtitle("Original")
sdot <- plotTSNE(osce, colour_by="scandate") + ggtitle("scandate")
# Visualizartion of the MNN transformed data
set.seed(100)
csce <- runTSNE(sce, use_dimred="MNN")
ct <- plotTSNE(csce, colour_by="Batch") + ggtitle("Corrected")
dct <- plotTSNE(csce, colour_by="Disease") + ggtitle("Corrected")
sdct <- plotTSNE(csce, colour_by="scandate") + ggtitle("scandate")
# Clustering
# The aim is to use the SNN graph to perform clustering of cells
# via community detection algorithms in the igraph package
snn.gr <- buildSNNGraph(sce, use.dimred = "MNN")
clusters <- igraph::cluster_walktrap(snn.gr)
table(clusters$membership, sce$Batch)
csce$Cluster <- factor(clusters$membership)
# Ploting cluster on tSNE plot
cls <- plotTSNE(csce, colour_by="Cluster")
# Ploting batch, disease and ad-hoc clustering on the original and MNN transformed data
pdf(paste0("../result/tSNE_MNN_corrected_", gsub("\\.tsv", "", esetfile), ".pdf"), width = 15, height = 15)
multiplot(ot, ct, sdot, cls, dot, dct, sdct, cls, cols=2)
dev.off()
#}, mc.cores = ncores)
}
})
}
# Inputs
inputfolder <- "../data/"
esetpattern <- "^blood_sjia_pjia_"
sampattern <- "GSM"
metafile <- "metadata_blood_sjia_pjia_normal_samples.tsv"
batch <- "batch" #"GSE_accession_wfound"
esetfiles <- list.files(path = inputfolder, pattern = esetpattern)
ncores <- 3
do_mnn_corr(esetfiles[1:4], inputfolder, sampattern, metafile, batch, ncores)
|
f66779944da7364d3f5852572d01f42124fee998 | a0473642bce536cc8960ffbb3dea88ff32cb75ec | /server.R | 48be564253281b7d33d6af1134698ede0af7b8bf | [] | no_license | j11011/j11011.github.io | 494ca020bc3163702f98f8cd05148b578714e2ec | 4fd68de3e60facc2278328a3011dac7599a239a9 | refs/heads/master | 2020-03-12T03:50:39.726827 | 2018-04-28T02:34:24 | 2018-04-28T02:34:24 | 130,432,646 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,461 | r | server.R | #
# This is the server logic of a Shiny web application. You can run the
# application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(shiny)
library(shinyjs)
library(scales)
bsearch=function(nMin,nMax,nTarget){
nTarget=round(nTarget)
text1=paste("Search from",nMin,"to",nMax,"the number",nTarget,":\n")
text2=""
arr=c(nMin:nMax)
arr2=arr
while(1){
if(nTarget>nMax | nTarget<nMin){
text1 = "Number not in array"
break()
}
search1=round(mean(arr2))
if(search1==nTarget){
#print(paste("Encontre",nTarget))
text2=paste("Found",nTarget)
text1=paste(text1,text2,"\n",sep="")
break
}else if(search1>nTarget){
nMax=search1-1
arr2=c(nMin:nMax)
#print(paste(search1,"es mayor a", nTarget ,"ahora buscare de",nMin,"a",search1))
text2=paste(search1,"is greater than", nTarget ,"now I will look from",nMin,"to",nMax)
}else if(search1<nTarget){
nMin=search1+1
arr2=c(nMin:nMax)
#print(paste(search1,"es menor a", nTarget ,"ahora buscare de",search1,"a",nMax))
text2=paste(search1,"is less than", nTarget ,"now I will look from",nMin,"to",nMax)
}
text1=paste(text1,text2,"\n",sep="")
}
return(text1)
}
bubble_ord=function(arr){
text1=""
#print(any(is.na(arr)))
if(any(is.na(arr))==F){
for(i in (2:length(arr))){
ii=i-1
for(j in c(1:(length(arr)-ii))){
if(arr[j]>arr[j+1]){
aux=arr[j]
arr[j]=arr[j+1]
arr[j+1]=aux
}
text1=paste(text1,paste(arr,collapse=","),"\n")
}
}
}else{
text1="The vector is wrong"
}
return(text1)
}
qs<-function(vec,start=1,finish=length(vec),text1="") {
if(length(vec)>1){
if (finish>start) {
pivot<-vec[start]
N<-length(vec)
window<-((1:N)>=start) & ((1:N)<=finish)
low_part<-vec[(vec<pivot) & window]
mid_part<-vec[(vec==pivot) & window]
high_part<-vec[(vec>pivot) & window]
if (start>1) text1=paste(text1,paste(vec[1:(start-1)],collapse = " "),"| ")
text1=paste(text1,paste(low_part,collapse = " "),">>>",
paste(mid_part,collapse = " "),"<<<",paste(high_part,collapse = " "))
#cat(low_part,">>>",mid_part,"<<<",high_part)
if (finish<N) text1=paste(text1," |",paste(vec[(finish+1):N],collapse = " "))
text1=paste(text1,"\n")
vec[window]<-c(low_part,mid_part,high_part)
if (length(low_part)>0) {
low_top<-start+length(low_part)-1
l_res<-qs(vec,start,low_top,text1=text1)
text1=l_res[[2]]
vec[start:low_top]=l_res[[1]][start:low_top]
}
if (length(high_part)>0) {
high_bottom<-finish-length(high_part)+1
l_res<-qs(vec,high_bottom,finish,text1=text1)
text1=l_res[[2]]
vec[high_bottom:finish]=l_res[[1]][high_bottom:finish]
}
}
}else{
vec=c(0)
text1=""
}
return(list(vec,text1))
}
matrix_frame=function(n){
if(n>1){
text=" 1"
for(i in (2:n)){
text=paste0(text,",",toString(i))
}
text=paste0(text,"\n")
for(i in (1:n)){
text=paste0(text,toString(i)," ")
for(i2 in (1:(n-1))){
text=paste0(text,",")
#cat(text,"\n")
}
text=paste0(text,"\n")
}
}else{
text="Error!"
}
return(text)
}
borrarFirstn=function(n,text1){
for(i in (1:n)){
text1=gsub(paste0("\n",toString(i)),"\n",text1)
}
text1=gsub(" \n",",\n",text1)
con <- textConnection(text1)
data <- read.csv(con)
#cat(text1)
colnames(data)=c(1:ncol(data))
return(data)
}
# Define server logic required to draw a histogram
shinyServer(function(input, output) {
v <- reactiveValues(df_t= "0",df=data.frame())
#binary
observeEvent(input$buscar1,{
nmin=isolate(as.numeric(input$min1))
nmax=isolate(as.numeric(input$max1))
ntarget=isolate(as.numeric(input$target1))
output$text1 <- renderText({bsearch(nmin,nmax,ntarget)})
})
#bubble
observeEvent(input$ord2,{
arr2=as.numeric(unlist(strsplit(input$arr2, split=",")))
output$text2 <- renderText({bubble_ord(arr2)})
})
#quicksort
observeEvent(input$ord3,{
text1=""
arr2=as.numeric(unlist(strsplit(input$arr3, split=",")))
output$text3 <- renderText({qs(arr2,text1=text1)[[2]]})
})
})
|
54e5b17686b11c5cb33b6c80ebf9b8240fb79cd3 | 247946f5456e093a7fe49f57e722477ac9dc010e | /man/ezumap.Rd | d1cbce51e9456b2404e26cde5ad7d0ec68cd7b25 | [
"MIT"
] | permissive | jdreyf/jdcbioinfo | b718d7e53f28dc15154d3a62b67075e84fbfa59b | 1ce08be2c56688e8b3529227e166ee7f3f514613 | refs/heads/master | 2023-08-17T20:50:23.623546 | 2023-08-03T12:19:28 | 2023-08-03T12:19:28 | 208,874,588 | 3 | 1 | null | null | null | null | UTF-8 | R | false | true | 3,099 | rd | ezumap.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ezumap.R
\name{ezumap}
\alias{ezumap}
\title{UMAP plot of first two dimensions}
\usage{
ezumap(
object,
pheno.df,
name = "umap",
pca = TRUE,
initial_dims = nrow(pheno.df),
config = umap::umap.defaults,
method = c("naive", "umap-learn"),
preserve.seed = TRUE,
alpha = 1,
all.size = NULL,
facet = NULL,
title = NULL,
subtitle = NULL,
rm.leg.title = FALSE,
labels = FALSE,
manual.color = NULL,
manual.shape = NULL,
plot = TRUE,
...
)
}
\arguments{
\item{object}{Matrix-like object with features (e.g. genes) as rows and samples as columns.}
\item{pheno.df}{Data frame with rows as samples and columns as phenotypes.}
\item{name}{Name of file to create. Set to \code{NA} to plot to screen instead of to file.}
\item{pca}{logical; Whether an initial PCA step should be performed (default: TRUE)}
\item{initial_dims}{integer; the number of dimensions that should be retained in the initial PCA step (default: 50)}
\item{config}{object of class umap.config}
\item{method}{character, implementation. Available methods are 'naive'
(an implementation written in pure R) and 'umap-learn' (requires python
package 'umap-learn')}
\item{preserve.seed}{logical, leave TRUE to insulate external code from
randomness within the umap algorithms; set FALSE to allow randomness used
in umap algorithms to alter the external random-number generator}
\item{alpha}{Transparency, passed to \code{\link[ggplot2]{geom_point}}.}
\item{all.size}{Passed to \code{\link[ggplot2]{geom_point}} \code{size} parameter to give size for all points without
appearing in legend. \code{ggplot2} default is size=2.}
\item{facet}{A formula with columns in \code{pheno.df} to facet by.}
\item{title}{Title text; suppressed if it is \code{NULL}.}
\item{subtitle}{Subtitle text; suppressed if it is \code{NULL} or \code{title} is \code{NULL}. If you'd like a
\code{subtitle} but no \code{title}, set \code{title = ""}.}
\item{rm.leg.title}{Logical indicating if legend title should be removed.}
\item{labels}{Logical, should sample labels be added next to points?}
\item{manual.color}{Vector passed to \code{\link[ggplot2:scale_manual]{scale_colour_manual}} for creating a
discrete color scale. Vector length should be equal to number of levels in mapped variable.}
\item{manual.shape}{Vector passed to \code{\link[ggplot2:scale_manual]{scale_shape_manual}} for creating a
discrete color scale. Vector length should be equal to number of levels in mapped variable.}
\item{plot}{Logical; should plot be generated?}
\item{...}{list of settings with values overwrite defaults from UMAP \code{config} or passed to \code{\link[ggplot2:aes_]{aes_string}}.}
}
\value{
Invisibly, a \code{ggplot} object. Its \code{data} element contains the first two principal components
appended to \code{pheno.df}.
}
\description{
UMAP plot of first two dimensions using \pkg{ggplot2}.
}
\details{
\code{object} must have colnames, and if \code{pheno.df}
is given, it is checked that \code{colnames(object)==rownames(pheno.df)}.
}
|
cdb4fd31f542405371b2519a8bbb5af44998a439 | cc42b601ff02cf5b62007caef366249d0c430898 | /misc/std12weeks.R | e8b5402a9885f6819568e62eb2b25063993b2603 | [] | no_license | tkthomas27/r_code | a10bfa3f687b7dc002679571c8e23f2740345c6f | e6c15544e29942d47c94bfda497cf1ade47035fa | refs/heads/master | 2020-04-05T11:44:52.349271 | 2018-07-06T19:35:42 | 2018-07-06T19:35:42 | 81,157,428 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,839 | r | std12weeks.R | # *------------------------------------------------------------------
# | PROGRAM NAME: 12 week standard deviation
# | DATE: 2017-12-07
# | CREATED BY: kyle thomas
# *----------------------------------------------------------------
#set working directory
setwd("/Volumes/tsandino_gomobile_project/Go Mobile/Paper_Daily Incentives_Select Stores/C. Stata files")
# load libraries
library(tidyverse) #data processing
library(xts) # time series
library(zoo) # time series
library(padr) # adding missing days if desired
# *----------------------------------------------------------------
# initialize data
# *----------------------------------------------------------------
# read in data
df <- read_csv("sales_variability_r.csv")
# format date
df$date <- as.Date(df$date, "%d %b %y")
#split data by store
dfs <- split(df,df$store)
# *----------------------------------------------------------------
# calculate rolling 84 day weekly standard deviation
# *----------------------------------------------------------------
# create empty dataframe to store the results
store_results <- data.frame(date=NA, store=NA, sales = NA, std_12=NA)
# loop through each store
for(z in 1:35){
dfx <- dfs[[z]] #load select store
#start with indices 1-84 and increment by 1 until none is left
i<-1
j<-84
# create empty dataframe to store standard deviation results
date = as.Date("2000-01-01")
std_12 = 1111
std_results = data.frame(date,std_12)
# if we want, we can pad out missing days
#dfx <- pad(dfx) %>% replace_na(list(sales = 0))
# roll forward 84 day window by 1 and compute standard deviation on weekly basis
while(j<nrow(dfx)){
#subset for 84 days
df1 <- dfx[i:j,]
#make into a time series object
df2 <- xts(df1[,3], order.by = df1$date)
#down sample to weekly by summing sales amounts
weeks <- period.apply(df2, INDEX = endpoints(df2, on="days", k=7), FUN = sum)
#compute standard deviation with anti-Bessel's correction
std_12_week <- sqrt(sd(weeks$sales)^2 * (11/12))
#add to stored results
std_results <- rbind(std_results, c(as.character(index(df2[84])), round(std_12_week,2)))
#increment indices
i<-i+1
j<-j+1
}
#match store's std results to original data series; this may not be necessary
df3 <- inner_join(dfx, std_results)
#add to overall results file
store_results <- rbind(store_results, df3)
}
# *----------------------------------------------------------------
# store results
# *----------------------------------------------------------------
# drop blank first row
store_results <- store_results[2:nrow(store_results),]
# convert dates
store_results$date <- as.Date(store_results$date)
# write to csv
write_csv(store_results,"store_results.csv")
|
9e680f7cd533fc3a971d4e609d25bf112cc7ae21 | 41503f4dbd79d3713c118ee79411b17b9025bfbf | /05-Famlia Apply.R | 759243bb21b7d1b9fe9d7230ada3e8dfe705fe5a | [] | no_license | tcoliveira/R_Cap03 | f795853e54ffd1bc2f0f6b30c58cb2062bb42aae | e3f8bebdda21181300624c70edd30501073eff67 | refs/heads/master | 2020-03-22T20:19:46.906283 | 2018-09-21T15:20:15 | 2018-09-21T15:20:15 | 140,590,377 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,616 | r | 05-Famlia Apply.R | ##Vetores = cadeia ordenadas de elementos
#Loops são ineficientes no R, pode ser a familia apply
#Apply, aplica uma função a todas as linhas / colunas de uma matriz / df
#lapply, retorna nova lista / sapply
#tapply
#Usando loop
lista1 <- list(a = (1:10), b = (45:77))
?sapply
sapply(lista1, mean) ###objeto que quero percorrer, função. sapply é um loop
x <- matrix(rnorm(9), nr = 3, byrow = T)
x
apply(x, 1, mean)
apply(x, 2, mean)
apply(x, 1, plot)
resultapply <- apply(x, 1, mean)
resultapply
escola <- data.frame(Aluno = c("Allan", "Alice", "Aline", "Alana", "Alex", "Adovaldo"),
Matematica = c(90,80,85,87,56,79),
Geografia = c(88,81,85,20,21,30),
Quimca = c(78,60,74,60,51,90))
escola
escola$Geografia
#Calculando a media por aluo
escola$Media = NA
escola
escola$Media = apply(escola[,c(2,3,4)],1,mean)
escola
escola$Media = round(escola$Media)
escola
##tapply() sqldf
install.packages('sqldf')
require(sqldf)
escola2 <- data.frame(Aluno = c("Allan", "Alice", "Aline", "Alana", "Alex", "Adovaldo"),
Semestre = c(1,1,1,2,2,2),
Matematica = c(90,80,85,87,56,79),
Geografia = c(88,81,85,20,21,30),
Quimca = c(78,60,74,60,51,90))
escola2
sqldf("select aluno, sum(Matematica), sum(Geografia), sum(Quimca) from escola2 group by aluno")
tapply(c(escola2$Matematica), escola2$Aluno, sum)
?by
#lapply()
?lapply
lista1<- list(a = (1:10), b = (45:77))
lista1
lapply(lista1, sum)
sapply(lista1, sum)
#vapply()
vapply(lista1, fivenum, c(Min. =0, "1st"=0))
|
0ed1ba056f91ea94545cfe433af5cfa75051c393 | c80d6dc4eb6a079fef9369095c1ac1bd0b8301d3 | /R/lpUtil.R | 92a1bbf2b517ff5acc410099663d6b8e458e0c6e | [] | no_license | pyrovski/MPItrace | 5b674228277c4af666476fd584db2966e7cea16b | 0645397426a5251becdd97c1c2474630501eb499 | refs/heads/master | 2020-05-17T03:16:34.837344 | 2015-03-19T01:16:25 | 2015-03-19T01:16:25 | 32,493,346 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 27,084 | r | lpUtil.R | #!/usr/bin/env Rscript
require('igraph')
require('rjson')
require('data.table')
require('parallel')
source('./util.R')
source('./read.R')
source('~/local/bin/pbutils.R')
ilpFileTypes = c('duration', 'edges')
fixedLPFileTypes = c('duration', 'edges')
.solveLP = function(entrySpaceRow, nodePowerLimits, forceLocal=F, fixedOnly=T, roundMode='step'){
cuts =
sub('.edges.csv','',
Sys.glob(paste(gsub('[/. ]', '_', entrySpaceRow$key),
'*.edges.csv', sep='')))
ilpCuts = grep('ILP', cuts, v=T)
fixedCuts = setdiff(cuts, ilpCuts)
rm(cuts)
mclapply(fixedCuts, function(cut){
mclapply(nodePowerLimits, function(pl) {
edgesFile = paste(cut, '.p', format(pl, nsmall=1), 'w.edges', sep='')
cutEdges = paste(cut, '.edges.csv', sep='')
if(file.exists(edgesFile) && file.info(edgesFile)$mtime > file.info(cutEdges)$mtime){
cat(edgesFile, 'exists\n')
return(NULL)
}
command =
paste('prefix=', cut, ' powerLimit=', pl*entrySpaceRow$ranks,
if(forceLocal) ' FORCELOCAL=1',
' roundMode=', roundMode,
' ./fixed.sh', sep='')
print(command)
system(command, intern=T)
})
})
}
solveLP = function(...){
load('../mergedEntries.Rsave')
mcrowApply(entrySpace, .solveLP, ...)
}
readLP = function(filename){
a = fromJSON(file=filename)
f = function(b){
arraySel = grep('[[]', names(b))
singleSel = setdiff(1:length(b), arraySel)
if(length(arraySel)){
arrayNames = gsub('[[].*[]]', '', names(b)[arraySel])
uArrayNames = unique(arrayNames)
arrayVars = nnapply(uArrayNames, function(name){
b = b[grep(paste(name, '[[]', sep=''), names(b))]
map = strsplit(gsub('[]]', '', names(b)), '[[]')
indices = as.numeric(sapply(map, '[[', 2))
b = .rbindlist(lapply(b, as.data.table))
b$index = indices
if('Id' %in% names(b))
b[, Id := NULL]
b
})
} else
arrayVars = NULL
if(length(singleSel)){
singleVars = lapply(b[singleSel], function(e){
b = as.data.table(e)
if('Id' %in% names(b))
b[, Id := NULL]
if(ncol(b) == 1 && identical(names(b), 'Value'))
b = b[[1]]
b
})
} else
singleVars = NULL
c(arrayVars, singleVars)
}
a$Solution[[2]]$Variable = f(a$Solution[[2]]$Variable)
a$Solution[[2]]$Constraint = f(a$Solution[[2]]$Constraint)
a$Solution[[2]]$Objective = f(a$Solution[[2]]$Objective)
a
}
#!@todo adapt for flow ilp, fixed ilp
reconcileLP = function(resultFile, timesliceFile, powerLimit, mode='split'){
result = readLP(resultFile)
vertexStartTimes = result$Solution[[2]]$Variable$vertexStartTime
result$Solution[[2]]$Variable$vertexStartTime = NULL
setnames(vertexStartTimes, c('index', 'Value'), c('vertex', 'start'))
setkey(vertexStartTimes, vertex)
tryCatch(load(timesliceFile), finally=NULL)
vertices = slice[, list(vertex=union(src, dest))]
setkey(vertices, vertex)
vertices = vertexStartTimes[vertices]
rm(vertexStartTimes)
##!@todo this is incorrect; some vertices are not present in this
##!timeslice or the next; just because they're destinations doesn't
##!mean we need to assign them start times.
vertices[is.na(start), start := 0.0]
e_uids = unique(slice[, e_uid])
taskDuration = result$Solution[[2]]$Variable$taskDuration
setnames(taskDuration, c('index', 'Value'), c('e_uid', 'lpWeight'))
setkey(taskDuration, e_uid)
taskDuration = taskDuration[J(e_uids)]
taskDuration[is.na(lpWeight), lpWeight := 0]
taskPower = result$Solution[[2]]$Variable$taskPower
setnames(taskPower, c('index', 'Value'), c('e_uid', 'lpPower'))
setkey(taskPower, e_uid)
taskPower = taskPower[J(e_uids)]
setkey(taskPower, e_uid)
## these should not exist, at least for comp edges
##!@todo warn on NA power for comp edges
taskPower = taskPower[slice[,head(.SD, 1),keyby=e_uid,.SDcols=c('type')]]
if(nrow(taskPower[is.na(lpPower) & type == 'comp']) > 0){
stop('LP should provide all comp task power entries')
}
taskPower[is.na(lpPower) & type == 'comp', lpPower := 0]
taskPower[, type := NULL]
setkey(taskDuration, e_uid)
task = merge(taskDuration, taskPower, all=T)
rm(taskPower, taskDuration)
result = list()
if(mode=='keepAll'){
setkey(slice, e_uid)
setkey(task, e_uid)
##edges = slice[task]
result$edges = slice
result$lp = task
} else { ## mode != 'keepAll'
setkey(slice, e_uid, weight, power)
setkey(task, e_uid, lpWeight, lpPower)
f = function(a, b) abs(a-b) < 1e-8
edges = lapply(e_uids, function(u){
s = slice[J(u)]
if(nrow(s) == 1){
s[, frac := 1]
return(s)
}
lp = task[J(u)]
unconstrained = lp[lpWeight > .9 & (lpWeight %% 1) < .1]
if(nrow(unconstrained) > 0){
cat('unconstrained weight(s)!\n')
print(unconstrained)
}
##!@todo this can be done with multiple e_uids at once
##! this needs to be approximate
##m = s[lp, nomatch=0]
m = s[f(weight, lp$lpWeight) & f(power, lp$lpPower)]
if(nrow(m) > 0){
m[, frac := 1]
return(m)
}
##!@todo figure out how to get Pyomo to be more precise with its output
##! can re-adjust lp weight based on selected power
m = rbind(head(s[power < lp$lpPower], 1), tail(s[power > lp$lpPower], 1))
if(mode=='combined'){
## find a single config that is closest to the LP
m[, dist := sqrt(((power-lp$lpPower)/lp$lpPower)^2+((weight - lp$lpWeight)/lp$lpWeight)^2)]
m = m[which.min(dist)]
m$dist = NULL
m$frac=1
return(m)
} else if(mode == 'combinedLE'){
## find a single config that is always under the power constraint
m = m[power <= powerLimit, .SD[which.min(weight)], by=e_uid]
m$frac=1
return(m)
} else if(mode == 'split'){ ## split configs
fastFrac = (lp$lpPower - m[1, power])/diff(m[, power])
slowFrac = 1 - fastFrac
m$frac = c(fastFrac, slowFrac)
##! adjust weight by frac
m[, weight := weight * frac]
###! m should contain two rows; one for each configuration neighboring
###! the LP-selected power/performance point
return(m)
}
})
edges = .rbindlist(edges)
result$edges = edges
}
result$vertices=vertices[order(start)]
result
}
## regex fragment matching the "<seconds>.<fraction>" timestamp embedded
## in LP result filenames
timeStr = '[0-9]+[.][0-9]+'
##!@todo save results from this function, check for newer inputs than previous result
##!@todo make sure result files are newer than csv and Rsave inputs
## Read and reconcile every LP result file for one command prefix.
##
## Result files are named "<command>_<time>.p<limit>w.results"; each
## "<command>_<time>.Rsave" holds the matching timeslice table.  Returns
## a list keyed by power limit; each element is a list keyed by
## timeslice time holding reconcileLP() output.  '...' is forwarded to
## reconcileLP (e.g. mode).
readCommandResults = function(command, ...){
  cat(command, '\n')
  resultFiles =
    list.files(pattern=
               paste(command, '_', timeStr, '[.]p.*w[.]results$', sep=''))
  ## distinct power limits present among this command's result files
  powerLimits =
    unique(sub('w[.]results$', '',
               sub(paste(command, '_', timeStr, '[.]p', sep=''), '', resultFiles)))
  prefixes = unique(sub('[.]p.*w[.]results$', '', resultFiles))
  timesliceFiles = paste(prefixes, '.Rsave', sep='')
  times = sub(paste(command, '_', sep=''), '', prefixes)
  ## reconcile all timeslices of this command at one power limit
  f = function(powerLimit){
    powerLimit = as.numeric(powerLimit)
    cat(powerLimit, 'w', '\n')
    resultFiles =
      list.files(pattern=
                 paste(command, '_', timeStr, '[.]p', powerLimit, 'w[.]results$',
                       sep=''))
    times = sub('[.]p.*w[.]results$', '',
                sub(paste(command, '_', sep=''), '', resultFiles))
    ## NOTE(review): timesliceFiles comes from the full prefix list; this
    ## assumes every timeslice has a result file at every power limit --
    ## verify, since mcmapply would otherwise recycle mismatched vectors.
    result = mcmapply(reconcileLP, resultFiles, timesliceFiles, powerLimit, ...,
                      SIMPLIFY=FALSE)
    names(result) = times
    result
  }
  nnapply(powerLimits, f)
}
## Read LP results for every command prefix found in the working
## directory.  Loads the merged entry space into the global environment,
## publishes the distinct command prefixes in the global 'commands', and
## parses each command's result files; '...' is forwarded to
## readCommandResults (and on to reconcileLP).
lpGo = function(...){
  load('../mergedEntries.Rsave', envir=.GlobalEnv)
  resultFiles = list.files(pattern='.*[.]results$')
  ## strip the "_<time>.p<limit>w.results" suffix to recover the command
  suffix = paste('_', timeStr, '[.]p.*w[.]results', sep='')
  commands <<- unique(sub(suffix, '', resultFiles))
  nnapply(commands, readCommandResults, ...)
}
## Read ILP (and fixedLP) per-cut result tables from the working directory.
##
## pattern: regex restricting which result-file prefixes to read.
## powerLimitMin: drop power limits below this value (0 keeps all).
## ...: currently unused; reserved for forwarding.
##
## File naming: "<prefix>.cut_<n>.p<limit>w.<type>" where <type> comes
## from ilpFileTypes / fixedLPFileTypes ("fixedLP" in the prefix selects
## the latter); "<prefix>.cuts.csv" lists the expected cuts.  Stops when
## any expected cut is missing for a power limit.  Returns a nested
## list: prefix -> power limit -> cut -> file type -> data.table
## (NULL, with a warning, where a file could not be read).
ilpGo = function(pattern='.*', powerLimitMin=0, ...){
  ## get all files, then filter by prefix, then by power limit and cut
  load('../mergedEntries.Rsave', envir=.GlobalEnv)
  cutPattern = 'cut_[0-9]+'
  plPattern = 'p.*w'
  pattern = paste(pattern, '.*[.]duration$', sep='')
  files = list.files(pattern=pattern)
  prefixes =
    unique(sub(paste('(.*)', cutPattern, plPattern, 'duration', sep='[.]'),
               '\\1', files))
  if(!length(prefixes)){
    warning('no ILP result files!', immediate.=TRUE)
    return(NULL)
  }
  nnapply(prefixes, function(prefix){
    ## fixedLP prefixes use the fixed-LP set of output file types
    fixed = length(grep('fixedLP', prefix)) > 0
    files = list.files(pattern=paste(prefix, cutPattern, plPattern,
                         'duration$', sep='[.]'))
    powerLimits =
      sub('p([0-9.]+)w', '\\1',
          unique(sub(paste('.*', cutPattern, paste('(', plPattern, ')', sep=''),
                           'duration$', sep='[.]'),
                     '\\1', files)))
    ##!@todo this needs to be modified to reformat the power limits with a trailing zero
    if(powerLimitMin > 0){
      powerLimitFloat = as.numeric(powerLimits)
      cat('ignoring power limits: ', powerLimits[powerLimitFloat < powerLimitMin], '\n')
      powerLimits = powerLimits[powerLimitFloat >= powerLimitMin]
    }
    ## numeric cut ids present for this prefix, in ascending order
    cuts =
      sort(as.numeric(
        sub('cut_([0-9]+)', '\\1',
            unique(sub(paste(prefix, paste('(',cutPattern, ')', sep=''),
                             plPattern, 'duration$', sep='[.]'),
                       '\\1', files)))))
    expectedCuts = as.integer(read.table(paste(prefix, '.cuts.csv', sep=''),
                                         header=FALSE)[[1]])
    nnapply(powerLimits, function(powerLimit){
      plPattern = paste('p', powerLimit, 'w', sep='')
      presentCuts =
        list.files(pattern=paste(prefix, cutPattern,
                     plPattern,
                     'edges$', sep='[.]'))
      presentCuts = as.integer(sub('.*cut_([0-9]+).*', '\\1', presentCuts))
      ## refuse to return partial results for a power limit
      if(length(setdiff(expectedCuts, presentCuts))){
        errMsg =
          paste(prefix, '@', powerLimit, 'w:\nmissing cuts!\n',
                paste(setdiff(expectedCuts, presentCuts), collapse=' '), sep='')
        stop(errMsg)
      }
      nnapply(cuts,
              function(cut){
                if(fixed)
                  fileTypes = fixedLPFileTypes
                else
                  fileTypes = ilpFileTypes
                nnapply(fileTypes, function(fileType){
                  filename =
                    paste(prefix, paste('cut_', cut, sep=''),
                          paste('p', powerLimit, 'w', sep=''),
                          fileType, sep='.')
                  ## unreadable files become NULL with a warning
                  tryCatch(
                    as.data.table(
                      read.table(filename, header=TRUE, sep=',',
                                 strip.white=TRUE)),
                    error=function(e){
                      warning('failed to read ', filename, immediate.=TRUE)
                      NULL
                    }, finally=NULL)
                }
                )
              },
              mc=TRUE)
    }
    )
  }
  )
}
##!@todo this function assumes that we don't alter the schedule from
##!the LP.  For modes other than the default, this may not be true,
##!and we need to recompute start times and slack edges.
## Merge per-timeslice LP results into one global schedule.
##
## slices: named list of reconcileLP() outputs, one per timeslice, in
##   execution order; list names become timeslice labels.
## name: label handed to plotPowerTime for the power/time plot.
##
## Timeslice start times are offset so slices run back to back; vertex
## ids and edge uids are renamed to be unique across slices; edges whose
## LP solution split one task across two configurations are chained
## through an intermediate vertex.  Returns list(edges, vertices, pt).
lpMerge = function(slices, name){
  ## tag every edge/vertex row with its timeslice label
  edges =
    .rbindlist(napply(slices, function(e, name) {
      e$edges$ts = name
      e$edges
    }, mc=TRUE))
  vertices =
    .rbindlist(napply(slices, function(e, name) {
      e$vertices$ts = name
      e$vertices
    }, mc=TRUE))
  ## per-timeslice duration = latest vertex start; cumulate into offsets
  tsDuration = vertices[, .SD[which.max(start)], by=ts]
  tsDuration[, vertex := NULL]
  setnames(tsDuration, 'start', 'tsEnd')
  setkey(tsDuration, ts)
  tsDuration[, tsEnd := cumsum(tsEnd)]
  tsDuration$tsStart = 0
  tsDuration$tsStart[2:nrow(tsDuration)] = head(tsDuration[, tsEnd], -1)
  ## shift vertex start times onto the global timeline
  setkey(vertices, ts)
  vertices = vertices[tsDuration[, list(ts, tsStart)]]
  vertices[, c('start', 'tsStart') := list(start + tsStart, NULL)]
  setnames(vertices, 'vertex', 'src')
  setkey(vertices, ts, src)
  setkey(edges, ts, src)
  edges = vertices[edges]
  setnames(vertices, 'src', 'vertex')
  ## renumber vertices across timeslices
  edges[, c('src', 'dest') := list(as.character(src), as.character(dest))]
  edges[, ts := as.character(.GRP), by=ts]
  edges[, ts := as.integer(ts)]
  edges[splitDest == TRUE, dest := paste(src, '_', ts, 's', sep='')]
  edges[splitSrc == TRUE, src := paste(src, '_', ts-1, 's', sep='')]
  edges[, c('splitSrc', 'splitDest') := list(NULL, NULL)]
  ## just to be consistent
  ##edges[splitSrc==F, src := paste(src, ts, sep='_')]
  ##edges[splitDest==F, dest := paste(dest, ts, sep='_')]
  ## assign new vertices to split-config edges from each timeslice,
  ## rename edge uids to be unique across timeslices
  if('frac' %in% names(edges))
    edges = edges[order(ts, e_uid, -frac)]
  else
    edges = edges[order(ts, e_uid)]
  edges[, second := FALSE]
  edges[, orig_e_uid := e_uid]
  ## a 2-row group is a split task: chain its two configurations through
  ## an intermediate vertex named "<src>."
  edges =
    edges[,if(.N ==2){
      e = copy(.SD)
      e[1, c('dest') := list(paste(src, '.', sep=''))]
      e[2, c('src', 'start', 'second') :=
        list(paste(src, '.', sep=''), start + e[1, weight], TRUE)]
      e
    } else {
      .SD
    }, by=list(e_uid, ts)]
  e_uid_map = data.table(orig=edges$e_uid)
  edges[, e_uid := as.character(e_uid)]
  edges[, e_uid := paste(ts, e_uid, sep='_')]
  edges[second == TRUE, e_uid := paste(e_uid, '.', sep='')]
  e_uid_map$new = edges$e_uid
  ##!@todo assign weights to slack edges
  ## rebuild the vertex table from the renamed edges; vertex '2' is
  ## appended with the latest finish time (presumably the sink -- verify)
  vertices = edges[, list(vertex=union(src, dest))]
  setkey(vertices, vertex)
  setkey(edges, src)
  vertices = vertices[unique(edges[,list(src, start)])]
  vertices =
    rbind(vertices, data.table(vertex='2',
                               start=edges[dest=='2', max(start+weight)]))
  pt = powerTime(edges, vertices)
  plotPowerTime(pt, name=name)
  return(list(edges = edges,
              vertices = vertices,
              pt = pt))
}
## Merge LP timeslice results for every command at every power limit.
##
## `commands_powers` is a nested list keyed command -> powerLimit -> slices;
## each leaf is handed to lpMerge() with a "<command> <powerLimit>" label.
## Returns the same nested structure with merged schedules at the leaves.
lpMergeAll = function(commands_powers){
  napply(commands_powers,
         function(perCommand, name){
           cmdLabel = name
           napply(perCommand,
                  function(slices, name){
                    lpMerge(slices, name=paste(cmdLabel, name))
                  }, mc=T)
         })
}
## Run the LP solver for all three modes and publish both the raw and the
## merged results as globals.
##
## Consistency fix: the sibling functions loadAndMergeILP() and
## .writeILP_prefix() publish globals via assign(..., envir=.GlobalEnv);
## this function previously used `<<-`. Since loadAndMergeLP is defined at
## top level, `<<-` resolved to the global environment anyway, so switching
## to assign() is behavior-preserving while making the side effect explicit.
loadAndMergeLP = function(){
  results = lpGo()
  assign('results', results, envir=.GlobalEnv)
  assign('resultsMerged', lpMergeAll(results), envir=.GlobalEnv)
  resultsOneConf = lpGo(mode='combined')
  assign('resultsOneConf', resultsOneConf, envir=.GlobalEnv)
  assign('resultsMergedOneConf', lpMergeAll(resultsOneConf), envir=.GlobalEnv)
  resultsOneConfLE = lpGo(mode='combinedLE')
  assign('resultsOneConfLE', resultsOneConfLE, envir=.GlobalEnv)
  assign('resultsMergedOneConfLE', lpMergeAll(resultsOneConfLE),
         envir=.GlobalEnv)
}
# note: this also merges fixedLP results. I'm lazy.
loadAndMergeILP = function(...){
  ## Load ILP (and fixedLP) results, drop incomplete cuts, merge the
  ## per-cut tables into one data.table per file type, and publish the
  ## merged structures as globals (resultsILPMerged, resultsFixedLPMerged).
  ## `...` is forwarded unchanged to ilpGo().
  resultsILP = ilpGo(...)
  ## retain only complete cuts
  ##!@todo get list of expected cuts, warn if any missing
  ## recursive pruner: at depth 1 a node is kept only if none of its file
  ## tables failed to load (no NULLs); interior nodes are kept only if at
  ## least one child survives
  f = function(x, depth){
    if(depth == 1){
      if(any(sapply(x, is.null)))
        NULL
      else
        x
    }
    else {
      result = lapply(x, f, depth-1)
      result = result[!sapply(result, is.null)]
      if(length(result))
        result
      else
        NULL
    }
  }
  ## nesting is prefix -> powerLimit -> cut -> fileType, hence depth 4
  resultsILP = f(resultsILP, 4)
  ## split fixedLP results (identified by name) from true ILP results
  fixed = grep('fixedLP', names(resultsILP))
  ilp = setdiff(seq(length(resultsILP)), fixed)
  resultsFixedLP = resultsILP[fixed]
  resultsILP = resultsILP[ilp]
  ## row-bind a given file type across all cuts, tagging rows with a
  ## numeric 'cut' column taken from the cut's name
  f = function(cuts, fileTypes)
    nnapply(
      fileTypes,
      function(fileType)
        .rbindlist(
          napply(
            cuts,
            function(x, name){
              result=x[[fileType]]
              if(is.null(result)) return(result)
              result[, cut:=as.numeric(name)]
              result
            }
          )
        )
    )
  ## merge cuts
  resultsILPMerged = lapply(resultsILP, lapply, f, ilpFileTypes)
  resultsFixedLPMerged = lapply(resultsFixedLP, lapply, f, fixedLPFileTypes)
  ## propagate event times across cuts.
  ##!@todo collectives are numbered in order of their occurrence, but
  ##!e.g. MPI_Waitall()s may not be. As the cut names are vertex
  ##!labels, we can get the vertex ordering from mergedData.
  ## f = function(x){
  ## ### if we somehow avoid zero-length slack edges, the end time of a cut
  ## ### will not correspond to the start time of its last vertex
  ##   ## place event start times within each cut
  ##   setkey(x$duration, cut)
  ##   x$duration[, cutEnd:=cumsum(duration)]
  ##   x$duration[, cutStart:=c(0, head(cutEnd, -1))]
  ##   setkey(x$events, cut, event)
  ##   x$events = x$duration[x$events, list(event, cut, start=start+cutStart)]
  ##   setkey(x$edges, cut, event)
  ##   setkey(x$events, cut, event)
  ##   x$edges = x$events[x$edges]
  ##   ## renumber events, remove cut column
  ##   x$activeEvents = x$edges[, list(event=unique(event)), by=cut]
  ##   setkey(x$activeEvents, cut, event)
  ##   x$activeEvents = x$activeEvents[, list(newEvent=.GRP-1), by=list(cut, event)]
  ##   x$edges[x$activeEvents, c('event', 'cut') := list(newEvent, NULL)]
  ##   x$events = x$events[x$activeEvents]
  ##   x
  ## }
  ## # resultsILPMerged <<- lapply(resultsILPMerged, lapply, f)
  ## publish merged results globally; cut start-time accumulation happens
  ## later in .writeILP_prefix() once the vertex ordering is known
  assign('resultsILPMerged', resultsILPMerged, envir=.GlobalEnv)
  assign('resultsFixedLPMerged', resultsFixedLPMerged, envir=.GlobalEnv)
  NULL
}
## Convert per-cut relative times into absolute schedule times.
##
## `x` is one merged result (with $duration and $edges data.tables) and
## `orderedCuts` gives the cuts in execution order. Cumulative cut
## durations become per-cut offsets (cutStart), which are added to every
## edge's start time. Cuts marked 'infeasible' get NA durations and their
## edges are dropped from the join. Note: `x` is modified partly by
## reference (data.table `:=`) and also returned.
accumulateCutStarts = function(x, orderedCuts){
  setkey(x$duration, cut)
  ## reorder durations to execution order before accumulating
  x$duration = x$duration[J(orderedCuts)]
  x$duration$cutEnd = as.numeric(NA)
  x$duration$cutStart = as.numeric(NA)
  ## duration is read as character when any cut is 'infeasible'; cumsum
  ## only over feasible cuts
  x$duration[duration != 'infeasible', cutEnd:=cumsum(duration)]
  x$duration[duration != 'infeasible', cutStart:=c(0, head(cutEnd, -1))]
  x$duration[duration == 'infeasible', duration := as.numeric(NA)]
  x$duration$duration = as.numeric(x$duration$duration)
  setkey(x$duration, cut)
  setkey(x$edges, cut)
  ## join cut offsets onto edges (feasible cuts only) and shift starts
  x$edges = x$edges[x$duration[!is.na(duration), list(cut, cutStart)]]
  x$edges[, c('start', 'cutStart') := list(start+cutStart, NULL)]
  ## match NAs for message edges
  x$edges[power < 1, power := as.numeric(NA)]
  x
}
## Write out the LP-derived schedule files for one command prefix.
##
## Loads the reduced merged data ('../mergedData.<prefix>.Rsave'), fixes up
## the globally merged LP results with absolute cut start times, and for
## each power limit writes: a 'sched.*.Rsave' snapshot, 'powerTime.*.dat'
## profiles (overall and per rank), and per-rank 'replay.*.dat' schedules
## consumed by the C replay tool. Side effects: mutates the globals
## resultsILPMerged/resultsFixedLPMerged and writes files in the CWD.
.writeILP_prefix = function(prefix){
  origPrefix = sub('_fixedLP', '', prefix)
  eReduced = new.env()
  load(paste('../mergedData', origPrefix, 'Rsave', sep='.'), envir=eReduced)
  ranks = unique(eReduced$reduced$assignments$rank)
  ## eRuntimes = new.env()
  ## runtimes =
  ##     load(paste('../', head(eReduced$reduced$assignments, 1)$date,
  ##                '/merged.Rsave',sep=''), envir=eRuntimes)
  ##!@todo eReduced and eRuntimes have enough info to recreate the schedule?
  edges_inv = eReduced$reduced$edges_inv
  setkey(edges_inv, e_uid)
  vertices = eReduced$reduced$vertices
  rSched = eReduced$reduced$schedule
  globals = eReduced$reduced$globals
  rm(eReduced)
  ###!@todo this should be done in loadAndMergeILP, but requires
  ###!knowledge of vertex order that lives in merged_.Rsave.
  ## order the cuts by the start time of their corresponding vertex, then
  ## convert per-cut times to absolute times in BOTH global result sets
  setkey(vertices, vertex)
  orderedCuts =
    vertices[J(
      resultsFixedLPMerged[[prefix]][[1]]$duration$cut
    )][order(start), vertex]
  resultsILPMerged =
    lapply(resultsILPMerged, lapply, accumulateCutStarts, orderedCuts)
  resultsFixedLPMerged =
    lapply(resultsFixedLPMerged, lapply, accumulateCutStarts, orderedCuts)
  assign('resultsILPMerged', resultsILPMerged, envir=.GlobalEnv)
  assign('resultsFixedLPMerged', resultsFixedLPMerged, envir=.GlobalEnv)
  ## schedule columns carried into the replay output
  cols =
    c('src', 's_uid', 'd_uid', 'dest', 'type', 'start', 'weight',
      ## name
      'size',
      ## dest
      ## src
      'tag',
      'power', 'OMP_NUM_THREADS', 'cpuFreq')
  vertexCols = c('vertex', 'label', 'hash', 'reqs')
  ## write all output files for one power limit's merged result `pl`
  .writeILP_prefix_powerLimit = function(pl){
    if(!nrow(pl$edges)){
      cat("powerLimit", pl$duration$powerLimit, ": no edges\n")
      return(NULL)
    }
    setkey(pl$edges, e_uid)
    ## merge sched with reduced edges_inv
    sched = edges_inv[pl$edges]
    save(sched,
         file=
           paste('sched', prefix,
                 paste('p', pl$duration$powerLimit[1], 'w', sep=''),
                 'Rsave', sep='.'))
    ## dump a power-vs-time table for schedule subset `s` under `label`
    .writePowerTime = function(s, label){
      write.table(powerTime(s),
                  file=
                    paste('powerTime',
                          prefix, label, paste('p', pl$duration$powerLimit[1], 'w', sep=''),
                          'dat', sep='.'),
                  quote=F, sep='\t', row.names=F)
    }
    .writePowerTime(sched, 'all')
    lapply(ranks, function(r)
      .writePowerTime(sched[rank == r], label=sprintf('%06d', r)))
    ##!@todo plot per-rank power vs time, compare power allocation nonuniformity
    cols = intersect(cols, names(sched))
    setkey(sched, src)
    schedDest = data.table::copy(sched)
    setkey(schedDest, dest)
    ## vertex 2 is MPI_Finalize; every rank must reach it exactly once
    if(nrow(schedDest[J(2)]) != length(ranks)){
      stop("MPI_Finalize anomaly found in LP schedule for prefix ", prefix, "!\n")
    }
    ## build and write the replay schedule for a single rank `r`
    .writeILP_prefix_powerLimit_rank = function(r){
      ## rank == dest rank for messages
      compEdges =
        vertices[, vertexCols, with=F][cbind(
          sched[type=='comp' & rank==r,
                cols, with=F],
          d_rank=as.integer(NA),
          s_rank=as.integer(NA),
          mseq=as.numeric(NA))]
      if(nrow(sched[type == 'message'])){
        messageSendEdges = sched[type=='message' & s_rank==r]
        messageRecvEdges = sched[type=='message' & rank==r]
        messageSendEdges[, d_rank := rank]
        setkey(messageRecvEdges, 'o_dest')
        messageRecvEdges[, d_rank := rank]
        messageSendEdges = vertices[, vertexCols,
                                    with=F][messageSendEdges[, c(cols, 's_rank', 'd_rank'), with=F]]
        messageRecvEdges = vertices[, vertexCols,
                                    with=F][messageRecvEdges[, c(cols, 's_rank', 'd_rank', 'o_dest', 'o_d_uid'), with=F]]
        messageRecvEdges[, src := NULL]
        ### reduceConfs produces one message edge for each send/recv
        ### pair, but we want a separate row for both send and recv
        messageEdges =
          .rbindlist(list(messageRecvEdges,
                          cbind(messageSendEdges, o_d_uid=as.numeric(NA))))
        ## mseq orders events within a vertex by their uid
        messageEdges[,mseq:=max(s_uid, o_d_uid, na.rm=T),by=vertex]
        messageEdges[, o_d_uid := NULL]
        edges =
          .rbindlist(list(compEdges,
                          messageEdges))
        rm(messageEdges)
      } else
        edges = compEdges
      edges[, mseq:=max(mseq, s_uid, na.rm=T), by=list(vertex,type)]
      edges =
        edges[,
              if(.N > 1){
                ### we should never have more than one comp edge and one message edge
                ### leaving a vertex
                a = .SD[type=='comp']
                a[, label := as.character(NA)]
                a =
                  rbindlist(list(cbind(.SD[type=='message'], seq=1),
                                 cbind(a, seq=2)))
                ### hack to handle start times from sender in recv edges.
                a[, start := min(start)]
              } else {
                a=copy(.SD)
                a[,c('label', 'hash', 'reqs'):=as.character(NA)]
                cbind(rbindlist(list(.SD,a)), seq=1)
              },
              by=vertex]
      edges[,mseq:=min(mseq),by=list(vertex)]
      edges = edges[order(mseq, seq)]
      ##!@todo UMT is missing MPI_Finalize; WTF?
      ## handle finalize
      edges =
        .rbindlist(
          list(
            edges,
            vertices[, vertexCols,
                     with=F][cbind(
              schedDest[dest==2 & rank==r, cols, with=F],
              d_rank=as.integer(NA), s_rank=as.integer(NA),
              mseq=max(edges$mseq) + 1, seq=1)]))
      edges[, c('seq', 'vertex', 's_uid') := NULL]
      ## for messages, src/dest become the peer ranks; NA elsewhere
      edges[, c('src', 'dest'):=as.integer(NA)]
      edges[type == 'message',
            c('src', 'dest') := list(as.integer(s_rank), as.integer(d_rank))]
      edges[, c('d_rank', 'type') := NULL]
      if(!'size' %in% names(edges)){
        edges[, c('size', 'tag', 'comm') := as.numeric(NA)]
      }
      ## project onto the replay file's column layout
      edges =
        edges[, list(start,
                     duration=weight,
                     name=sapply(strsplit(label, ' '), '[[', 1),
                     size,
                     dest,
                     src,
                     tag,
                     comm='0x0', ##!@todo fix
                     hash,
                     flags=0, ##!@todo fix?
                     #pkg_w=power,
                     #pp0_w=0,
                     #dram_w=0,
                     reqs,##=as.character(NA), ##!@todo fix
                     OMP_NUM_THREADS,
                     cpuFreq
                     )]
      ## replace NAs with the MPI_UNDEFINED sentinel in numeric columns
      for(col in setdiff(names(edges), c('reqs', 'name', 'comm', 'hash', 'pkg_w'))){
        eCol = edges[[col]]
        eCol[is.na(eCol)] = globals$MPI_UNDEFINED
        edges[[col]] = eCol
      }
      edges[is.na(comm), comm:=0]
      edges[is.na(hash), hash:='0']
      #edges[is.na(pkg_w), pkg_w:=0]
      edges[, cpuFreq:=as.integer(cpuFreq)]
      ## NOTE(review): zeroes duration on rows with a non-NA name (i.e.
      ## labelled/MPI rows) -- confirm this is intended and not is.na(name)
      edges[!is.na(name), duration := 0.0]
      edges[, reqs:=sapply(reqs, paste, collapse=',')]
      write.table(edges,
                  ## C code uses %s.%06d.dat
                  file=
                    paste('replay', prefix,
                          paste('p', pl$duration$powerLimit[1], 'w', sep=''),
                          sprintf('%06d', r),
                          'dat', sep='.'),
                  quote=F, sep='\t', row.names=F)
    }
    ##debug(.writeILP_prefix_powerLimit_rank)
    lapply(ranks, .writeILP_prefix_powerLimit_rank)
  }
  mclapply(resultsFixedLPMerged[[prefix]], .writeILP_prefix_powerLimit)
  NULL
}
## For each command, for each power limit, write a configuration
## schedule. This involves matching scheduled edges with edges from
## the original schedule, verifying that all edges were scheduled in
## the solution, matching edges with corresponding start vertices,
## writing vertices and edges in start order per rank, etc. We also
## require request IDs and communicator IDs. Perhaps it would be
## easier to load an existing replay schedule and add config options.
## Emit a configuration schedule for every command prefix present in the
## globally merged fixed-LP results (see .writeILP_prefix for the heavy
## lifting and the file formats produced).
writeILPSchedules = function(){
  prefixes = names(resultsFixedLPMerged)
  nnapply(prefixes, .writeILP_prefix)
}
## Summarize every written schedule: for each prefix and power limit,
## re-read the 'powerTime.<prefix>.all.p<P>w.dat' file written by
## .writeILP_prefix() and report total duration, time-weighted mean power,
## and peak power. Returns a nested list of one-row data.tables.
summarizeSchedules = function(){
  napply(
    resultsFixedLPMerged,
    function(results, name){
      prefix = name
      napply(
        results,
        function(plResults, name){
          powerLimit = name
          powerTime =
            fread(paste('powerTime', prefix, 'all',
                        paste('p',
                              ###!@todo this should agree with .writePowerTime()
                              as.integer(powerLimit),
                              'w', sep=''),
                        'dat', sep='.'))
          ## plot(stepfun(powerTime$start, c(powerTime$power,0)))
          ## time-weighted average: each power level holds from one start
          ## time to the next, so weight power[i+1] by diff(start)[i]
          meanPower =
            sum(powerTime[, diff(start) * tail(power, -1)])/tail(powerTime[, start],1)
          plResults$edges[, list(duration=max(start+weight),
                                 meanPower = meanPower,
                                 maxPower = max(powerTime$power))]
        })})
}
## Batch entry point: when run via Rscript (non-interactive), load and
## merge the ILP results and write out the replay schedules.
if(!interactive()){
  ## loadAndMergeLP()
  loadAndMergeILP()
  writeILPSchedules()
}
|
90f2d0302c2534b6006593fdd4c918ac72708988 | cec90ab7e7436b8ffabb7f5b5f38c7c7ee7904d2 | /analysis/cSTM_time_dep_simulation.R | 7f1227aae4b8dc185ee877710b49ab44cad05a96 | [
"MIT"
] | permissive | DARTH-git/cohort-modeling-tutorial-timedep | d11df3e69f3f22e46bd4880031dd1564c0846c95 | db0b7b20dbcccfd5a2676ce9e408bd19619be4b0 | refs/heads/main | 2023-07-21T04:28:22.968099 | 2023-07-13T19:00:42 | 2023-07-13T19:00:42 | 357,371,663 | 5 | 3 | null | null | null | null | UTF-8 | R | false | false | 35,682 | r | cSTM_time_dep_simulation.R | # Appendix code to Time-dependent cSTMs in R: Simulation-time dependency ----
#* This code forms the basis for the state-transition model of the tutorial:
#* 'A Tutorial on Time-Dependent Cohort State-Transition Models in R using a
#* Cost-Effectiveness Analysis Example'
#* Authors:
#* - Fernando Alarid-Escudero <falarid@stanford.edu>
#* - Eline Krijkamp
#* - Eva A. Enns
#* - Alan Yang
#* - M.G. Myriam Hunink
#* - Petros Pechlivanoglou
#* - Hawre Jalal
#* Please cite the article when using this code
#*
#* To program this tutorial we used:
#* R version 4.0.5 (2021-03-31)
#* Platform: 64-bit operating system, x64-based processor
#* Running under: Mac OS 12.2.1
#* RStudio: Version 1.4.1717 2009-2021 RStudio, Inc
#******************************************************************************#
# Description ----
#* This code implements a simulation-time-dependent Sick-Sicker cSTM model to
#* conduct a CEA of four strategies:
#* - Standard of Care (SoC): best available care for the patients with the
#* disease. This scenario reflects the natural history of the disease
#* progression.
#* - Strategy A: treatment A is given to patients in the Sick and Sicker states,
#* but only improves the quality of life of those in the Sick state.
#* - Strategy B: treatment B is given to all sick patients and reduces disease
#* progression from the Sick to Sicker state.
#* - Strategy AB: This strategy combines treatment A and treatment B. The disease
#* progression is reduced, and individuals in the Sick state have an improved
#* quality of life.
#******************************************************************************#
# Initial setup ----
## NOTE(review): clearing the workspace with rm(list = ls()) is discouraged
## in scripts that may be source()d from other code; kept here because the
## tutorial is intended to be run standalone from a fresh session.
rm(list = ls()) # remove any variables in R's memory
## Install required packages ----
# install.packages("dplyr") # to manipulate data
# install.packages("tidyr") # to manipulate data
# install.packages("reshape2") # to manipulate data
# install.packages("ggplot2") # to visualize data
# install.packages("ggrepel") # to visualize data
# install.packages("gridExtra") # to visualize data
# install.packages("ellipse") # to visualize data
# install.packages("scales") # for dollar signs and commas
# install.packages(patchwork) # for combining ggplot2 figures
# install.packages("dampack") # for CEA and calculate ICERs
# install.packages("devtools") # to install packages from GitHub
# devtools::install_github("DARTH-git/darthtools") # to install darthtools from GitHub using devtools
# install.packages("doParallel") # to handle parallel processing
## Load packages ----
library(dplyr)
library(tidyr)
library(reshape2) # For melting data
library(ggplot2) # For plotting
library(ggrepel) # For plotting
library(gridExtra) # For plotting
library(ellipse) # For plotting
library(scales) # For dollar signs and commas
library(patchwork) # For combining ggplot2 figures
# library(dampack) # Uncomment to use CEA and PSA visualization functionality from dampack instead of the functions included in this repository
# library(darthtools) # Uncomment to use WCC, parameter transformation, and matrix checks from darthtools instead of the functions included in this repository
# library(doParallel) # For running PSA in parallel
## Load supplementary functions ----
source("R/Functions.R")
# Model input ----
## General setup ----
cycle_length <- 1 # cycle length equal to one year (use 1/12 for monthly)
n_age_init <- 25 # age at baseline
n_age_max <- 100 # maximum age of follow up
n_cycles <- (n_age_max - n_age_init)/cycle_length # time horizon, number of cycles
#* Age labels: "age.sub-cycle" for each cycle (e.g. "25.1", "25.2" monthly)
v_age_names <- paste(rep(n_age_init:(n_age_max-1), each = 1/cycle_length),
                     1:(1/cycle_length),
                     sep = ".")
#* the 4 health states of the model:
v_names_states <- c("H", # Healthy (H)
                    "S1", # Sick (S1)
                    "S2", # Sicker (S2)
                    "D") # Dead (D)
n_states <- length(v_names_states) # number of health states
### Discounting factors ----
d_c <- 0.03 # annual discount rate for costs
d_e <- 0.03 # annual discount rate for QALYs
### Strategies ----
v_names_str <- c("Standard of care", # store the strategy names
                 "Strategy A",
                 "Strategy B",
                 "Strategy AB")
n_str <- length(v_names_str) # number of strategies
## Within-cycle correction (WCC) using Simpson's 1/3 rule ----
v_wcc <- gen_wcc(n_cycles = n_cycles, # Function included in "R/Functions.R". The latest version can be found in `darthtools` package
                 method = "Simpson1/3") # vector of wcc
### Transition rates (annual), and hazard ratios (HRs) ----
r_HS1 <- 0.15 # constant annual rate of becoming Sick when Healthy
r_S1H <- 0.5 # constant annual rate of becoming Healthy when Sick
r_S1S2 <- 0.105 # constant annual rate of becoming Sicker when Sick
hr_S1 <- 3 # hazard ratio of death in Sick vs Healthy
hr_S2 <- 10 # hazard ratio of death in Sicker vs Healthy
### Effectiveness of treatment B ----
hr_S1S2_trtB <- 0.6 # hazard ratio of becoming Sicker when Sick under treatment B
## Age-dependent mortality rates ----
lt_usa_2015 <- read.csv("data/LifeTable_USA_Mx_2015.csv")
#* Extract age-specific all-cause mortality for ages in model time horizon
v_r_mort_by_age <- lt_usa_2015 %>%
  dplyr::filter(Age >= n_age_init & Age < n_age_max) %>%
  dplyr::select(Total) %>%
  as.matrix()
### State rewards ----
#### Costs ----
c_H <- 2000 # annual cost of being Healthy
c_S1 <- 4000 # annual cost of being Sick
c_S2 <- 15000 # annual cost of being Sicker
c_D <- 0 # annual cost of being dead
c_trtA <- 12000 # annual cost of receiving treatment A
c_trtB <- 13000 # annual cost of receiving treatment B
#### Utilities ----
u_H <- 1 # annual utility of being Healthy
u_S1 <- 0.75 # annual utility of being Sick
u_S2 <- 0.5 # annual utility of being Sicker
u_D <- 0 # annual utility of being dead
u_trtA <- 0.95 # annual utility when receiving treatment A
### Transition rewards ----
#* One-time rewards applied to the transition itself (on top of state rewards)
du_HS1 <- 0.01 # disutility when transitioning from Healthy to Sick
ic_HS1 <- 1000 # increase in cost when transitioning from Healthy to Sick
ic_D <- 2000 # increase in cost when dying
### Discount weight for costs and effects ----
#* Per-cycle discount weights. The cost vector must use the cost discount
#* rate `d_c` and the effect (QALY) vector the effect rate `d_e`; the
#* original code had the two rates swapped, which was harmless only
#* because d_c == d_e == 0.03 in the base case but would silently bias
#* results in any sensitivity analysis that varies the rates separately.
v_dwc <- 1 / ((1 + (d_c * cycle_length)) ^ (0:n_cycles))
v_dwe <- 1 / ((1 + (d_e * cycle_length)) ^ (0:n_cycles))
# Process model inputs ----
## Age-specific transition rates to the Dead state for all cycles ----
#* Repeat each annual mortality rate to cover every sub-cycle of that age
v_r_HDage <- rep(v_r_mort_by_age, each = 1/cycle_length)
#* Name age-specific mortality vector
names(v_r_HDage) <- v_age_names
#* compute mortality rates by applying the disease hazard ratios
v_r_S1Dage <- v_r_HDage * hr_S1 # Age-specific mortality rate in the Sick state
v_r_S2Dage <- v_r_HDage * hr_S2 # Age-specific mortality rate in the Sicker state
#* transform rates to probabilities adjusting by cycle length
#* Function included in "R/Functions.R". The latest version can be found in `darthtools` package
p_HS1 <- rate_to_prob(r = r_HS1, t = cycle_length) # constant annual probability of becoming Sick when Healthy conditional on surviving
p_S1H <- rate_to_prob(r = r_S1H, t = cycle_length) # constant annual probability of becoming Healthy when Sick conditional on surviving
p_S1S2 <- rate_to_prob(r = r_S1S2, t = cycle_length)# constant annual probability of becoming Sicker when Sick conditional on surviving
v_p_HDage <- rate_to_prob(v_r_HDage, t = cycle_length) # Age-specific mortality risk in the Healthy state
v_p_S1Dage <- rate_to_prob(v_r_S1Dage, t = cycle_length) # Age-specific mortality risk in the Sick state
v_p_S2Dage <- rate_to_prob(v_r_S2Dage, t = cycle_length) # Age-specific mortality risk in the Sicker state
## Annual transition probability of becoming Sicker when Sick for treatment B ----
#* Apply hazard ratio to rate to obtain transition rate of becoming Sicker when
#* Sick for treatment B
r_S1S2_trtB <- r_S1S2 * hr_S1S2_trtB
#* Transform rate to probability to become Sicker when Sick under treatment B
#* adjusting by cycle length conditional on surviving
#* (Function included in "R/Functions.R". The latest version can be found in
#* `darthtools` package)
p_S1S2_trtB <- rate_to_prob(r = r_S1S2_trtB, t = cycle_length)
# Construct state-transition models ----
## Initial state vector ----
#* All starting healthy
v_m_init <- c(H = 1, S1 = 0, S2 = 0, D = 0) # initial state vector
## Initialize cohort traces ----
### Initialize cohort trace under SoC ----
#* Rows = cycles 0..n_cycles, columns = health states
m_M_SoC <- matrix(NA,
                  nrow = (n_cycles + 1), ncol = n_states,
                  dimnames = list(0:n_cycles, v_names_states))
#* Store the initial state vector in the first row of the cohort trace
m_M_SoC[1, ] <- v_m_init
### Initialize cohort trace for strategies A, B, and AB ----
#* Structure and initial states are the same as for SoC
m_M_strA <- m_M_SoC # Strategy A
m_M_strB <- m_M_SoC # Strategy B
m_M_strAB <- m_M_SoC # Strategy AB
## Create transition probability arrays for strategy SoC ----
### Initialize transition probability array for strategy SoC ----
#* All transitions to a non-death state are assumed to be conditional on survival
#* Third dimension indexes the cycle (age dependency)
a_P_SoC <- array(0,
                 dim = c(n_states, n_states, n_cycles),
                 dimnames = list(v_names_states,
                                 v_names_states,
                                 0:(n_cycles - 1)))
### Fill in array
## From H
a_P_SoC["H", "H", ] <- (1 - v_p_HDage) * (1 - p_HS1)
a_P_SoC["H", "S1", ] <- (1 - v_p_HDage) * p_HS1
a_P_SoC["H", "D", ] <- v_p_HDage
## From S1
a_P_SoC["S1", "H", ] <- (1 - v_p_S1Dage) * p_S1H
a_P_SoC["S1", "S1", ] <- (1 - v_p_S1Dage) * (1 - (p_S1H + p_S1S2))
a_P_SoC["S1", "S2", ] <- (1 - v_p_S1Dage) * p_S1S2
a_P_SoC["S1", "D", ] <- v_p_S1Dage
## From S2 (no recovery from Sicker)
a_P_SoC["S2", "S2", ] <- 1 - v_p_S2Dage
a_P_SoC["S2", "D", ] <- v_p_S2Dage
## From D (absorbing state)
a_P_SoC["D", "D", ] <- 1
### Initialize transition probability array for strategy A as a copy of SoC's ----
#* Treatment A affects only quality of life, not transitions
a_P_strA <- a_P_SoC
### Initialize transition probability array for strategy B ----
a_P_strB <- a_P_SoC
#* Update only transition probabilities from S1 involving p_S1S2
a_P_strB["S1", "S1", ] <- (1 - v_p_S1Dage) * (1 - (p_S1H + p_S1S2_trtB))
a_P_strB["S1", "S2", ] <- (1 - v_p_S1Dage) * p_S1S2_trtB
### Initialize transition probability array for strategy AB as a copy of B's ----
a_P_strAB <- a_P_strB
## Check if transition probability arrays are valid ----
#* Functions included in "R/Functions.R". The latest version can be found in `darthtools` package
### Check that transition probabilities are [0, 1] ----
check_transition_probability(a_P_SoC, verbose = TRUE)
check_transition_probability(a_P_strA, verbose = TRUE)
check_transition_probability(a_P_strB, verbose = TRUE)
check_transition_probability(a_P_strAB, verbose = TRUE)
### Check that all rows for each slice of the array sum to 1 ----
check_sum_of_transition_array(a_P_SoC, n_states = n_states, n_cycles = n_cycles, verbose = TRUE)
check_sum_of_transition_array(a_P_strA, n_states = n_states, n_cycles = n_cycles, verbose = TRUE)
check_sum_of_transition_array(a_P_strB, n_states = n_states, n_cycles = n_cycles, verbose = TRUE)
check_sum_of_transition_array(a_P_strAB, n_states = n_states, n_cycles = n_cycles, verbose = TRUE)
## Create transition dynamics arrays ----
#* These arrays will capture transitions from each state to another over time
### Initialize transition dynamics array for strategy SoC ----
a_A_SoC <- array(0,
                 dim = c(n_states, n_states, n_cycles + 1),
                 dimnames = list(v_names_states, v_names_states, 0:n_cycles))
#* Set first slice of a_A_SoC with the initial state vector in its diagonal
diag(a_A_SoC[, , 1]) <- v_m_init
### Initialize transition-dynamics array for strategies A, B, and AB ----
#* Structure and initial states are the same as for SoC
a_A_strA <- a_A_SoC
a_A_strB <- a_A_SoC
a_A_strAB <- a_A_SoC
# Run Markov model ----
#* Iterative solution of age-dependent cSTM: each cycle multiplies the
#* current state-occupancy row vector by that cycle's transition matrix
for(t in 1:n_cycles){
  ## Fill in cohort trace
  # For SoC
  m_M_SoC[t + 1, ] <- m_M_SoC[t, ] %*% a_P_SoC[, , t]
  # For strategy A
  m_M_strA[t + 1, ] <- m_M_strA[t, ] %*% a_P_strA[, , t]
  # For strategy B
  m_M_strB[t + 1, ] <- m_M_strB[t, ] %*% a_P_strB[, , t]
  # For strategy AB
  m_M_strAB[t + 1, ] <- m_M_strAB[t, ] %*% a_P_strAB[, , t]
  ## Fill in transition-dynamics array
  #* diag(state vector) %*% P gives the proportion moving along each
  #* state-to-state transition during cycle t
  # For SoC
  a_A_SoC[, , t + 1] <- diag(m_M_SoC[t, ]) %*% a_P_SoC[, , t]
  # For strategy A
  a_A_strA[, , t + 1] <- diag(m_M_strA[t, ]) %*% a_P_strA[, , t]
  # For strategy B
  a_A_strB[, , t + 1] <- diag(m_M_strB[t, ]) %*% a_P_strB[, , t]
  # For strategy AB
  a_A_strAB[, , t + 1] <- diag(m_M_strAB[t, ]) %*% a_P_strAB[, , t]
}
## Store the cohort traces in a list ----
l_m_M <- list(SoC = m_M_SoC,
              A = m_M_strA,
              B = m_M_strB,
              AB = m_M_strAB)
names(l_m_M) <- v_names_str
## Store the transition dynamics array for each strategy in a list ----
l_a_A <- list(SoC = a_A_SoC,
              A = a_A_strA,
              B = a_A_strB,
              AB = a_A_strAB)
names(l_a_A) <- v_names_str
# Plot Outputs ----
#* (Functions included in "R/Functions.R"; depends on the `ggplot2` package)
## Plot the cohort trace for strategy SoC ----
plot_trace(m_M_SoC)
## Plot the cohort trace for all strategies ----
plot_trace_strategy(l_m_M)
## Plot the epidemiology outcomes ----
### Survival ----
#* Proportion of the cohort not in the Dead state at each cycle
survival_plot <- plot_surv(l_m_M, v_names_death_states = "D") +
  theme(legend.position = "bottom")
survival_plot
### Prevalence ----
#* Prevalence = proportion sick among those alive
prevalence_S1_plot <- plot_prevalence(l_m_M,
                                      v_names_sick_states = c("S1"),
                                      v_names_dead_states = "D") +
  theme(legend.position = "")
prevalence_S1_plot
prevalence_S2_plot <- plot_prevalence(l_m_M,
                                      v_names_sick_states = c("S2"),
                                      v_names_dead_states = "D") +
  theme(legend.position = "")
prevalence_S2_plot
prevalence_S1S2_plot <- plot_prevalence(l_m_M,
                                        v_names_sick_states = c("S1", "S2"),
                                        v_names_dead_states = "D") +
  theme(legend.position = "")
prevalence_S1S2_plot
#* Proportion in S2 among all sick (S1 + S2)
prop_sicker_plot <- plot_proportion_sicker(l_m_M,
                                           v_names_sick_states = c("S1", "S2"),
                                           v_names_sicker_states = c("S2")) +
  theme(legend.position = "bottom")
prop_sicker_plot
## Combine plots ----
gridExtra::grid.arrange(survival_plot,
                        prevalence_S1_plot,
                        prevalence_S2_plot,
                        prevalence_S1S2_plot,
                        prop_sicker_plot,
                        ncol = 1, heights = c(0.75, 0.75, 0.75, 0.75, 1))
# State Rewards ----
## Scale by the cycle length ----
#* Rewards are annual; multiplying by cycle_length converts them to
#* per-cycle rewards (no-op here since cycle_length == 1)
#* Vector of state utilities under strategy SoC
v_u_SoC <- c(H = u_H,
             S1 = u_S1,
             S2 = u_S2,
             D = u_D) * cycle_length
#* Vector of state costs under strategy SoC
v_c_SoC <- c(H = c_H,
             S1 = c_S1,
             S2 = c_S2,
             D = c_D) * cycle_length
#* Vector of state utilities under strategy A
#* (treatment A improves QoL in S1 only, but is paid for in S1 and S2)
v_u_strA <- c(H = u_H,
              S1 = u_trtA,
              S2 = u_S2,
              D = u_D) * cycle_length
#* Vector of state costs under strategy A
v_c_strA <- c(H = c_H,
              S1 = c_S1 + c_trtA,
              S2 = c_S2 + c_trtA,
              D = c_D) * cycle_length
#* Vector of state utilities under strategy B (no QoL effect)
v_u_strB <- c(H = u_H,
              S1 = u_S1,
              S2 = u_S2,
              D = u_D) * cycle_length
#* Vector of state costs under strategy B
v_c_strB <- c(H = c_H,
              S1 = c_S1 + c_trtB,
              S2 = c_S2 + c_trtB,
              D = c_D) * cycle_length
#* Vector of state utilities under strategy AB
v_u_strAB <- c(H = u_H,
               S1 = u_trtA,
               S2 = u_S2,
               D = u_D) * cycle_length
#* Vector of state costs under strategy AB (both treatments are paid for)
v_c_strAB <- c(H = c_H,
               S1 = c_S1 + (c_trtA + c_trtB),
               S2 = c_S2 + (c_trtA + c_trtB),
               D = c_D) * cycle_length
## Store state rewards ----
#* Store the vectors of state utilities for each strategy in a list
l_u <- list(SoC = v_u_SoC,
            A = v_u_strA,
            B = v_u_strB,
            AB = v_u_strAB)
#* Store the vectors of state cost for each strategy in a list
l_c <- list(SoC = v_c_SoC,
            A = v_c_strA,
            B = v_c_strB,
            AB = v_c_strAB)
#* assign strategy names to matching items in the lists
names(l_u) <- names(l_c) <- v_names_str
# Compute expected outcomes ----
#* Create empty vectors to store total utilities and costs
v_tot_qaly <- v_tot_cost <- vector(mode = "numeric", length = n_str)
names(v_tot_qaly) <- names(v_tot_cost) <- v_names_str
## Loop through each strategy and calculate total utilities and costs ----
for (i in 1:n_str) { # i <- 1
  v_u_str <- l_u[[i]] # select the vector of state utilities for the i-th strategy
  v_c_str <- l_c[[i]] # select the vector of state costs for the i-th strategy
  a_A_str <- l_a_A[[i]] # select the transition array for the i-th strategy
  ##* Array of state rewards
  #* Create transition matrices of state utilities and state costs for the i-th strategy
  #* (byrow = T: the reward of a transition is that of the DESTINATION state)
  m_u_str <- matrix(v_u_str, nrow = n_states, ncol = n_states, byrow = T)
  m_c_str <- matrix(v_c_str, nrow = n_states, ncol = n_states, byrow = T)
  #* Expand the transition matrix of state utilities across cycles to form a transition array of state utilities
  a_R_u_str <- array(m_u_str,
                     dim = c(n_states, n_states, n_cycles + 1),
                     dimnames = list(v_names_states, v_names_states, 0:n_cycles))
  # Expand the transition matrix of state costs across cycles to form a transition array of state costs
  a_R_c_str <- array(m_c_str,
                     dim = c(n_states, n_states, n_cycles + 1),
                     dimnames = list(v_names_states, v_names_states, 0:n_cycles))
  ##* Apply transition rewards
  #* Apply disutility due to transition from H to S1
  a_R_u_str["H", "S1", ] <- a_R_u_str["H", "S1", ] - du_HS1
  #* Add transition cost per cycle due to transition from H to S1
  a_R_c_str["H", "S1", ] <- a_R_c_str["H", "S1", ] + ic_HS1
  #* Add transition cost per cycle of dying from all non-dead states
  a_R_c_str[-n_states, "D", ] <- a_R_c_str[-n_states, "D", ] + ic_D
  ###* Expected QALYs and costs for all transitions per cycle
  #* QALYs = life years x QoL
  #* Note: all parameters are annual in our example. In case your own case example is different make sure you correctly apply.
  a_Y_c_str <- a_A_str * a_R_c_str
  a_Y_u_str <- a_A_str * a_R_u_str
  ###* Expected QALYs and costs per cycle
  ##* Vector of QALYs and costs
  v_qaly_str <- apply(a_Y_u_str, 3, sum) # sum the proportion of the cohort across transitions
  v_cost_str <- apply(a_Y_c_str, 3, sum) # sum the proportion of the cohort across transitions
  ##* Discounted total expected QALYs and Costs per strategy and apply within-cycle correction if applicable
  #* QALYs
  v_tot_qaly[i] <- t(v_qaly_str) %*% (v_dwe * v_wcc)
  #* Costs
  v_tot_cost[i] <- t(v_cost_str) %*% (v_dwc * v_wcc)
}
# Cost-effectiveness analysis (CEA) ----
## Incremental cost-effectiveness ratios (ICERs) ----
#* Function included in "R/Functions.R"; depends on the `dplyr` package
#* The latest version can be found in `dampack` package
#* Identifies dominated/extendedly-dominated strategies and computes ICERs
df_cea <- calculate_icers(cost = v_tot_cost,
                          effect = v_tot_qaly,
                          strategies = v_names_str)
df_cea
## CEA table in proper format ----
table_cea <- format_table_cea(df_cea) # Function included in "R/Functions.R"; depends on the `scales` package
table_cea
## CEA frontier -----
#* Function included in "R/Functions.R"; depends on the `ggplot2` and `ggrepel` packages.
#* The latest version can be found in `dampack` package
plot(df_cea, label = "all", txtsize = 16) +
  expand_limits(x = max(table_cea$QALYs) + 0.1) +
  theme(legend.position = c(0.8, 0.2))
#******************************************************************************#
# Probabilistic Sensitivity Analysis (PSA) -----
## Load model, CEA and PSA functions ----
source('R/Functions_cSTM_time_dep_simulation.R')
source('R/Functions.R')
## List of input parameters -----
#* Base-case values bundled for calculate_ce_out(); PSA draws replace the
#* scalar entries per simulation
l_params_all <- list(
  # Transition probabilities (per cycle), hazard ratios
  v_r_HDage = v_r_HDage, # age-specific background mortality rates when Healthy (all-cause; note: vector, not a constant)
  r_HS1 = 0.15, # constant annual rate of becoming Sick when Healthy conditional on surviving
  r_S1H = 0.5, # constant annual rate of becoming Healthy when Sick conditional on surviving
  r_S1S2 = 0.105, # constant annual rate of becoming Sicker when Sick conditional on surviving
  hr_S1 = 3, # hazard ratio of death in Sick vs Healthy
  hr_S2 = 10, # hazard ratio of death in Sicker vs Healthy
  # Effectiveness of treatment B
  hr_S1S2_trtB = 0.6, # hazard ratio of becoming Sicker when Sick under treatment B
  ## State rewards
  # Costs
  c_H = 2000, # cost of remaining one cycle in Healthy
  c_S1 = 4000, # cost of remaining one cycle in Sick
  c_S2 = 15000, # cost of remaining one cycle in Sicker
  c_D = 0, # cost of being dead (per cycle)
  c_trtA = 12000, # cost of treatment A
  c_trtB = 13000, # cost of treatment B
  # Utilities
  u_H = 1, # utility when Healthy
  u_S1 = 0.75, # utility when Sick
  u_S2 = 0.5, # utility when Sicker
  u_D = 0, # utility when Dead
  u_trtA = 0.95, # utility when being treated with A
  ## Transition rewards
  du_HS1 = 0.01, # disutility when transitioning from Healthy to Sick
  ic_HS1 = 1000, # increase in cost when transitioning from Healthy to Sick
  ic_D = 2000, # increase in cost when dying
  # Initial and maximum ages
  n_age_init = 25,
  n_age_max = 100,
  # Discount rates
  d_c = 0.03, # annual discount rate for costs
  d_e = 0.03, # annual discount rate for QALYs,
  # Cycle length
  cycle_length = 1
)
#* Store the parameter names into a vector
v_names_params <- names(l_params_all)
## Test functions to generate CE outcomes and PSA dataset ----
#* Test function to compute CE outcomes
calculate_ce_out(l_params_all) # Function included in "R/Functions_cSTM_time_dep_simulation.R"
#* Test function to generate PSA input dataset
generate_psa_params(10) # Function included in "R/Functions_cSTM_time_dep_simulation.R"
## Generate PSA dataset ----
#* Number of simulations
n_sim <- 1000
#* Generate PSA input dataset (one row per simulation, one column per parameter)
df_psa_input <- generate_psa_params(n_sim = n_sim)
#* First six observations
head(df_psa_input)
### Histogram of parameters ----
#* NOTE(review): `..density..` is deprecated in ggplot2 >= 3.4; consider
#* `after_stat(density)` once the project's minimum ggplot2 version allows
ggplot(melt(df_psa_input, variable.name = "Parameter"), aes(x = value)) +
  facet_wrap(~Parameter, scales = "free") +
  geom_histogram(aes(y = ..density..)) +
  ylab("") +
  theme_bw(base_size = 16) +
  theme(axis.text = element_text(size = 6),
        axis.title.x = element_blank(),
        axis.title.y = element_blank(),
        axis.text.y = element_blank(),
        axis.ticks.y = element_blank())
## Run PSA ----
#* Initialize data.frames with PSA output
#* data.frame of costs: one row per PSA draw, one column per strategy
df_c <- as.data.frame(matrix(0,
nrow = n_sim,
ncol = n_str))
colnames(df_c) <- v_names_str
#* data.frame of effectiveness (QALYs), same layout as df_c
df_e <- as.data.frame(matrix(0,
nrow = n_sim,
ncol = n_str))
colnames(df_e) <- v_names_str
#* data.frame of survival: one row per draw, one column per cycle (0..n_cycles)
m_surv <- matrix(NA, nrow = n_sim, ncol = (n_cycles + 1),
dimnames = list(1:n_sim, 0:n_cycles))
#* NOTE: check.names must be the *logical* FALSE (previously the string
#* "FALSE", which only worked via accidental character-to-logical coercion).
#* FALSE keeps the numeric cycle labels "0", "1", ... as column names.
df_surv <- data.frame(Outcome = "Survival",
m_surv, check.names = FALSE)
#* data.frame of life expectancy (one value per draw; initialized with NAs)
df_le <- data.frame(Outcome = "Life Expectancy",
LE = m_surv[, 1])
#* data.frame of prevalence of S1
m_prev <- matrix(NA, nrow = n_sim, ncol = (n_cycles + 1),
dimnames = list(1:n_sim, 0:n_cycles))
df_prevS1 <- data.frame(States = "S1",
m_prev, check.names = FALSE)
#* data.frame of prevalence of S2
df_prevS2 <- data.frame(States = "S2",
m_prev, check.names = FALSE)
#* data.frame of prevalence of S1 & S2
df_prevS1S2 <- data.frame(States = "S1 + S2",
m_prev, check.names = FALSE)
#* Conduct probabilistic sensitivity analysis
#* Run Markov model on each parameter set of PSA input dataset
n_time_init_psa_series <- Sys.time()
for (i in 1:n_sim) { # i <- 1
# Overwrite the base-case parameters with the i-th PSA draw
l_psa_input <- update_param_list(l_params_all, df_psa_input[i,])
# Economics Measures
l_out_ce_temp <- calculate_ce_out(l_psa_input)
df_c[i, ] <- l_out_ce_temp$Cost
df_e[i, ] <- l_out_ce_temp$Effect
# Epidemiological Measures (under standard of care only)
l_out_epi_temp <- generate_epi_measures_SoC(l_psa_input)
# Column 1 of each output data.frame is the label column, hence `-1`
df_surv[i, -1] <- l_out_epi_temp$S
df_le[i, -1] <- l_out_epi_temp$LE
df_prevS1[i, -1] <- l_out_epi_temp$PrevS1
df_prevS2[i, -1] <- l_out_epi_temp$PrevS2
df_prevS1S2[i, -1] <- l_out_epi_temp$PrevS1S2
# Display simulation progress
if (i/(n_sim/100) == round(i/(n_sim/100), 0)) { # display progress every 1%
cat('\r', paste(i/n_sim * 100, "% done", sep = " "))
}
}
n_time_end_psa_series <- Sys.time()
n_time_total_psa_series <- n_time_end_psa_series - n_time_init_psa_series
print(paste0("PSA with ", scales::comma(n_sim), " simulations run in series in ",
round(n_time_total_psa_series, 2), " ",
units(n_time_total_psa_series)))
## Run Markov model on each parameter set of PSA input dataset in parallel
# Uncomment next section to run in parallel
## Get OS
os <- get_os()
no_cores <- parallel::detectCores() - 1
print(paste0("Parallelized PSA on ", os, " using ", no_cores, " cores."))
n_time_init_psa_parallel <- Sys.time()
# ## Run parallelized PSA based on OS
# if (os == "osx") {
# # Initialize cluster object
# cl <- parallel::makeForkCluster(no_cores)
# # Register clusters
# doParallel::registerDoParallel(cl)
# # Run parallelized PSA
# df_ce <- foreach::foreach(i = 1:n_sim, .combine = rbind) %dopar% {
# l_out_temp <- calculate_ce_out(df_psa_input[i, ])
# df_ce <- c(l_out_temp$Cost, l_out_temp$Effect)
# }
# # Extract costs and effects from the PSA dataset
# df_c <- df_ce[, 1:n_str]
# df_e <- df_ce[, (n_str + 1):(2*n_str)]
# # Register end time of parallelized PSA
# n_time_end_psa_parallel <- Sys.time()
# }
# if (os == "windows") {
# # Initialize cluster object
# cl <- parallel::makeCluster(no_cores)
# # Register clusters
# doParallel::registerDoParallel(cl)
# opts <- list(attachExportEnv = TRUE)
# # Run parallelized PSA
# # NOTE(review): fixed `1:n_samp` -> `1:n_sim` in this commented-out branch;
# # `n_samp` is not defined anywhere in this script.
# df_ce <- foreach::foreach(i = 1:n_sim, .combine = rbind,
# .export = ls(globalenv()),
# .packages = c("dampack"),
# .options.snow = opts) %dopar% {
# l_out_temp <- calculate_ce_out(df_psa_input[i, ])
# df_ce <- c(l_out_temp$Cost, l_out_temp$Effect)
# }
# # Extract costs and effects from the PSA dataset
# df_c <- df_ce[, 1:n_str]
# df_e <- df_ce[, (n_str + 1):(2*n_str)]
# # Register end time of parallelized PSA
# n_time_end_psa_parallel <- Sys.time()
# }
# if (os == "linux") {
# # Initialize cluster object
# cl <- parallel::makeCluster(no_cores)
# # Register clusters
# doParallel::registerDoMC(cl)
# # Run parallelized PSA
# df_ce <- foreach::foreach(i = 1:n_sim, .combine = rbind) %dopar% {
# l_out_temp <- calculate_ce_out(df_psa_input[i, ])
# df_ce <- c(l_out_temp$Cost, l_out_temp$Effect)
# }
# # Extract costs and effects from the PSA dataset
# df_c <- df_ce[, 1:n_str]
# df_e <- df_ce[, (n_str + 1):(2*n_str)]
# # Register end time of parallelized PSA
# n_time_end_psa_parallel <- Sys.time()
# }
# # Stop clusters
# stopCluster(cl)
# n_time_total_psa_parallel <- n_time_end_psa_parallel - n_time_init_psa_parallel
# print(paste0("PSA with ", scales::comma(n_sim), " simulations run in parallel in ",
# round(n_time_total_psa_parallel, 2), " ",
# units(n_time_total_psa_parallel)))
## Visualize PSA results for CEA ----
### Create PSA object ----
#* Function included in "R/Functions.R" The latest version can be found in `dampack` package
l_psa <- make_psa_obj(cost = df_c,
effectiveness = df_e,
parameters = df_psa_input,
strategies = v_names_str)
# Re-assert strategy names on the object in case make_psa_obj reordered them
l_psa$strategies <- v_names_str
colnames(l_psa$effectiveness) <- v_names_str
colnames(l_psa$cost) <- v_names_str
#* Vector with willingness-to-pay (WTP) thresholds.
v_wtp <- seq(0, 200000, by = 5000)
### Cost-Effectiveness Scatter plot ----
txtsize <- 13
#* Function included in "R/Functions.R"; depends on `tidyr` and `ellipse` packages.
#* The latest version can be found in `dampack` package
#* NOTE: plot.psa is an S3 method called explicitly by its full name.
#* (Variable name "gg_scattter" is a typo but is referenced again below.)
gg_scattter <- plot.psa(l_psa, txtsize = txtsize) +
ggthemes::scale_color_colorblind() +
ggthemes::scale_fill_colorblind() +
scale_y_continuous("Cost (Thousand $)",
breaks = number_ticks(10),
labels = function(x) x/1000) +
xlab("Effectiveness (QALYs)") +
guides(col = guide_legend(nrow = 2)) +
theme(legend.position = "bottom")
gg_scattter
### Incremental cost-effectiveness ratios (ICERs) with probabilistic output ----
#* Compute expected costs and effects for each strategy from the PSA
#* Function included in "R/Functions.R". The latest version can be found in `dampack` package
df_out_ce_psa <- summary(l_psa)
#* Function included in "R/Functions.R"; depends on the `dplyr` package
#* The latest version can be found in `dampack` package
df_cea_psa <- calculate_icers(cost = df_out_ce_psa$meanCost,
effect = df_out_ce_psa$meanEffect,
strategies = df_out_ce_psa$Strategy)
df_cea_psa
### Plot cost-effectiveness frontier with probabilistic output ----
#* Function included in "R/Functions.R"; depends on the `ggplot2` and `ggrepel` packages.
#* The latest version can be found in `dampack` package
#* NOTE(review): the x-axis limit reuses `table_cea` (deterministic results);
#* presumably intentional so both frontiers share a scale -- confirm.
plot.icers(df_cea_psa, label = "all", txtsize = txtsize) +
expand_limits(x = max(table_cea$QALYs) + 0.1) +
theme(legend.position = c(0.8, 0.2))
### Cost-effectiveness acceptability curves (CEACs) and frontier (CEAF) ---
#* Functions included in "R/Functions.R". The latest versions can be found in `dampack` package
ceac_obj <- ceac(wtp = v_wtp, psa = l_psa)
#* Regions of highest probability of cost-effectiveness for each strategy
summary(ceac_obj)
#* CEAC & CEAF plot
gg_ceac <- plot.ceac(ceac_obj, txtsize = txtsize, xlim = c(0, NA), n_x_ticks = 14) +
ggthemes::scale_color_colorblind() +
ggthemes::scale_fill_colorblind() +
theme(legend.position = c(0.8, 0.48))
gg_ceac
### Expected Loss Curves (ELCs) ----
#* Function included in "R/Functions.R". The latest version can be found in `dampack` package
elc_obj <- calc_exp_loss(wtp = v_wtp, psa = l_psa)
elc_obj
#* ELC plot: expected loss (in thousands of dollars) across WTP thresholds.
#* FIX: removed a trailing comma inside theme(...) that created an empty
#* trailing argument in the call.
gg_elc <- plot.exp_loss(elc_obj, log_y = FALSE,
txtsize = txtsize, xlim = c(0, NA), n_x_ticks = 14,
col = "full") +
ggthemes::scale_color_colorblind() +
ggthemes::scale_fill_colorblind() +
# geom_point(aes(shape = as.name("Strategy"))) +
scale_y_continuous("Expected Loss (Thousand $)",
breaks = number_ticks(10),
labels = function(x) x/1000) +
theme(legend.position = c(0.4, 0.7))
gg_elc
### Expected value of perfect information (EVPI) ----
#* Function included in "R/Functions.R". The latest version can be found in `dampack` package
evpi <- calc_evpi(wtp = v_wtp, psa = l_psa)
#* EVPI plot (y-axis rescaled to thousands of dollars)
gg_evpi <- plot.evpi(evpi, effect_units = "QALY",
txtsize = txtsize, xlim = c(0, NA), n_x_ticks = 14) +
scale_y_continuous("EVPI (Thousand $)",
breaks = number_ticks(10),
labels = function(x) x/1000)
gg_evpi
### Combine all figures into one ----
#* 2x2 panel via the `patchwork` package: scatter + CEAC on top, ELC + EVPI below
patched_cea <- (gg_scattter + gg_ceac + plot_layout(guides = "keep"))/(gg_elc + gg_evpi)
gg_psa_plots <- patched_cea +
plot_annotation(tag_levels = 'A')
gg_psa_plots
## Visualize PSA results for Epidemiological Measures ----
### Wrangle PSA output ----
#* Combine prevalence measures into one long table of States x draws x cycles
df_prev <- dplyr::bind_rows(df_prevS1,
df_prevS2,
df_prevS1S2)
#* Transform to long format (one row per draw-cycle combination)
df_surv_lng <- reshape2::melt(df_surv,
id.vars = c("Outcome"),
# value.name = "Survival",
variable.name = "Time")
df_prev_lng <- reshape2::melt(df_prev,
id.vars = c("States"),
# value.name = "Proportion",
variable.name = "Time")
#* Compute posterior-predicted 95% CI (data_summary adds mean, lb, ub columns)
df_surv_summ <- data_summary(df_surv_lng, varname = "value",
groupnames = c("Outcome", "Time"))
df_le_summ <- data_summary(df_le, varname = "LE",
groupnames = c("Outcome"))
df_prev_summ <- data_summary(df_prev_lng, varname = "value",
groupnames = c("States", "Time"))
# Order the States factor for a sensible legend order in the plot below
df_prev_summ$States <- ordered(df_prev_summ$States,
levels = c("S1", "S2", "S1 + S2"))
### Plot epidemiological measures ---
txtsize_epi <- 16
#### Survival ---
#* Mean survival curve with 95% CI ribbon across PSA draws
gg_surv_psa <- ggplot(df_surv_summ, aes(x = as.numeric(Time), y = value,
ymin = lb, ymax = ub)) +
geom_line() +
geom_ribbon(alpha = 0.4) +
scale_x_continuous(breaks = number_ticks(8)) +
xlab("Cycle") +
ylab("Proportion alive") +
theme_bw(base_size = txtsize_epi) +
theme()
gg_surv_psa
#### Life Expectancy ---
#* Density of life expectancy across PSA draws
gg_le_psa <- ggplot(df_le, aes(x = LE)) +
geom_density(color = "darkblue", fill = "lightblue") +
scale_x_continuous(breaks = number_ticks(8)) +
xlab("Life expectancy") +
ylab("") +
theme_bw(base_size = txtsize_epi) +
theme(
axis.text.y = element_blank(),
axis.ticks = element_blank())
gg_le_psa
#### Prevalence ---
#* Mean prevalence of S1, S2, and S1+S2 with 95% CI ribbons
gg_prev_psa <- ggplot(df_prev_summ, aes(x = as.numeric(Time), y = value,
ymin = lb, ymax = ub,
color = States, linetype = States,
fill = States)) +
geom_line() +
geom_ribbon(alpha = 0.4, color = NA) +
scale_x_continuous(breaks = number_ticks(8)) +
scale_y_continuous(breaks = number_ticks(8),
labels = scales::percent_format(accuracy = 1)) +
scale_color_discrete(name = "Health state", l = 50) +
scale_linetype(name = "Health state") +
scale_fill_discrete(name = "Health state", l = 50) +
xlab("Cycle") +
ylab("Prevalence (%)") +
theme_bw(base_size = 16) +
theme(legend.position = c(0.83, 0.83))
gg_prev_psa
### Combine all figures into one ----
#* Survival and life expectancy stacked on the left, prevalence on the right
patched_epi <- (gg_surv_psa / gg_le_psa) | gg_prev_psa
gg_psa_epi_plots <- patched_epi +
plot_annotation(tag_levels = 'A')
gg_psa_epi_plots
|
e662f17cc6bbc65f6cabf7e63e7177e6514c22ac | a3011e4901d9cda7fa28b5c53079743273a34ab2 | /extras/2020-04-13_draw-line-on-plot.R | 7ef766c7e9bb5e30339364be3419d603cd8c6f13 | [] | no_license | amit-agni/FiMATS | ee56427132989d2c0beacdf9be9225699a4c02b7 | 78d42982c7391f8fa9c80ab2ef2677202e8a4c44 | refs/heads/master | 2021-05-21T09:26:16.056174 | 2020-05-18T06:08:02 | 2020-05-18T06:08:02 | 252,637,321 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 1,063 | r | 2020-04-13_draw-line-on-plot.R | library(shiny,tidyverse)
# UI: a single plot that reports throttled hover positions (every 1 s)
# and click positions back to the server.
ui <- fluidPage(
plotOutput("plot", width = "500px", height = "500px",
hover=hoverOpts(id = "hover", delay = 1000, delayType = "throttle", clip = TRUE, nullOutside = TRUE),
click="click"))
# Server: draws a scatter plot of mtcars; after the user clicks, a line
# segment is drawn from the clicked point to the current hover position.
server <- function(input, output, session) {
  # Static base scatter plot; the segment is layered on top reactively.
  base_plot <- ggplot(mtcars, aes(x = wt, y = qsec)) + geom_point()
  observe({
    if (is.null(input$click$x) || is.null(input$hover$x)) {
      # No anchor click (or pointer outside the plot) yet: show the bare plot.
      output$plot <- renderPlot({ base_plot })
    } else {
      # Debug traces of the current click/hover coordinates.
      print(input$click$x)
      print(input$click$y)
      print(input$hover$x)
      print(input$hover$y)
      # BUG FIX: the composed plot was previously discarded inside observe();
      # it must be assigned to output$plot via renderPlot() to be displayed.
      # annotate() is used instead of aes() because the endpoints are fixed
      # coordinates, not columns of the plotted data.
      output$plot <- renderPlot({
        base_plot + annotate("segment",
                             x = isolate(input$click$x),
                             y = isolate(input$click$y),
                             xend = isolate(input$hover$x),
                             yend = isolate(input$hover$y))
      })
    }
  })
}
shinyApp(ui, server) |
35fe7b5e493ed23bae0b2d935fa1d2972df3bf5a | 26f1cb213312ad204072dadd6b1163bcc0fa1bba | /exemples/chap7/7.09.R | 58714c61dbdc7938a41bde9c14699bdb0b131638 | [] | no_license | fmigone/livreR | a9b6f61a0aab902fb3b07fc49ea7dd642b65bdc3 | 998df678da1559ee03438c439335db796a416f2f | refs/heads/master | 2020-04-22T05:47:14.083087 | 2018-05-01T14:39:10 | 2018-05-01T14:39:10 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 887 | r | 7.09.R | library(reshape2) # transposition
# Reshape: one row per listing, one column per date (limited to the first
# 1,000,000 rows so the operation is not too slow). The "mean" aggregation
# is nominal: there is only one price per listing and date.
# FIX: `fun.aggregate` spelled out (was `fun = mean`, which only worked
# through partial argument matching).
evol_prix <- dcast(calendar[1:1000000,],
listing_id ~ date,
fun.aggregate = mean,
value.var = "price")
# Row-wise mean, i.e. the average price per listing across all dates
prix_par_logement <- data.frame(id=evol_prix$listing_id,
moy=rowMeans(evol_prix[,-1],
na.rm = TRUE))
# Preview of the result: first six rows
head(prix_par_logement)
# id moy
# 1 9279 55.00000
# 2 11170 70.61056
# 3 11798 101.07735
# 4 11848 87.00000
# 5 14011 89.00000
# 6 17372 54.00000
0556cfaff07c48b4e6a07556e35770b37b427706 | f66ea0275fc795a7d7caf39fb0f6e91fc5a66830 | /man/yr_oldestplayer.Rd | b2f51ee3f875dc817c9f1daadfaa14b282fa8db1 | [] | no_license | sunpark0405/nba | acc8910923f0a1b573956861e0c9fa49768ccf79 | a882e4179e50f6bab620bd87e48e8b586777602f | refs/heads/main | 2023-03-23T23:10:43.817658 | 2021-03-08T02:20:44 | 2021-03-08T02:20:44 | 341,405,412 | 0 | 0 | null | null | null | null | UTF-8 | R | false | true | 537 | rd | yr_oldestplayer.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/yr_oldestplayer.R
\name{yr_oldestplayer}
\alias{yr_oldestplayer}
\title{Year and Oldest Player Function}
\usage{
yr_oldestplayer(year)
}
\arguments{
\item{year}{What year are we observing? Need to define.}
}
\description{
This function selects the oldest player in a given year.
If there are multiple players with the same age, the function will return the top alphabetical name.
}
\examples{
yr_oldestplayer(2010)
}
\keyword{age}
\keyword{player}
\keyword{year}
|
7e1b44d5d023cad8e0fef50e80e273ffdd1daa7e | 12e0ddae06438b748d12a7f9c26e67cf682a8c16 | /featureEngineering/setup_H2O.R | 0b532785362a35b2ed3e72049d26be386c3996cc | [
"MIT"
] | permissive | christianadriano/ML_SelfHealingUtility | b05b2462c95a9aed9ac86af9e5eeb65bb07713d0 | 398ef99a7073c6383862fade85b8816e65a2fb1e | refs/heads/master | 2021-10-07T20:45:51.281121 | 2018-12-05T09:16:05 | 2018-12-05T09:16:05 | 105,566,942 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 714 | r | setup_H2O.R | ### Setup H2O packages - http://h2o-release.s3.amazonaws.com/h2o/rel-wolpert/4/docs-website/h2o-r/docs/articles/getting_started.html
# Remove any previously loaded/installed copy of h2o before reinstalling.
if ("package:h2o" %in% search()) {
  detach("package:h2o", unload = TRUE)
}
if ("h2o" %in% rownames(installed.packages())) {
  remove.packages("h2o")
}
# Make sure the packages h2o depends on are present.
dependency_pkgs <- c("RCurl", "jsonlite")
for (dep in dependency_pkgs) {
  already_installed <- dep %in% rownames(installed.packages())
  if (!already_installed) {
    install.packages(dep)
  }
}
# Download and install the latest stable H2O build for R from source.
h2o_repo <- "http://h2o-release.s3.amazonaws.com/h2o/latest_stable_R"
install.packages("h2o", type = "source", repos = c(h2o_repo))
# Load H2O, start a local cluster, and run the bundled k-means demo.
library(h2o)
h2o.init()
demo(h2o.kmeans)
48941ad2ba3b3f12102510dae02777dbc0582f30 | 11318c146dc2d3e3a58bcd2874d6897d072ef3db | /plot3.R | 206292b4c57e2ab9c68ce432f874bb9bb488275c | [] | no_license | Chesalov63/ExData_Plotting1 | 23faa1e8c651f86c100a507a9aad05c6ae2869f7 | 351d1b09907b19cedda4376c1043b3647b858177 | refs/heads/master | 2021-01-01T08:10:57.969343 | 2020-02-09T12:22:43 | 2020-02-09T12:22:43 | 239,191,440 | 0 | 0 | null | 2020-02-08T19:21:05 | 2020-02-08T19:21:04 | null | UTF-8 | R | false | false | 1,617 | r | plot3.R | ## Start with downloading and unzipping UCI file
filename <- "exdata_data_household_power_consumption.zip"
if(!file.exists(filename)) {
download.file(
"https://archive.ics.uci.edu/ml/machine-learning-databases/00235/household_power_consumption.zip", filename)
}
unzip(filename)
## Read the data from dates 2007-02-01 and 2007-02-02 (2880 records)
library(data.table)
library(dplyr)
filename <- "household_power_consumption.txt"
headerDT <- fread(filename, sep = ";", na.strings = "?", nrows=0)
powerDT <- fread(filename, sep = ";", na.strings = "?", skip = "1/2/2007",
nrows=2880, col.names = colnames(headerDT))
## Remove the original file, it's too big to keep it unzipped
file.remove(filename)
## Set locale in case it differs from North-American usage
Sys.setlocale("LC_TIME", "C")
## Create new column, because we need Date and Time in POSIXct format
powerDT <- mutate(powerDT, DateTime = as.POSIXct(strptime(paste(Date, Time),
"%d/%m/%Y %H:%M:%S")
)
)
## Construct the plot and save it to a PNG file (480x480 by default)
png("plot3.png")
with(powerDT, plot(Sub_metering_1 ~ DateTime, type = "l",
ylab = "Energy sub metering", xlab = ""))
with(powerDT, lines(DateTime, Sub_metering_2, col = "red"))
with(powerDT, lines(DateTime, Sub_metering_3, col = "blue"))
legend("topright", pch = 32, col = c("black", "blue", "red"),
legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), lwd=1)
dev.off()
|
9ea58d28cc11e6c1cef71bc893e4aec34aba21c9 | 4882312ae33d4555e654321b3fee4cf18a05d563 | /man/methylomeStats.Rd | e5d55d0fbc4ad425129f6f8e0d01b8da05aac4fe | [] | no_license | timoast/methylQC | d3487c884b12e2a79544440b32aa48b738d8453b | fdba2f4917e5d442675780b455d5066ee33c8126 | refs/heads/master | 2020-09-21T20:15:40.768725 | 2016-09-13T08:33:52 | 2016-09-13T08:33:52 | 66,904,949 | 3 | 0 | null | null | null | null | UTF-8 | R | false | true | 364 | rd | methylomeStats.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/methylomeStats.R
\name{methylomeStats}
\alias{methylomeStats}
\title{Methylome statistics}
\usage{
methylomeStats(data)
}
\arguments{
\item{data}{A dataframe}
}
\value{
A data frame of summary statistics for the input methylome data
}
\description{
Generate summary statistics for methylome data
}
\examples{
methylomeStats(methylome)
}
|
c9a6ff6456fef3812f3f3a468ef1423c9dae8487 | a8f1a464fdd18222c094ddeccb8da85c204393ca | /RNAseq_discovery_analysis.R | b6d599adb3ef64c7a74b2c4745a91b7b139301b1 | [] | no_license | nikolayaivanov/PNET_MetPrediction | 3d1e3f881ac5e85954907874934d3d191a3ff52b | 4b9291beb8ec8b41408476a8a4af360c7c93bb69 | refs/heads/main | 2023-06-27T19:26:54.072462 | 2021-08-02T13:34:12 | 2021-08-02T13:34:12 | 379,664,521 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 18,977 | r | RNAseq_discovery_analysis.R | #
#
###########################################################################################
##### Discovery DE analysis [localized vs metastatic]
###########################################################################################
###### Read in the data and peform differential expression analysis
# Pipeline: salmon quants -> tximeta -> gene-level DESeq2 object,
# surrogate-variable adjustment (SVA), then DE by metastasis status.
load('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_rdas/discovery_dataset_metadata.rda')
source('/athena/masonlab/scratch/users/nai2008/ivanov_functions.R')
library(DESeq2)
library(tximeta)
# import data
se = tximeta(coldata=discovery_dataset_metadata, type = "salmon")
# found matching transcriptome:
# [ Ensembl - Homo sapiens - release 97 ]
# summarize transcript-level quantifications to gene-level
gse = summarizeToGene(se)
# get TPM matrix
tpm = assays(gse)$abundance
#get count matrix
counts=assays(gse)$counts
# make DESeqDataSet object
dds = DESeqDataSet(gse, design = ~ Distant_Mets)
#perform pre-filtering to keep only rows that have at least 10 reads total
keep <- rowSums(counts(dds)) >= 10
dds <- dds[keep,]
# make a transformed count matrix, using variance stabilizing transformation (VST)
vsd = vst(dds, blind=FALSE)
# run SVA (see chapter 8.1 of https://www.bioconductor.org/packages/devel/workflows/vignettes/rnaseqGene/inst/doc/rnaseqGene.html)
library(sva)
dds <- estimateSizeFactors(dds) # using 'avgTxLength' from assays(dds), correcting for library size
dat <- counts(dds, normalized=TRUE)
# Keep genes with mean normalized count > 1 for the SVA fit
idx = rowMeans(dat) > 1
dat = dat[idx, ]
mod = model.matrix(~ Distant_Mets, colData(dds))
mod0 <- model.matrix(~1, colData(dds))
svaobj = svaseq(dat, mod, mod0)
# Number of significant surrogate variables is: 12
colnames(svaobj$sv)=paste0('SV_',1:ncol(svaobj$sv))
# Append the 12 surrogate variables to the sample metadata and adjust for
# them in the DE design
colData(dds) = as(cbind(as.data.frame(colData(gse)),svaobj$sv),'DataFrame')
design(dds) = ~ SV_1 + SV_2 + SV_3 + SV_4 + SV_5 + SV_6 + SV_7 + SV_8 + SV_9 + SV_10 + SV_11 + SV_12 + Distant_Mets
# examine how well the SVA method did at recovering the batch variable (i.e. which dataset the samples originated from)
pdf('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_pdfs/discovery_dataset_SVA_plots.pdf')
par(mar=c(5.1,5.3,4.1,2.1))
x=factor(dds$Dataset)
for(i in 1:12){
boxplot(svaobj$sv[, i] ~ x, xlab='Batch (Dataset)', ylab=paste0("SV", i), main=paste0("Surrogate Variable ", i),
cex.main=2, cex.lab=2, cex.axis=1.5, outline=FALSE, col='lightgrey', ylim=c( min(as.vector(svaobj$sv[, i])), max(as.vector(svaobj$sv[, i])) ) )
points(as.vector(svaobj$sv[, i]) ~ jitter(as.numeric(x), amount=0.2), pch =21, col='black', bg='darkgrey', cex=1.4)
}
# for (i in 1:12) {
# stripchart(svaobj$sv[, i] ~ dds$Dataset, vertical = TRUE, main = paste0("Surrogate Variable ", i), ylab=paste0("SV", i),xlab='Batch (Dataset)',cex.main=2, cex.lab=2, cex.axis=1.5)
# abline(h = 0, lty='dashed')
# }
dev.off()
# run DE analysis
dds=DESeq(dds)
# Recorded output: no NA betaConv values, but 93 rows had betaConv == FALSE
which(is.na(mcols(dds)$betaConv)) # none
# 93 rows did not converge in beta
# omit rows that did not converge in beta (these are typically genes with very small counts and little power)
# see https://support.bioconductor.org/p/65091/
ddsClean <- dds[which(mcols(dds)$betaConv),]
# extract results
rr=results(ddsClean, alpha=0.1, contrast=c('Distant_Mets','Y','N'))
# contrast = c( the name of a factor in the design formula, name of the numerator level for the fold change, name of the denominator level for the fold change)
summary(rr)
# out of 29726 with nonzero total read count
# adjusted p-value < 0.1
# LFC > 0 (up) : 184, 0.62%
# LFC < 0 (down) : 219, 0.74%
# outliers [1] : 0, 0%
# low counts [2] : 0, 0%
# (mean count < 0)
# add gene symbols, chr, & Ensembl gene IDs
library(AnnotationHub)
hub = AnnotationHub()
dm = query(hub, c("EnsDb", "sapiens", "97"))
edb = dm[["AH73881"]]
genes=as.data.frame(genes(edb))
mm=match(rownames(rr), genes$gene_id)
length(which(is.na(mm))) # 0
rr$chr=as.vector(genes$seqnames[mm])
rr$Ensembl=as.vector(rownames(rr))
rr$gene=as.vector(genes$gene_name[mm])
# save releveant data
save(se, gse, tpm, counts, ddsClean, rr, vsd, file='/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_rdas/discovery_dataset_DESeq2_DEbyMetStatus_BUNDLE.rda')
###########################################################################################
##### Downstream analysis
###########################################################################################
library(DESeq2)
source('/athena/masonlab/scratch/users/nai2008/ivanov_functions.R')
load('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_rdas/discovery_dataset_metadata.rda')
load('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_rdas/discovery_dataset_DESeq2_DEbyMetStatus_BUNDLE.rda') # se, gse, tpm, counts, ddsClean, rr, vsd
################ PCA
# pcaData <- plotPCA(vsd, intgroup = c( "Distant_Mets"), returnData = TRUE)
# percentVar <- round(100 * attr(pcaData, "percentVar"))
library(calibrate)
pca = prcomp(t(assays(vsd)[[1]]))
#command that will return % of variance explained by each PC:
pcaVars=getPcaVars(pca)
all(rownames(pca$x) == discovery_dataset_metadata$names) #TRUE
PCs=as.matrix(pca$x)
## PCA colored by metastatic status
col=discovery_dataset_metadata$Distant_Mets
col=sub('N','forestgreen',col)
col=sub('Y','red',col)
pdf('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_pdfs/discovery_datset_PCA_plots_metStatus.pdf')
par(mar=c(5.1,5.3,4.1,2.1))
for(i in seq(from=1, to=10, by=2)){
x_lab=paste0('PC',i,': ',signif(pcaVars[i],2),'% variance')
y_lab=paste0('PC',i+1,': ',signif(pcaVars[i+1],2),'% variance')
plot(PCs[,i], PCs[,i+1], xlab=x_lab, ylab=y_lab, pch=21, cex=1.2, col='black', bg=col, cex.lab=2, cex.axis=2, xlim=c(min(PCs[,i])-10,max(PCs[,i])+10))
textxy(PCs[,i], PCs[,i+1],discovery_dataset_metadata$names,cex =.7, offset = .7)
legend("topright", legend=c('Distant mets','No distant mets'), pch=15, col=c("red", "forestgreen"), cex=1, pt.cex=1)
}
dev.off()
pdf('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_pdfs/discovery_datset_PCA_plots_metStatus_only_interesting_samples_labeled.pdf')
par(mar=c(5.1,5.3,4.1,2.1))
for(i in seq(from=1, to=10, by=2)){
x_lab=paste0('PC',i,': ',signif(pcaVars[i],2),'% variance')
y_lab=paste0('PC',i+1,': ',signif(pcaVars[i+1],2),'% variance')
plot(PCs[,i], PCs[,i+1], xlab=x_lab, ylab=y_lab, pch=21, cex=1.2, col='black', bg=col, cex.lab=2, cex.axis=2, xlim=c(min(PCs[,i])-10,max(PCs[,i])+10))
textxy(PCs[c(15,22),i], PCs[c(15,22),i+1],discovery_dataset_metadata$names[c(15,22)],cex =.7, offset = .7)
legend("topright", legend=c('Distant mets','No distant mets'), pch=15, col=c("red", "forestgreen"), cex=1, pt.cex=1)
}
dev.off()
## PCA colored by study
col=c('blue','darkred')
palette(col)
study=as.factor(discovery_dataset_metadata$Dataset)
pdf('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_pdfs/discovery_datset_PCA_plots_studyID.pdf')
par(mar=c(5.1,5.3,4.1,2.1))
for(i in seq(from=1, to=10, by=2)){
x_lab=paste0('PC',i,': ',signif(pcaVars[i],2),'% variance')
y_lab=paste0('PC',i+1,': ',signif(pcaVars[i+1],2),'% variance')
plot(PCs[,i], PCs[,i+1], xlab=x_lab, ylab=y_lab, pch=21, cex=1.2, col='black', bg=study, cex.lab=2, cex.axis=2, xlim=c(min(PCs[,i])-10,max(PCs[,i])+10))
#textxy(PCs[,i], PCs[,i+1],discovery_dataset_metadata$names,cex =.7, offset = .7)
legend("topright", legend=levels(study), pch=15, col=col, cex=1, pt.cex=1, title='Study')
}
dev.off()
################ Make table of DE genes
# print results
sig_results=as.data.frame(rr[which(rr$padj<=0.1),])
# order results by LFC
oo=order(sig_results$log2FoldChange)
sig_results=sig_results[oo,]
# make output table
out=data.frame(gene=sig_results$gene, chr=sig_results$chr, Ensembl=sig_results$Ensembl, log2FoldChange=sig_results$log2FoldChange, FDR=sig_results$padj)
nrow(out) # 403
length(which(out$log2FoldChange > 0)) # 184 genes overexpressed in samples with distant mets (relative to localized samples)
length(which(out$log2FoldChange < 0)) # 219 genes are underexpressed in samples with distant mets (relative to localized samples)
# How many DE genes are TFs?
out$TF=FALSE
TFs=read.csv("/athena/masonlab/scratch/users/nai2008/Human_TFs_DatabaseExtract_v_1.01.csv")
TFs$Ensembl_ID=as.vector(TFs$Ensembl_ID)
nrow(TFs) # 2765
length(unique(TFs$Ensembl_ID)) # 2765
which(is.na(TFs$Ensembl_ID)) # 0
mm=match(out$Ensembl,TFs$Ensembl_ID)
which(duplicated(out$Ensembl)) # none
which(duplicated(TFs$Ensembl_ID)) # none
out$TF[which(!is.na(mm))]=TRUE
length(which(out$TF==TRUE)) # 26
save(out, file='/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_rdas/discovery_dataset_DE_genes_byMetStatus.rda')
# load('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_rdas/discovery_dataset_DE_genes_byMetStatus.rda')
# print table of DE genes
write.csv(out,file="/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_data_tables/discovery_dataset_DE_genes_byMetStatus.csv", row.names=FALSE)
################ Perform gene set over-representation analysis (ORA)
library(goseq)
load('/athena/masonlab/scratch/users/nai2008/items_for_goseq_analysis.rda') # gene2cat_GOandKEGG, KEGG_term_names, median_tx_lengths, cat2gene_GO, cat2gene_KEGG
# Drop genes with NA adjusted p-values before building the DE indicator
if(length(which(is.na(rr$padj))) != 0) {
rr_mod=rr[-which(is.na(rr$padj)),]
} else { rr_mod = rr }
# Binary DE indicator vector named by Ensembl gene ID (1 = FDR <= 0.1)
indicator=rep(0, times=nrow(rr_mod))
indicator[which(rr_mod$padj<=0.1)]=1
aa=indicator
names(aa)=rr_mod$Ensembl
# Use median transcript length per gene to correct for length bias in goseq
mm = match(names(aa), median_tx_lengths$gene_EnsemblID)
bias.data = median_tx_lengths$median_length[mm]
pwf = nullp(aa, 'hg38', 'ensGene', bias.data = bias.data)
pdf('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_pdfs/discovery_dataset_goseq_pwf_plot.pdf')
plotPWF(pwf)
dev.off()
# Run the Wallenius-approximation ORA over GO (CC/BP/MF) and KEGG categories
GO.KEGG.wall=goseq(pwf,"hg38","ensGene", gene2cat = gene2cat_GOandKEGG, test.cats=c("GO:CC", "GO:BP", "GO:MF", "KEGG"))
GO.KEGG.wall$over_represented_FDR=p.adjust(GO.KEGG.wall$over_represented_pvalue, method="BH")
# KEGG categories have IDs of the form "path:hsaXXXXX"; label and name them
GO.KEGG.wall$ontology[grep('path:hsa', GO.KEGG.wall$category)]='KEGG'
index = grep('path:hsa', GO.KEGG.wall$category)
for (i in 1:length(index)){
mm=match(GO.KEGG.wall$category[index[i]], KEGG_term_names$KEGG_ID)
GO.KEGG.wall$term[index[i]] = KEGG_term_names$KEGG_term[mm]
}
length(which(GO.KEGG.wall$over_represented_FDR<=0.1)) # 2
GO.KEGG.wall_sig = GO.KEGG.wall[which(GO.KEGG.wall$over_represented_FDR<=0.1),]
# Add DE genes in each GO/KEGG category
GO.KEGG.wall_sig_withoutGenes = GO.KEGG.wall_sig
library(AnnotationHub)
hub = AnnotationHub()
dm = query(hub, c("EnsDb", "sapiens", "97"))
edb = dm[["AH73881"]]
genes=as.data.frame(genes(edb))
# Ensembl-ID -> gene-symbol lookup table
ens.gene.map = data.frame(gene_id=genes$gene_id, gene_name=genes$gene_name)
# Sanity checks: category names are unique in both lookup lists
length(names(cat2gene_GO)) == length(unique(names(cat2gene_GO))) #TRUE
length(names(cat2gene_KEGG)) == length(unique(names(cat2gene_KEGG))) #TRUE
GO.KEGG.wall_sig$genes_Ensembl=NA
GO.KEGG.wall_sig$genes=NA
# For each significant category, list the DE genes (Ensembl IDs and symbols)
# it contains. GO categories are looked up in cat2gene_GO, KEGG categories
# ("path:hsa...") in cat2gene_KEGG; genes not present in `aa` are dropped.
for (i in seq_len(nrow(GO.KEGG.wall_sig))){
cat=GO.KEGG.wall_sig$category[i]
if (length(grep('GO',cat)) == 1){
m.cat=match(cat, names(cat2gene_GO))
if(is.na(m.cat)){print('error: m.cat does not match (GO)')} else {
possible_genes=cat2gene_GO[[m.cat]]
m.genes=match(possible_genes,names(aa))
# Error only when NO category genes are present in the tested gene set
if( length(which(!is.na(m.genes)))==0 ){print('error: m.genes are all <NA> (GO)')} else {
if (length(which(is.na(m.genes)))>0){ possible_genes= possible_genes[-which(is.na(m.genes))] }
m.genes=match(possible_genes,names(aa))
subset=aa[m.genes]
DE_genes=subset[which(subset==1)]
GO.KEGG.wall_sig$genes_Ensembl[i]=paste(names(DE_genes),collapse=';')
m.ens=match(names(DE_genes),ens.gene.map$gene_id)
GO.KEGG.wall_sig$genes[i]=paste(ens.gene.map$gene_name[m.ens],collapse=';')
}
}
} else if (length(grep('path:hsa',cat)) == 1){
m.cat=match(cat, names(cat2gene_KEGG))
if(is.na(m.cat)){print('error: m.cat does not match (KEGG)')} else {
possible_genes=cat2gene_KEGG[[m.cat]]
m.genes=match(possible_genes,names(aa))
# BUG FIX: was `if(length(which(!is.na(m.genes) == 0)))`, which (because
# `!` binds more loosely than `==`) counted the NA matches and therefore
# skipped any KEGG category with at least one gene absent from `aa`.
# The check now mirrors the GO branch: error only when no genes overlap.
if( length(which(!is.na(m.genes)))==0 ){print('error: m.genes are all <NA> (KEGG)')} else {
if (length(which(is.na(m.genes)))>0){ possible_genes= possible_genes[-which(is.na(m.genes))] }
m.genes=match(possible_genes,names(aa))
subset=aa[m.genes]
DE_genes=subset[which(subset==1)]
GO.KEGG.wall_sig$genes_Ensembl[i]=paste(names(DE_genes),collapse=';')
m.ens=match(names(DE_genes),ens.gene.map$gene_id)
GO.KEGG.wall_sig$genes[i]=paste(ens.gene.map$gene_name[m.ens],collapse=';')
}
}
}
}
write.csv(GO.KEGG.wall_sig, file = '/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_data_tables/discovery_dataset_DEbyMetStatus_ORA.csv')
################ Make TPM plots of validated genes with concordant LFCs b/w the discovery and validation datasets
load('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_rdas/validated_genes_DEbyMetStatus_concordantLFC_only.rda')
log2_tpm_plus1=log2(tpm+1)
x=as.vector(ddsClean$Distant_Mets)
x=sub('Y','Distant mets',x)
x=sub('N','No distant mets',x)
x=factor(x, levels=c('No distant mets','Distant mets'))
mycol=as.vector(ddsClean$Distant_Mets)
mycol[which(mycol=="N")]='purple'
mycol[which(mycol=="Y")]='green'
pdf('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_pdfs/discovery_dataset_TPM_plots_of_validated_genes_with_concordantLFCs.pdf')
for (i in 1: nrow(validated_genes_concordantLFC_only)){
par(mar=c(5.1,5.3,4.1,2.1))
index=which(rr$Ensembl==validated_genes_concordantLFC_only$Ensembl[i])
zag1=paste0(rr$gene[index], ' (',rr$Ensembl[index],')')
zag2=as.expression(bquote(log[2]~"FC" == .(signif(rr$log2FoldChange[index],2))))
zag3=paste0("FDR = ",signif(rr$padj[index],2))
log2_tpm_plus1_subset=as.vector(log2_tpm_plus1[which(rownames(log2_tpm_plus1)==validated_genes_concordantLFC_only$Ensembl[i]),])
boxplot(as.vector(log2_tpm_plus1_subset)~x, , xlab='Tumor status', ylab= as.expression(bquote(log[2]~"(TPM+1)")), main=zag1, cex.main=2, cex.lab=2, cex.axis=1.5, outline=FALSE, col='lightgrey', ylim=c( min(as.vector(log2_tpm_plus1_subset)), max(as.vector(log2_tpm_plus1_subset)) ) )
points(as.vector(log2_tpm_plus1_subset) ~ jitter(as.numeric(x), amount=0.2), pch =21, col='black', bg=mycol, cex=1.4)
legend(x='topright',legend=c(zag2,zag3), bty='n')
}
dev.off()
################ UCHL1 expression
#UCHL1 is ENSG00000154277
# Interactive check of UCHL1 in the discovery DE results ('rr'); the recorded
# output below shows it is not differentially expressed by metastasis status.
rr[which(rr$Ensembl=='ENSG00000154277'),]
# baseMean log2FoldChange lfcSE stat pvalue
# <numeric> <numeric> <numeric> <numeric> <numeric>
# ENSG00000154277 18982.2 -0.297401 0.559588 -0.531464 0.595097
# padj chr Ensembl gene
# <numeric> <character> <character> <character>
# ENSG00000154277 0.999998 4 ENSG00000154277 UCHL1
################ Clustering analysis
# Reload the validated gene set and the discovery-cohort sample metadata.
load('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_rdas/validated_genes_DEbyMetStatus_concordantLFC_only.rda')
load('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_rdas/discovery_dataset_metadata.rda')
# Patients from whom samples CA20 and CA35 were derived had localized disease at the time of sequencing, but later developed distant mets; add that info to the metadata
discovery_dataset_metadata$Loc.to.Met=NA
discovery_dataset_metadata$Loc.to.Met[which(discovery_dataset_metadata$Distant_Mets=='Y')]=NA
discovery_dataset_metadata$Loc.to.Met[which(discovery_dataset_metadata$names=='CA20')]='Yes'
discovery_dataset_metadata$Loc.to.Met[which(discovery_dataset_metadata$names=='CA35')]='Yes'
###### Read in the data
source('/athena/masonlab/scratch/users/nai2008/ivanov_functions.R')
library(DESeq2)
library(tximeta)
# import data
# tximeta matches the salmon quantifications to their reference transcriptome
# (the recorded match is noted in the comments below).
se = tximeta(coldata=discovery_dataset_metadata, type = "salmon")
# found matching transcriptome:
# [ Ensembl - Homo sapiens - release 97 ]
# summarize transcript-level quantifications to gene-level
gse = summarizeToGene(se)
# get TPM matrix
tpm = assays(gse)$abundance
#get count matrix
counts=assays(gse)$counts
# make DESeqDataSet object
dds = DESeqDataSet(gse, design = ~ Distant_Mets)
#perform pre-filtering to keep only rows that have at least 10 reads total
keep <- rowSums(counts(dds)) >= 10
dds <- dds[keep,]
# make a transformed count matrix, using variance stabilizing transformation (VST)
# blind=FALSE lets the transformation use the design factor.
vsd = vst(dds, blind=FALSE)
vst_counts = as.matrix(assay(vsd))
# regress out the batch variable
# cleaningY (jaffelab) with P=2 presumably protects the first two design columns
# (intercept + Distant_Mets) and regresses out the Dataset batch term - confirm
# against jaffelab's documentation.
library(jaffelab)
mod = model.matrix(~Distant_Mets + factor(Dataset), data=as.data.frame(colData(dds)))
clean_vst_counts=cleaningY(vst_counts, mod, P=2)
# Gene annotation (Ensembl release 97 via AnnotationHub) used below to map
# Ensembl IDs to gene symbols.
library(AnnotationHub)
hub = AnnotationHub()
dm = query(hub, c("EnsDb", "sapiens", "97"))
edb = dm[["AH73881"]]
genes=as.data.frame(genes(edb))
# Restrict attention to validated genes present in the cleaned expression
# matrix; drop any validated genes that have no matching row, then re-match.
mm_validated_genes_w_concordantLFCs=match(validated_genes_concordantLFC_only$Ensembl, rownames(clean_vst_counts))
if(length(which(is.na(mm_validated_genes_w_concordantLFCs))) != 0) {
validated_genes_concordantLFC_only = validated_genes_concordantLFC_only[-which(is.na(mm_validated_genes_w_concordantLFCs)),]
mm_validated_genes_w_concordantLFCs=match(validated_genes_concordantLFC_only$Ensembl, rownames(clean_vst_counts))
}
# Interactive sanity checks; observed results recorded in the trailing comments.
which(duplicated(validated_genes_concordantLFC_only$Ensembl))# none
which(duplicated(rownames(clean_vst_counts)))# none
# Replace Ensembl IDs with gene symbols on the matrix rows.
mm=match(rownames(clean_vst_counts),genes$gene_id)
which(duplicated(rownames(clean_vst_counts)))# none
which(duplicated(genes$gene_id))# none
which(is.na(mm)) # none
rownames(clean_vst_counts) = genes$gene_name[mm]
all(colnames(clean_vst_counts)==discovery_dataset_metadata$names) #TRUE
library(pheatmap)
library(RColorBrewer)
# Column annotations for the heatmap: metastasis status and the
# localized-then-metastasized flag added to the metadata above.
annotation_col=data.frame(Distant.Mets=discovery_dataset_metadata$Distant_Mets, Loc.to.Met=discovery_dataset_metadata$Loc.to.Met)
rownames(annotation_col)=as.vector(discovery_dataset_metadata$names)
ann_colors = list(
Loc.to.Met = c(Yes='blue'),
Distant.Mets = c(Y='green', N='purple')
)
pdf('/athena/masonlab/scratch/users/nai2008/PNET_FinnertyProject/_pdfs/discovery_analysis_heatmaps_pearsonCorrelation.pdf')
# Row-scaled heatmap of the validated genes, clustering rows and columns on
# correlation distance.
pheatmap(clean_vst_counts[mm_validated_genes_w_concordantLFCs,], color=colorRampPalette(rev(brewer.pal(n = 11, name = "RdBu")))(3000),
main='Discovery Dataset; Pearson correlation',
clustering_distance_rows = "correlation", clustering_distance_cols = "correlation",
cluster_rows=TRUE, cluster_cols=TRUE, annotation_col=annotation_col, scale='row', fontsize_col=5, annotation_colors = ann_colors)
dev.off()
# Spot-check normality of one gene's cleaned expression values.
tt=clean_vst_counts[mm_validated_genes_w_concordantLFCs,]
shapiro.test(as.vector(tt[10,]))
|
841ec77d0fe5068d6a7613c2b6101fd64491c617 | b353f7d6f9f514b889c88b29f8b03f6eb219366a | /man/clean_yahoo_data.Rd | cb5d754ae51b0f1ed1616da3827c96f563f6a765 | [] | no_license | ces0491/companyDataScrapeR | 806b83fa2068267bf6435e51096e4c4125642962 | 76ba4ddb4a42003a966cce84dff238a6e7eb89fd | refs/heads/master | 2023-06-16T04:05:15.530547 | 2021-07-15T17:22:13 | 2021-07-15T17:22:13 | 291,532,715 | 1 | 0 | null | null | null | null | UTF-8 | R | false | true | 427 | rd | clean_yahoo_data.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/clean_yahoo_data.R
\name{clean_yahoo_data}
\alias{clean_yahoo_data}
\title{clean Yahoo data}
\usage{
clean_yahoo_data(scraped_data, type, frequency = NULL)
}
\arguments{
\item{scraped_data}{tbl_df of scraped data}
\item{type}{string indicating the data type, e.g. 'IS'}
\item{frequency}{string}
}
\value{
tbl_df
}
\description{
clean Yahoo data
}
|
365a625d8bbb741a30809cb1f4df9a3ef29aada5 | e8a4396d8d60cb3d530fd977c9864c651c143efa | /analysis/additional_within_cross_models_and_plots.R | ee6eb66cb2ecf5e3a95bfa0d860ea4e6c994bbfc | [
"MIT"
] | permissive | joshsbloom/yeast-16-parents | 5d8a8fb837966b9150cbb22a0dd3fd1f6bed50dd | c913c9ae7fd237329f639de02e7ec511b048730f | refs/heads/master | 2020-04-17T09:34:56.206569 | 2019-09-23T22:15:12 | 2019-09-23T22:15:12 | 166,464,371 | 1 | 1 | null | null | null | null | UTF-8 | R | false | false | 10,157 | r | additional_within_cross_models_and_plots.R | # plot composite mapping per cross
# Within-cross QTL modelling: for every cross, fit an additive multi-QTL model
# per trait and scan all pairwise QTL x QTL interaction terms.
peaksModel=list()
interactionPeaks=list()
marginalR=list()
for(cross.name in crosses) {
print(cross.name)
cross=cross.list[[cross.name]]
# Cross 'A' contains A11-prefixed segregants that are excluded from analysis.
if(cross.name=='A') { cross=subset(cross, ind=!grepl('A11', as.character(cross$pheno$id))) }
snames = as.character(cross$pheno$id)
# Per-segregant mean phenotype across replicates; remaining NAs are imputed
# with the trait mean.
subPheno=lapply(NORMpheno, function(x) x[match(snames, names(x))])
mPheno =sapply(subPheno, function(x) sapply(x, mean, na.rm=T))
mPheno=apply(mPheno,2, function(x) {x[is.na(x)]=mean(x, na.rm=T); return(x)})
g=pull.argmaxgeno(cross)
# are there fixed loci ?? (no)-------------------------------
#g.af=apply(g,2,function(x) sum(x==1))
#parents.list[[cross.name]]$fixed=(g.af==0 | g.af==nrow(g))
#fixed.loci=which(parents.list[[cross.name]]$fixed)
#if(length(fixed.loci)>0) { g=g[,-fixed.loci] }
#------------------------------------------------------------
#g.r=g[,-which(duplicated(g, MARGIN=2))]
g.s=scale(g)
#A=tcrossprod(g.s)/(ncol(g.s))
mPheno=scale(mPheno)
# Marginal trait-marker correlations (both sides standardized above).
marginalR[[cross.name]]=(crossprod(mPheno,g.s)/(nrow(mPheno)-1))
#.1581 = var exp ~.05
#.2236 = var exp ~.16
# Keep only significant peaks (q < 0.05) for this cross.
cps=cross.peaks[[cross.name]]
cps=cps[cps$q<.05,]
# remove 4NQO, YPD;;2 and YPD;;3 (trait indices 1, 38 and 39)
for(pheno in names(subPheno)[-c(1,38,39)]) {
print(pheno)
cpQTL=cps[cps$trait==pheno,]
if(length(cpQTL$pmarker)!=0) {
# Joint additive model over all detected peaks (no intercept; data scaled).
apeaks = unique(match(cpQTL$fscan.markers, colnames(g.s)))
X=data.frame(g.s[,apeaks])
fitme=lm(mPheno[,pheno]~.-1, data=X)
# Per-QTL variance explained from the sequential (type I) sums of squares.
aov.a = anova(fitme)
tssq = sum(aov.a[,2])
a.effs=(aov.a[1:(nrow(aov.a)-1),2]/tssq)
coeffs=coefficients(fitme)
cpQTL$var.exp=a.effs
cpQTL$lm.coeff=as.vector(coeffs)
# Marker names encode genomic position as "chr_pos".
cpQTL$chr=sapply(strsplit(cpQTL$pmarker, '_'), function(x) x[1])
cpQTL$pos=as.numeric(sapply(strsplit(cpQTL$pmarker, '_'), function(x) x[2]))
cpQTL$cross=cross.name
names(cpQTL)[1]='trait'
print(cpQTL)
peaksModel[[cross.name]][[pheno]]=cpQTL
}
# Pairwise interaction scan: compare the additive model against the same
# model plus one QTL x QTL interaction term, for every pair of peaks.
if(length(cpQTL$pmarker)>1) {
qtl.combs=combn(apeaks,2)
null=lm(mPheno[,pheno]~g.s[,apeaks]-1)
int.coef1=rep(NA, ncol(qtl.combs))
int.coef2=rep(NA, ncol(qtl.combs))
int.coef=rep(NA, ncol(qtl.combs))
int.pvalue=rep(NA, ncol(qtl.combs))
for(ist in 1:ncol(qtl.combs)){
full=lm(mPheno[,pheno]~g.s[,apeaks]+g.s[,qtl.combs[1,ist]]*g.s[,qtl.combs[2,ist]]-1)
# F-test of the added interaction term (second row of the model comparison).
int.pvalue[ist]=anova(null,full)$'Pr(>F)'[2]
int.coef1[ist]=coef(full)[paste0("g.s[, apeaks]",colnames(g.s)[qtl.combs[1,ist]])]
int.coef2[ist]=coef(full)[paste0("g.s[, apeaks]",colnames(g.s)[qtl.combs[2,ist]])]
int.coef[ist]=coef(full)[length(coef(full))] #anova(null,full)$'Pr(>F)'[2]
}
tqc=t(qtl.combs)
dfi=data.frame(m1=colnames(g.s)[tqc[,1]], m2=colnames(g.s)[tqc[,2]], int.coef1, int.coef2, int.coef, int.pvalue, stringsAsFactors=F)
dfi$cross=cross.name
dfi$chr1=sapply(strsplit(dfi$m1, '_'), function(x) x[1])
dfi$chr2=sapply(strsplit(dfi$m2, '_'), function(x) x[1])
dfi$pos1=as.numeric(sapply(strsplit(dfi$m1, '_'), function(x) x[2]))
dfi$pos2=as.numeric(sapply(strsplit(dfi$m2, '_'), function(x) x[2]))
dfi$trait=pheno
interactionPeaks[[cross.name]][[pheno]]=dfi
#interactions_per_trait[[pheno]]=dfi
}
}
}
#save(marginalR,file='/data/rrv2/genotyping/RData/FDR_marignalR.RData')
#save(peaksModel,file='/data/rrv2/genotyping/RData/FDR_wcPeaksModel.RData')
#save(interactionPeaks, file='/data/rrv2/genotyping/RData/FDR_wcInteractionPeaksModel.RData')
# Reload previously computed results (the loop above can be skipped on reruns).
load('/data/rrv2/genotyping/RData/FDR_marignalR.RData')
load('/data/rrv2/genotyping/RData/FDR_wcPeaksModel.RData')
load('/data/rrv2/genotyping/RData/FDR_wcInteractionPeaksModel.RData')
# Flatten the per-cross, per-trait result lists into single tables.
cross.peaks.flat=do.call('rbind', lapply(peaksModel, function(y) { do.call('rbind', y)} )) #cross.peaks)
# Genome-wide coordinate = cumulative chromosome offset + within-chromosome position.
cross.peaks.flat$gcoord=gcoord.key[cross.peaks.flat$chr]+cross.peaks.flat$pos
interactionPeaks.flat=do.call('rbind', lapply(interactionPeaks, function(y){ do.call('rbind', y)}))
# FDR control of the interaction-scan p-values at the 10% level.
qs.int=qvalue(interactionPeaks.flat$int.pvalue, fdr.level=.1)
interactionPeaks.flat$significant=qs.int$qvalues<.1
intP=interactionPeaks.flat[interactionPeaks.flat$significant,]
intP$gcoord1=gcoord.key[intP$chr1]+intP$pos1
intP$gcoord2=gcoord.key[intP$chr2]+intP$pos2
intP=na.omit(intP)
# Histogram of significant interactions per trait-cross combination, padded
# with zeros for combinations that had none (38 traits x 16 crosses).
ssi=split(intP, paste(intP$trait, intP$cross) )
hist(c(sapply(ssi, nrow), rep(0, (38*16)-length(ssi))))
glength=sum(unlist(chr.lengths))
#load('/data/rrv2/genotyping/RData/jointPeaks5.RData')
#jP=rbindlist(jointPeaks5, idcol='chromosome')
#jPs=split(jP, jP$trait)
# Joint-analysis peaks across crosses; the idcol holds the chromosome id.
jointPeaksFlat=rbindlist(jointPeaksJS, idcol='chromosome')
#data.frame(do.call('rbind', jointPeaks5), stringsAsFactors=F)
names(jointPeaksFlat)[1]='chr'
#jointPeaksFlat$chr=sapply(strsplit(jointPeaksFlat$marker, '_'), function(x) x[1])
jointPeaksFlat$pos=as.numeric(sapply(strsplit(jointPeaksFlat$fscan.markers, '_'), function(x) x[2]))
jointPeaksFlat$gpos=gcoord.key[jointPeaksFlat$chr ]+jointPeaksFlat$pos
# Pretty trait labels for the figure: rename a few traits, strip ';...'
# suffixes, and replace underscores with spaces.
utraits.orig=unique(cross.peaks.flat$trait)
utraits=utraits.orig
utraits[34]='YNB_ph8'
utraits[36]='YPD_15C'
utraits[33]='YNB_ph3'
utraits[10]='EtOH_Glu'
utraits[37]='YPD_37C'
utraits=gsub(';.*','', utraits)
utraits=gsub('_', ' ', utraits)
# Supplementary Figure 2: one image per trait, one horizontal panel per cross.
# Arrows mark within-cross QTL (direction reflects which parent's allele
# increases growth; arrow height encodes variance explained), and green
# vertical lines mark joint-analysis QTL segregating in that cross.
# NOTE(review): a PDF device is opened here but every page is drawn to the PNG
# device opened inside the loop; the PDF is never written to or closed in this
# section (see the commented-out dev.off() at the end) - confirm intent.
pdf(file=paste0('/home/jbloom/Dropbox/RR/Figures and Tables/SupplementaryFigure2.pdf'), width=11, height=8)
for(piter in 1:length(utraits)) {
png(file=paste0('/home/jbloom/Dropbox/RR/Figures and Tables/other formats/SuplementaryFigure2_', piter, '.png'), width=1100, height=800)
pheno.iter=utraits.orig[piter]
#pdf(file=paste0('/home/jbloom/Dropbox/RR/Figures and Tables/', filename.clean(pheno.iter), '_joint.pdf'), width=1024, height=800)
pcnt=0
# 16 stacked panels (one per cross), tight margins, no axis padding.
op <- par(mfrow = c(16,1),
oma = c(5,8,5,.5) + 0.01,
mar = c(0,4,0,0) + 0.01,
xaxs='i',
yaxs='i'
)
joint.peaks.toplot=jointPeaksFlat[jointPeaksFlat$trait==pheno.iter,]
joint.peaks.toplot=joint.peaks.toplot[!duplicated(joint.peaks.toplot$gpos),]
parent.vec=c('M22', 'BY', 'RM', 'YPS163', 'YJM145', 'CLIB413', 'YJM978', 'YJM454',
'YPS1009', 'I14', 'Y10', 'PW5', '273614N', 'YJM981', 'CBS2888', 'CLIB219')
glength=1.2e7
for(cross.iter in 1:length(crosses)){
cross.name=crosses[cross.iter]
# Joint peaks whose marker is present in this cross's map.
jptlookup=joint.peaks.toplot[joint.peaks.toplot$fscan.markers %in% parents.list[[cross.name]]$marker.name,]
cross.sub.p=cross.peaks.flat[cross.peaks.flat$trait==pheno.iter & cross.peaks.flat$cross==crosses[cross.iter],]
cross.sub.pi=intP[intP$trait==pheno.iter & intP$cross==crosses[cross.iter],]
mpM=marginalR[[crosses[cross.iter]]]
mpM.marker=tstrsplit(colnames(mpM), '_', type.convert=T)
mpM.gcoord=gcoord.key[mpM.marker[[1]]]+mpM.marker[[2]]
if(nrow(cross.sub.p)>0) {
#plot(0,0, type='n', xlim=c(0, glength), yaxt='n', ylab='', xaxt='n', yaxt='n', ylim=c(-1,1),cex.lab=1.5)
plot(0,0, type='n', xlim=c(0, glength), yaxt='n', ylab='', xaxt='n', yaxt='n', ylim=c(0,1),cex.lab=1.5)
abline(h=0)
abline(v=jptlookup$gpos, col='lightgreen')
axis(2,at=1,labels=parent.vec[cross.iter], cex.axis=1.5, las=2)
pcnt=pcnt+nrow(cross.sub.p)
# Orient effect signs consistently with respect to the cross's two parents.
signme=sign(cross.sub.p$lm.coeff)
if(min(grep(crosses.to.parents[[cross.iter]][1], names(parents.list[[cross.iter]])))==7) {
signme=signme
signR=-1*mpM[pheno.iter,]
} else{
signme=-1*signme
signR=mpM[pheno.iter,]
}
# flip sign ... (if negative then point to strain that increases growth)
signme=-1*signme
splus=signme==1
sminus=signme==-1
#points(mpM.gcoord, signR)
# Bin variance explained into arrow heights.
cross.sub.p$lm.ceiling=rep(.1, nrow(cross.sub.p))
cross.sub.p$lm.ceiling[cross.sub.p$var.exp>0]=.2
cross.sub.p$lm.ceiling[cross.sub.p$var.exp>.04]=.5
cross.sub.p$lm.ceiling[cross.sub.p$var.exp>.08]=.75
cross.sub.p$lm.ceiling[cross.sub.p$var.exp>.25]=1
if(sum(splus)>0){
arrows(cross.sub.p$gcoord[splus],0, cross.sub.p$gcoord[splus], cross.sub.p$lm.ceiling[splus], code=2, length=.12, lwd=4,
col=ifelse(cross.sub.p$lm.ceiling[splus]>.2, 'black', 'grey'))
}
if(sum(sminus)>0){
arrows(cross.sub.p$gcoord[sminus],cross.sub.p$lm.ceiling[sminus], cross.sub.p$gcoord[sminus],0 , code=2, length=.12, lwd=4,
col=ifelse(cross.sub.p$lm.ceiling[sminus]>.2, 'black', 'grey')
)
}
abline(v=gcoord.key, lty=2, col='lightblue')
} else {
# No QTL for this trait in this cross: draw an empty panel with guides only.
plot(0,0, type='n', xlim=c(0, max(glength)), ylim=c(0,1), xaxt='n' ) #ylab=crosses[cross.iter] ,
#abline(h=0, lty=3, col='grey')
abline(v=cumsum(genome.chr.lengths), lty=2, col='lightblue')
}
# if(nrow(cross.sub.pi)>0) {
# peak.number=c(seq_along(cross.sub.pi[,1]), c(seq_along(cross.sub.pi[,2])))
# #peak.chr=c(cross.sub.pi$chr1, cross.sub.pi$chr2)
# #peak.pos=as.numeric(sapply(strsplit(sapply(strsplit(c(cross.sub.pi.sig[,1], cross.sub.pi.sig[,2]), ':'), function (x) x[2]), '_'), function(x)x[1]))
# peak.gpos=c(cross.sub.pi$gcoord1, cross.sub.pi$gcoord2)
# text(peak.gpos, (peak.number/max(peak.number))*.9, '*', col='red', cex=4)
# }
if(cross.iter==16){ axis(1, at=gcoord.key, labels=names(gcoord.key), cex.axis=1.5)}
}
title(xlab='genomic position', ylab='', outer=TRUE, cex.lab=2,
main=paste(utraits[piter], ' ', pcnt, 'total QTL | ',
length(joint.peaks.toplot$gpos), 'joint QTL'
))
dev.off()
}
# dev.off()
|
7f060fe58c8d3475e040e55373691ab2d39b40b2 | a3da395d683014c2f04a4491f5cf3214076a82f6 | /fmd work.R | 6fc42b194ebf9d748e93a5907a75f8e8b1811207 | [] | no_license | VetMomen/UBM | f2f9ba780a8ce09aba3a2c64c29ba06d93e49a37 | d4af7f433144b4f7bf62335ee8e4ad6bb1997495 | refs/heads/master | 2020-04-02T02:25:25.759831 | 2019-03-30T13:44:54 | 2019-03-30T13:44:54 | 151,887,463 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 4,352 | r | fmd work.R | dir.create('./plots')
# Read the FMD survey responses; two columns are forced numeric, the rest
# are type-guessed by readxl.
fmd<-read_excel(path = './data sets/FMD form (Responses).xlsx',col_types = c('guess','guess','guess','guess','guess','numeric','numeric','guess','guess','guess','guess','guess','guess'))
str(fmd)
# Distinct values per column (quick sanity check of the categorical fields).
uniqus<-apply(fmd,2,unique)
#frequancy of Each type and location
# Mosaic plot of production type vs location counts.
xtab<-with(fmd,
table(`Production Type`,Location))
par(las=1)
plot(xtab,main='distribution of production type and location')
#total cap in Eche loc
# Total capacity per location, as a labelled bar chart.
cap_loc<-fmd%>%group_by(Location)%>%summarize(Cap=sum(Cap.))
cap_loc%>%ggplot(aes(Location,Cap))+
geom_col(aes(fill=Location),width = .7)+
geom_text(data = cap_loc,aes(label=Cap),vjust=.003,color='darkseagreen2')+
ylab('Capacity')+
ggtitle('Capacity of each area')+
theme(panel.background = element_rect(fill = 'black'),panel.grid.minor = element_line(colour = 'black'))
#total cap of each type
# Total capacity per production type.
cap_type<-fmd%>%group_by(`Production Type`)%>%summarize(Cap=sum(Cap.))
cap_type%>%ggplot(aes(`Production Type`,Cap,fill=`Production Type`))+
geom_col(width = .7)+
geom_text(data = cap_type,aes(label=Cap),vjust=.003,color='darkseagreen2')+
ylab('Capacity')+
ggtitle('Capacity of each type')+
theme(panel.background = element_rect(fill = 'black'),panel.grid.minor = element_line(colour = 'black'))
#mixing area with type
# Same chart faceted by location; note 'cap_type' is overwritten with each
# successively finer grouping from here on.
cap_type<-fmd%>%group_by(`Production Type`,Location)%>%summarize(Cap=sum(Cap.))
cap_type%>%ggplot(aes(`Production Type`,Cap,fill=`Production Type`))+
geom_col(width = .7)+
geom_text(data = cap_type,aes(label=Cap),vjust=.003,color='darkseagreen2')+
ylab('Capacity')+
ggtitle('Capacity of each type in each area')+
theme(panel.background = element_rect(fill = 'black'),panel.grid.minor = element_line(colour = 'black'))+
facet_wrap(.~Location)
#adding factor of infection
# Colour bars by infection status within each location facet.
cap_type<-fmd%>%group_by(`Production Type`,Location,infected)%>%summarize(Cap=sum(Cap.))
cap_type%>%ggplot(aes(`Production Type`,Cap,fill=infected))+
geom_col(width = .7)+
geom_text(data = cap_type,aes(label=Cap),vjust=.003,color='darkseagreen2')+
ylab('Capacity')+
ggtitle('Capacity of each type in each area illustrating infected herd')+
theme(panel.background = element_rect(fill = 'black'),panel.grid.minor = element_line(colour = 'black'))+
facet_grid(.~Location)
#adding vaccine type
# Facet rows by vaccine type and columns by location.
cap_type<-fmd%>%group_by(`Production Type`,Location,infected,`vacc. Type`)%>%summarize(Cap=sum(Cap.))
cap_type%>%ggplot(aes(`Production Type`,Cap,fill=infected))+
geom_col(width = .7)+
geom_text(data = cap_type,aes(label=Cap),vjust=.003,color='darkseagreen2')+
ylab('Capacity')+
ggtitle('Capacity of each area illustrating infected herd & vaccine type')+
theme(panel.background = element_rect(fill = 'black'),panel.grid.minor = element_line(colour = 'black'))+
facet_grid(`vacc. Type`~Location)
#farm location
# Interactive leaflet map of farms: marker colour encodes infection status
# (two-colour factor palette), marker radius scales with herd capacity.
color<-colorFactor(palette =c('blue','red') ,domain = fmd$infected)
fmd%>%leaflet()%>%
addProviderTiles(providers$OpenStreetMap.BlackAndWhite)%>%addCircleMarkers(lat = fmd$lat,
lng = fmd$lon,color = ~color(fmd$infected),
radius = fmd$Cap./1000)%>%
addLegend(position = 'topright',pal = color,values = ~factor(fmd$infected),title = 'Infection')
# Pie chart of each vaccine type's share of total capacity.
# (A separate 'perc' summary table previously computed here was dead code:
# the percentages are computed directly inside this pipeline.)
fmd%>%group_by(`vacc. Type`)%>%summarize(total=sum(Cap.),
percent=round((sum(Cap.)/sum(fmd$Cap.))*100,1))%>%
ggplot(aes(x='',y=total,fill=`vacc. Type`))+
geom_col(width = .3)+
# Polar transform of the stacked bar turns it into a pie/donut chart.
coord_polar(theta = 'y',start = 0,direction = 1,clip = 'on')+
theme(axis.title = element_text(face = 'bold'),axis.line = element_blank(),
panel.background = element_blank(),
axis.text = element_blank(),panel.grid = element_blank(),
axis.title.y.left = element_blank(),
axis.title.x.bottom = element_blank())+
geom_text(aes(label=percent),nudge_x = .2,hjust=.5)+
# BUG FIX: 'type' must be one of the strings "seq"/"div"/"qual"; the bare name
# 'qualitative' was an undefined object and errored when the plot was built.
scale_fill_brewer(type = "qual",palette = 'Dark2')+
labs(title = 'Vaccination type share')+
theme(plot.title = element_text(hjust = .5))
|
03976a400225f076749496f873d27f14b6b6e6d5 | 0618bcede3e643e0ce2c9b4f6ea70bceb2778b78 | /rscripts/coveragePeaks.R | 5d03973d62af23a8e3f59025b0857d551ffcdced | [] | no_license | emdann/HexamerBias | 2f3c523b374931804c12b9a85a11c68a666f007f | b8e0065d0331efcf4a0e0337277f13b6d6ee8b55 | refs/heads/master | 2021-09-21T21:03:55.383020 | 2018-08-31T12:29:34 | 2018-08-31T12:29:34 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 644 | r | coveragePeaks.R | ## COVERAGE PEAKS
library(data.table)
library(dplyr)
library(ggplot2)
# HOMER annotatePeaks output for the observed coverage peaks and for a matched
# set of random peaks (used as the comparison/null set).
peakAnn <- fread("mnt/edann/coverage_peaks/multipeaks.annotatePeaks.homer")
randAnn <- fread("mnt/edann/coverage_peaks/multipeaks.random.annotatePeaks.homer")
# Boxplot of absolute distance to the nearest TSS: real peaks vs random peaks.
pdf("AvOwork/output/covPeaks_distTSS_boxplot.pdf")
boxplot(abs(peakAnn$`Distance to TSS`), abs(randAnn$`Distance to TSS`), outline = FALSE, varwidth = TRUE, names = c("Coverage peaks", "random"), ylab='Distance to TSS')
dev.off()
# Bar chart of annotation categories with the "(...)" detail suffix stripped.
# NOTE(review): 'g' is never printed or saved, and the final pipeline below has
# no geom and is not assigned - both look like exploratory leftovers; confirm.
g <- randAnn %>% mutate(Annotation=gsub(pattern = "\\(.+\\)",replacement = "", x=Annotation)) %>%
ggplot(., aes(Annotation)) + geom_bar()
randAnn %>% ggplot(., aes(Annotation))
|
d9ab4ba748a72ba899a8d5da2b5dd422e3e83632 | ac644d6d019e20b8a54df53ad99959ea641c1d49 | /sentimentAnalysis.R | af487847bbc07653828d3d42cd6716eb84e75245 | [] | no_license | Nithya945/Crime-Trends-in-the-City | 5d65a29635234a47835561138503c2039af7ec37 | 87abc4bf67e80161bf1eb3710d4566cb9c3ddda9 | refs/heads/master | 2021-05-11T20:13:35.229892 | 2018-04-11T07:53:11 | 2018-04-11T07:53:11 | 117,435,078 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 8,451 | r | sentimentAnalysis.R | library(maptools)
library(plyr)
library(ggplot2)
library(car)
library(MASS)
library(sp)
require("RPostgreSQL")
# Project helpers: tweet.qry(), twitter.clean(), trend.idx(), multiplot(), etc.
source("/home/nithya/Desktop/Crime-Prediction-master/CrimePredictionUtil.R")
####################################
## Twitter data prerprocessing #####
####################################
# build the link to my PostgreSQL database
# NOTE(review): credentials are hard-coded here; move them to environment
# variables or a config file before sharing/deploying this script.
drv <- dbDriver("PostgreSQL")
print(drv)
con <- dbConnect(drv, host = 'localhost', port='5432',
dbname = 'postgres',user = 'postgres',
password = 'apple945')
# draw query from PostgresSQL database
# get tweets from "2014-01-01 00:00:00" to "2014-01-31 11:59:59"
Jan.2014 <- tweet.qry("2014-01-01 00:00:00", "2014-01-31 11:59:59")
print(Jan.2014)
# read chicago boundary
# Read the city boundary shapefile and transform it (EPSG:3435 -> EPSG:26971).
city.boundary = read.shapefile("/home/nithya/Desktop/Crime-Prediction-master/City_Boundary/City_Boundary/City_Boundary.shp", "poly", "+init=epsg:3435", "+init=epsg:26971")
city.boundary
# set up grid (neighborhood) for concatenating tweets
# Build a regular 1000m x 1000m grid covering the boundary's bounding box.
bb <- bbox(city.boundary) # bbox of city boundary
bb
cs <- c(1000, 1000) # cell size 1000m *1000m
cs
cc <- bb[, 1] + (cs/2) # cell offset
cc
cd <- ceiling(diff(t(bb))/cs) # number of cells per direction
cd
grid <- GridTopology(cellcentre.offset=cc, cellsize=cs, cells.dim=cd) # create a grib topology
grid
# NOTE(review): 'data' and 'proj4string' assigned below shadow base::data and
# sp::proj4string and appear unused - the SpatialGridDataFrame call right after
# rebuilds both arguments inline.
data=data.frame(id=1:prod(cd))
data
proj4string=CRS(proj4string(city.boundary))
proj4string
# conver grid topology to spatial data frame
# Each grid cell gets a sequential id (1..n); tweets are later assigned to ids.
sp_grid <- SpatialGridDataFrame(grid,
data=data.frame(id=1:prod(cd)),
proj4string=CRS(proj4string(city.boundary)))
class(sp_grid)
head(sp_grid@data,100)
str(sp_grid)
summary(sp_grid)
# Visual check: boundary, grid overlay, and grid cell centres.
plot(city.boundary, xlim=c(332777, 367345), ylim=c(552875, 594870))
plot(sp_grid, add =TRUE)
spplot(sp_grid, sp.layout = c("sp.points", SpatialPoints(coordinates(sp_grid))))
Jan.2014
# convert xy coordinate of tweets as spatial points
# Columns 7-8 of the query result hold the point coordinates (st_x, st_y).
Jan.2014.xy <- Jan.2014[7:8]
Jan.2014.xy
coordinates(Jan.2014.xy) <- ~ st_x+st_y
proj4string(Jan.2014.xy) <- proj4string(city.boundary)
points(Jan.2014.xy, pch =".")
Jan.2014.xy
# assign tweets to each grid (neighbourhood)
# Spatial overlay: for every tweet point, look up the containing grid cell id.
Jan.tweet.grid <- over(Jan.2014.xy, sp_grid)
Jan.tweet.grid
names(Jan.tweet.grid) <- "grid_id"
Jan.2014.grid <- cbind(Jan.2014, Jan.tweet.grid)
Jan.2014.grid
# convert date type
Jan.2014.grid$dates <- as.POSIXct(Jan.2014.grid$date)
Jan.2014.grid
# splite time period into every 6-hour
# Tweet timestamps are combined with the January crime timestamps before
# cutting so both series share identical 6-hour break points.
dates.combine.tweets.crime <- c(Jan.2014.grid$dates,
theft.2014.jan.to.feb[which(theft.2014.jan.to.feb$month==1),]$timestamp)
dates.combine.tweets.crime
ncol(dates.combine.tweets.crime)
factor.combine <- cut(dates.combine.tweets.crime, "6 hours")
length(factor.combine)
factor.combine
# Keep only the tweet part of the combined factor (first length(dates) entries).
Jan.2014.grid$sixhr <- factor.combine[1:length(Jan.2014.grid$dates)]
# levels(Jan.2014.grid$sixhr)[1:10]
Jan.2014.grid$sixhr_n <- as.numeric(Jan.2014.grid$sixhr)
Jan.2014.grid$dates <- NULL
#names(Jan.test.grid$date)
#class(as.POSIXlt(Jan.test.grid$created_at[1:5]))
# concatenate tweets with same grid_id and 6-hour period together
Jan.2014.paste <- ddply(Jan.2014.grid, c("sixhr_n", "grid_id"), summarise,
text_p=paste(tweet, collapse=" "))
# NOTE(review): the bare 'text' below just prints base::text's definition in an
# interactive session - looks like a typo/leftover.
text
print(Jan.2014.paste)
# clean-up twitter data using twitter.clean function
Jan.2014.paste.c <- twitter.clean(Jan.2014.paste, Jan.2014.paste$text_p)
Jan.2014.paste.c
# Drop documents whose tweets fell outside every grid cell (grid_id is NA).
row.to.keep <- !is.na(Jan.2014.paste.c$grid_id)
Jan.2014.paste.c <- Jan.2014.paste.c[row.to.keep,]
print(Jan.2014.paste.c)
#summary(Jan.2014.paste.c)
#save("Jan.2014.paste.c", file = "Capstone/Jan_2014_paste_c_6hr.Rdata")
#load("Capstone/Jan_2014_paste_c_6hr.Rdata")
###################################
### calculate snetiment score #####
###################################
Jan.2014.pol.6h <- NULL
#options(warn=0, error = recover)
# load polarity file created by Lexicon.R
# POLKEY: custom polarity frame (word -> sentiment weight) for polarity().
load("/home/nithya/POLKEY.RData")
# calculate sentiment score
# Score every concatenated grid/6-hour document. The recorded timing below
# shows a previous run took ~97 minutes, so avoid re-running casually.
system.time(
Jan.2014.pol.6h<- polarity(Jan.2014.paste.c$text1, grouping.var = NULL, polarity.frame = POLKEY,
constrain = TRUE, negators = qdapDictionaries::negation.words,
amplifiers = qdapDictionaries::amplification.words, deamplifiers = qdapDictionaries::deamplification.words,
question.weight = 0, amplifier.weight = .3, n.before = 4, n.after = 2, rm.incomplete = FALSE, digits = 3)
)
# user system elapsed
# 5777.63 1.81 5841.16
# save("Jan.2014.pol.6h",file = "Jan_2014_pol_1000m_6h.Rdata")
# load("Jan_2014_pol_1000m_6h.Rdata")
# str(Jan.2014.pol.6h)
# Jan.2013.paste.c$text1[1]
# Jan.2014.paste.c$pol <- Jan.2014.pol$all$polarity
# test <- ddply(Jan.2014.paste.c, c("sixhr"), summarise,
# difference = diff(pol,2))
#
# Jan.2014.combined <- cbind(Jan.2014.paste.c[63:33498,], test[,2])
# test2 <- Jan.2014.combined[which(Jan.2014.combined$grid_id == 587),]
# head(test2)
# plot(test2[,6]/10)
# calculate mean from raw score
Jan.2014.pol.6h$mean <- mean(Jan.2014.pol.6h$all$polarity)
Jan.2014.pol.6h
# center the data by subtracting $sum from $mean
Jan.2014.pol.6h$all$centered <- Jan.2014.pol.6h$all$polarity - Jan.2014.pol.6h$mean
# plot sentiment score without centering
qplot(Jan.2014.pol.6h$all$polarity, main = "Sentiment Histogram", xlab = "Score", ylab = "Frequency", binwidth = 0.015)
# plot centered sentiment score
qplot(Jan.2014.pol.6h$all$centered, main = "Centered Sentiment Histogram", xlab = "Score", ylab = "Frequency", binwidth = 0.075)
# insert day of month and grid_id into large polarity
# Shifted by one period per the inline note - presumably so a window's
# sentiment aligns with the *following* 6-hour crime window; confirm.
Jan.2014.pol.6h$all$sixhr_n <- Jan.2014.paste.c$sixhr_n + 1 #shift a 6-hour period
Jan.2014.pol.6h$all
Jan.2014.pol.6h$all$grid_id <- Jan.2014.paste.c$grid_id
Jan.2014.pol.6h$all
# summary(Jan.2014.pol.6h$all$grid_id)
# create data.frame which contains 6-hour period, polarity and grid_id
# NOTE(review): this reads $all$sixhr, but only $all$sixhr_n was created above -
# confirm the polarity output actually carries a 'sixhr' column.
Jan.2014.pol.6h.data <- data.frame()
Jan.2014.pol.6h.data <- data.frame(Jan.2014.pol.6h$all$sixhr, Jan.2014.pol.6h$all$grid_id, Jan.2014.pol.6h$all$polarity)
names(Jan.2014.pol.6h.data) <- c("sixhr_n", "grid_id", "polarity")
Jan.2014.pol.6h.data
# inset missing row from ddply
# Complete the (period x grid) panel: ddply only emitted combinations that had
# tweets; the left join below fills the rest with NA polarity.
vals <- expand.grid(sixhr_n = 2:123,grid_id = 1:max(Jan.2014.pol.6h.data$grid_id, na.rm = TRUE))
head(vals)
summary(vals)
Jan.2014.pol.6h.data.m <- merge(vals, Jan.2014.pol.6h.data,all.x=TRUE)
Jan.2014.pol.6h.data.m
Jan.2014.pol.6h.data.m[which(Jan.2014.pol.6h.data.m$polarity!='NA'),]
#summary(Jan.2014.pol.6h.data.m)
# impute 0 to those missing polarity
Jan.2014.pol.6h.data.m[is.na(Jan.2014.pol.6h.data.m$polarity),"polarity"] <- 0
# calculate trend index for all grid area
Jan.2014.pol.trend.6hour1 <- data.frame()
# NOTE(review): '!=' against the *string* 'NA' coerces polarity to character;
# after the zero-imputation above no true NAs remain, so this keeps every row.
Jan.2014.pol.trend.6hour <- Jan.2014.pol.6h.data.m[which(Jan.2014.pol.6h.data.m$polarity!='NA'),]
Jan.2014.pol.trend.6hour
system.time({
# Build one block per grid cell and bind them once at the end: growing a
# data.frame with rbind() inside the loop is quadratic in the number of
# cells. seq_len() also iterates zero times if max(...) is 0.
trend.blocks <- lapply(seq_len(max(Jan.2014.pol.6h.data$grid_id, na.rm = TRUE)), function(i){
sub <- subset(Jan.2014.pol.trend.6hour, grid_id == i)
# 3-period trend index of the sentiment series, threshold 0.1; trend.idx()
# comes from CrimePredictionUtil.R sourced at the top of this script.
sub$trend_3 <- trend.idx(sub$polarity,3,0.1)
sub
})
Jan.2014.pol.trend.6hour1 <- do.call(rbind, c(list(Jan.2014.pol.trend.6hour1), trend.blocks))
})
str(Jan.2014.pol.trend.6hour)
summary(Jan.2014.pol.trend.6hour)
#save(Jan.2014.pol.trend.6hour, file = "Capstone/allsub_6hr.Rdata")
#View(Jan.2014.pol.trend.6hour)
# visualize sentiment score and its trend in neighbourhood 587 (downtown
# Keep columns 3 and 7 of the polarity table for grid 587 - presumably
# polarity and the appended sixhr_n; confirm the column order.
sub.587 <- subset(Jan.2014.pol.6h$all, grid_id == 587)[,c(3,7)]
# bb.scatter.587
bb.scatter.587 <- ggplot(sub.587, aes(x = sub.587$sixhr_n, y = sub.587$polarity))
bb.scatter.587 <- bb.scatter.587 + geom_point() + geom_line() + ylim(-1, 1)
bb.scatter.587 <- bb.scatter.587 + xlab("Period") + ylab("Sentiment") + ggtitle("Neighborhood 587")
bb.scatter.587
# calsulate trend index
# Trend indices over 2- and 3-period windows (12h and 18h), threshold 0.1.
sub.587$trend_2 <- trend.idx(sub.587$polarity,2,0.1)
sub.587$trend_3 <- trend.idx(sub.587$polarity,3,0.1)
# plot trend index for each 12-hour
# NOTE(review): 'sub.587$mday' is not one of the two columns selected above,
# so this x aesthetic is likely NULL here - confirm before relying on it.
t_2.scatter.587 <- ggplot(sub.587, aes(x = sub.587$mday, y = sub.587$trend_2))
t_2.scatter.587 <- t_2.scatter.587 + geom_point() + geom_line()
t_2.scatter.587 <- t_2.scatter.587 + xlab("Date") + ylab("trend_2") + ggtitle("587")
t_2.scatter.587
# plot trend index for each 18-hour
t_3.scatter.587 <- ggplot(sub.587, aes(x = sub.587$sixhr_n, y = sub.587$trend_3))
t_3.scatter.587 <- t_3.scatter.587 + geom_point() + geom_line()
t_3.scatter.587 <- t_3.scatter.587 + xlab("Period") + ylab("Trend_3") + ggtitle("Neighborhood 587")
t_3.scatter.587
# use multiplot function to plot both trend index
multiplot(bb.scatter.587, t_3.scatter.587)
|
fdcaf7e68084750902372aeab4cda547bb61babd | d743759238db4f45f2ceef30118008e52a67ea63 | /RCode/Data/0_cleanWTO.R | 56cc5a2deda7ddfd0dbf3eb8875e9dcff71898fc | [] | no_license | cindyyawencheng/Codex | e628ace6d070c46b6c7896ff01468e39603fdfbf | 30e178c7fc8e7d295512c3c7f434c175a69d6781 | refs/heads/master | 2020-04-06T23:39:44.345165 | 2019-09-26T13:51:09 | 2019-09-26T13:51:09 | 157,876,928 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 2,605 | r | 0_cleanWTO.R | # clean WTO membership data
# Load project paths/helpers (defines 'pathMain') when run on the author's machine.
if(Sys.info()['user'] == 'cindycheng'){
source('/Users/cindycheng/Documents/Papers/Codex/RCode/setup.R')
}
# -------------------------
# Clean WTO Membership data
# --------------------------
wto = read.csv(paste0(pathMain, '/participation_development/mem-obs-list.csv'), stringsAsFactors = FALSE)
# Parse the accession date (day month-name year) and pull out the year.
# NOTE(review): '%B' month-name parsing is locale-dependent - assumes an
# English locale at run time.
wto$date = as.Date(wto$Membership.Date, '%d %B %Y')
wto$year = as.numeric(format(wto$date, "%Y"))
# observers = wto[166:188,]
# observers$Members[observers$Members == 'Iran'] = "Iran (Islamic Republic of)"
# observers$Members[observers$Members == "Lebanese Republic"] = "Lebanon"
# observers$Members[observers$Members == "Sao Tome and Principe"] = "Sao Tomé and Principe"
# observers$Members[observers$Members == "Sudan"] = "Sudan (North + South)"
#
# observers$observerDum = 1
# Keep only the member rows (per the commented block above, rows 166+ of the
# raw file are observers) and flag each row as a member in its accession year.
wto = wto[1:164,]
wto$wtoDum = 1
# Harmonise WTO member-name spellings with the naming used elsewhere in the
# project. No source name equals any target name, so a single simultaneous
# lookup is equivalent to the previous one-line-per-country replacements.
member_name_map = c(
"Bahrain, Kingdom of" = "Bahrain",
"Bolivia, Plurinational State of" = "Bolivia",
"Côte d’Ivoire" = "Côte d'Ivoire",
"European Union (formerly EC)" = "European Union",
"Kuwait, the State of" = "Kuwait",
"Kyrgyz Republic" = "Kyrgyzstan",
"Lao People’s Democratic Republic" = "Lao People's Democratic Republic",
"Moldova, Republic of" = "Republic of Moldova",
"Saudi Arabia, Kingdom of" = "Saudi Arabia",
"Slovak Republic" = "Slovakia",
"Chinese Taipei" = "Taipei, Chinese",
"Tanzania" = "Tanzania, United Republic of",
"United States" = "United States of America",
"Venezuela, Bolivarian Republic of" = "Venezuela",
"North Macedonia" = "Macedonia, The Former Yugoslav Republic of"
)
needs_rename = wto$Members %in% names(member_name_map)
wto$Members[needs_rename] = unname(member_name_map[wto$Members[needs_rename]])
# Drop the helpers so the workspace ends up in the same state as before.
rm(member_name_map, needs_rename)
# NOTE(review): the two lines below were stray debugging leftovers. They
# reference 'particip' (never created in this script) and 'part' (undefined
# anywhere), so they error on a clean top-to-bottom run; kept commented out.
# particip[which(particip$actor_name == 'European Union' & particip$event_short == 'CCEURO' & particip$year == "1996" ),]
# particip[which(part)]
# Build a balanced country-year panel: every member crossed with 1964-2018.
wtoLong = expand.grid(wto$Members %>% unique() %>% sort(), 1964:2018)
names(wtoLong) = c('Members', 'year')
wtoLong = wtoLong[order(wtoLong$Members, wtoLong$year),]
# Mark each country's accession year by matching on a country-year key.
wtoLong$wtoDum = wto$wtoDum[match(paste0(wtoLong$Members, wtoLong$year), paste0(wto$Members, wto$year))]
wtoLong$wtoDum[which(is.na(wtoLong$wtoDum ))] = 0
# Cumulative sum within country turns the accession-year spike into a
# membership indicator that stays 1 from the accession year onward.
wtoLong$wtoDum = unlist(lapply(split(wtoLong$wtoDum, wtoLong$Members), cumsum))
save(wtoLong, file = paste0(pathMain, '/participation_development/mem-obs-list_wtoclean.rda'))
|
875e4cd3a6026caae37284e75dcb23328798676b | 6e2547b89094ab73aba4b39fb79270e317d1a001 | /monte_carlo_triangle.r | 9751b3eb2dc0772700d8e5767b9f044c8cd43228 | [] | no_license | kjlockhart/RAPfish | bf3cbb81936a4eab42e2ab886b8209d3e7cedcd6 | cfb5a5ce98d626c29b9482e41371b9984e3150b9 | refs/heads/master | 2021-01-01T18:34:39.650166 | 2012-09-09T23:46:49 | 2012-09-09T23:46:49 | 5,730,374 | 2 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,370 | r | monte_carlo_triangle.r | #Triangular Distribution Monte Carlo for Rapfish
# 2012-04-15 Divya Varkey Created
#to be obtained from site
# Simulation settings: number of Monte-Carlo draws and fisheries per field.
nsim=100
num_fish=53
# Anchor score sheets, one per attribute count (anchors4.csv .. anchors12.csv).
anchor_files=c('anchors4.csv','anchors5.csv','anchors6.csv','anchors7.csv','anchors8.csv','anchors9.csv','anchors10.csv','anchors11.csv','anchors12.csv')
filenames=c('CCRF_Field1.csv','CCRF_Field2.csv','CCRF_Field3.csv','CCRF_Field4.csv','CCRF_Field5.csv','CCRF_Field6.csv')
###########generated here
# Derived per-field output names (plot images and result files).
nfield=length(filenames)
# NOTE(review): the "." in ".csv" is an unescaped regex metacharacter; it works
# for these names, but strsplit(..., fixed = TRUE) would be more robust.
discipline.names =strsplit(filenames, ".csv")
images_tri=paste(discipline.names,"_Triangle_MC",".jpg",sep="")
res_tri=paste("MC_Triangle_",discipline.names,".csv",sep="")
# Project helpers: loaddata()/mdscale() and the triangular sampler rtri().
source("rapfish_functions.R")
source("functiontri.r")
# Row offsets into the stacked score sheets read by Trig_RapfishMC(): rows
# 1..num_fish are the point scores, L1..L2 and U1..U2 index the blocks the
# function uses as lower ('lb') and upper ('ub') bounds.
L1=num_fish+1
L2=L1+num_fish-1
U1=L2+1
U2=U1+num_fish-1
####################TRIANGULAR MC
# Triangular-distribution Monte Carlo for Rapfish attribute scores.
#
# fisheries.all stacks the observed scores (rows 1:num_fish), the lower bounds
# (rows L1:L2) and the upper bounds (rows U1:U2; L1..U2 are globals defined
# above). For every fishery/attribute cell, nsim triangular deviates are drawn
# with mode = observed score, then each simulated score matrix (with the
# attribute anchors prepended) is MDS-scaled via mdscale().
#
# Returns a list with:
#   mc_init     - num_fish x n_att x nsim array of simulated raw scores
#   fish_mc_res - (num_fish + n_anchors) x 2 x nsim array of scaled ordinations
Trig_RapfishMC <- function(fisheries.all, num_fish, nsim) {
  n_att <- ncol(fisheries.all)
  obs_scores <- fisheries.all[1:num_fish, ]
  lower <- fisheries.all[L1:L2, ]
  upper <- fisheries.all[U1:U2, ]
  anchors <- loaddata(anchor_files[n_att - 3])
  colnames(anchors) <- colnames(fisheries.all)
  # Draw nsim deviates per cell; the fishery-then-attribute loop order is kept
  # so the RNG stream matches the original implementation.
  mc_init <- array(0, dim = c(num_fish, n_att, nsim))
  for (fish in 1:num_fish) {
    for (att in 1:n_att) {
      mc_init[fish, att, ] <- rtri(nsim, lower[fish, att], upper[fish, att], obs_scores[fish, att])
    }
  }
  # Scale every simulated score matrix (rounded to 1 decimal, anchors first).
  fish_mc_res <- array(0, dim = c(num_fish + nrow(anchors), 2, nsim))
  for (sim in 1:nsim) {
    sim_scores <- round(mc_init[1:num_fish, 1:n_att, sim], 1)
    colnames(sim_scores) <- colnames(fisheries.all)
    fish_mc_res[, , sim] <- mdscale(rbind(anchors, sim_scores))
  }
  list(mc_init = mc_init, fish_mc_res = fish_mc_res)
}
###############################MC PLOTS
# For every field file: run the triangular MC, plot the MC ordination cloud on
# top of the observed Rapfish ordination, and write per-fishery quantile
# summaries of the simulated X/Y scores to CSV.
for(i in 1:nfield) {
  fisheries.all = loaddata(filenames[i])
  tt=Trig_RapfishMC(fisheries.all,num_fish,nsim)
  n_att=ncol(fisheries.all)
  fisheries.dat=fisheries.all[1:num_fish,]
  anchors=loaddata(anchor_files[n_att-3])
  colnames(anchors)<-colnames(fisheries.all)
  fisheries.raw=rbind(anchors,fisheries.dat)
  fisheries.scaled = mdscale(fisheries.raw)
  n_an=nrow(anchors)
  # Rows plot1:plot2 of the scaled matrix are the fisheries (anchors first).
  plot1=n_an+1
  plot2=n_an+num_fish
  cols=rainbow(num_fish,start=0, end=.7)
  jpeg(filename=images_tri[i],width=20,height=16,units="cm",res=500)
  # ifelse() with a scalar test evaluates only the selected branch here,
  # so exactly one of the two plotting functions is called.
  Res=ifelse(nfield>30,RAPplot1(fisheries.scaled,num_fish,n_an),RAPplot2(fisheries.scaled,num_fish,n_an))
  mtext(side=3, line=1, "Triangular MC",adj=1)
  mtext(side=3, line=1, discipline.names[i],adj=0)
  # Overlay every MC replicate as a cloud of tiny points.
  for(m in 1:nsim) {
    mcplot=tt$fish_mc_res[plot1:plot2,,m]
    mcplot = mcplot[order(fisheries.scaled[plot1:plot2,1]),]
    points(mcplot,xlab="",ylab="",col=cols,pch='.')
  }
  dev.off()
  # Summary table: columns 1-6 = X score + X quantiles, 7-12 = Y score + Y quantiles.
  mc_summ=matrix(data=0,nrow=num_fish,ncol=12)
  s_mcres=tt$fish_mc_res[plot1:plot2,,]
  for(fs in 1:num_fish) {
    xx=round(quantile(s_mcres[fs,1,],probs=c(0.5,0.25,0.75,0.025,0.975)),4)
    yy=round(quantile(s_mcres[fs,2,],probs=c(0.5,0.25,0.75,0.025,0.975)),4)
    mc_summ[fs,2:6]=xx
    mc_summ[fs,8:12]=yy
  }
  mc_summ[,1]=round(fisheries.scaled[plot1:plot2,1],4)
  # BUGFIX: the Y scores were written to column 5 (the X "2.5%" quantile,
  # clobbering it) while column 7 ("Y_Scores" per the colnames below) stayed 0.
  mc_summ[,7]=round(fisheries.scaled[plot1:plot2,2],4)
  colnames(mc_summ)<-c("X_Scores","Median","25%","75%","2.5%","97.5%","Y_Scores","Median","25%","75%","2.5%","97.5%")
  rownames(mc_summ)<-rownames(fisheries.dat)
  write.csv(mc_summ,res_tri[i])
}
|
04c1558ea820f5c27f3db676871be004005d85bb | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/rscala/tests/testthat.R | bb7f800c6e94d2efc0c4de68e19c27a4d08ab541 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 56 | r | testthat.R | library(testthat)
# Load the package under test and run its testthat suite.
library(rscala)
test_check("rscala")
|
b0aa2a7c93c0e0782ae5da4389f1fca1a212a226 | 0da7e603de44ac6eb0a23381aa194b22563b02d0 | /03_Analysis/PopGenNet20210915.R | ec11f784f7b2b89f1b2aea0381cad44191091927 | [] | no_license | efronhofer/PopGenNet | 5eddea7815afdb7f870b477ef563be57aef6e906 | 6f83abcdbd301b8a1c8efc0766338c252e270226 | refs/heads/master | 2023-04-07T20:51:35.080945 | 2021-09-29T06:04:46 | 2021-09-29T06:04:46 | 321,404,193 | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 169,930 | r | PopGenNet20210915.R | # PopGenNet analysis
# 2021-09-15
# Author: Roman Alther, Eawag, Duebendorf, Switzerland
# Works in R ver. 3.6.1 and 4.0.3 (tested on GNU/Linux, MacOS, Windows) with RStudio ver. 1.3.1093
#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#*#
#### PREPARATION -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ####
rm(list=ls()) # start from scratch
existing_data=T # Should the analysis run with pre-collated and attached data from the authors? Otherwise the required raw data need to be prepared using the script 'PopGenNet_Prep_20201210.R', creating an output folder in '02_Data_prep'.
internal=F # defaults to FALSE, but was set to TRUE for publication preparation (lab internal use)
log_trans=T # log-transform some explanatory variables ("betw_undir","betw_dir","degree","catch")
critval <- 1.96 ## approx 95% confidence interval (plus/minus x times SD)
fst=T # prepares figures and analyses for Fst as well
# setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
WD <- getwd() # save current directory (should correspond to 03_Analysis, source script from there)
#* Load data ####
setwd("..")
DF <- getwd()
if (internal){
prep_folder <- "Output20210913"
rdata <- "PopGenNet_data"
}else{
prep_folder <- "Output"
rdata <- "PopGenNet_data"
}
load(paste0("02_Data_prep/",rdata,".RData"))
setwd(WD)
if (internal){
load("Gfos_data_20200925.RData")
}
output <- format(Sys.time(), "%Y%m%d")
##* Packages ####
# Check if custom-made packages OpenSwissRiverPlot and MultiPanel are already installed, else install from SWITCHdrive
if (!"OpenSwissRiverPlot" %in% installed.packages()[,"Package"]){
ulib <- NULL # option to define path for user defined library -> change NULL to desired path
source("https://drive.switch.ch/index.php/s/kgoAUIbqxYc92YP/download") # install OpenSwissRiverPlot
# alternatively you may install from the included .tgz file
# install.packages("OpenSwissRiverPlot.tgz", repos = NULL, type="source", lib=ulib, INSTALL_opts="--no-multiarch")
}
if (!"MultiPanel" %in% installed.packages()[,"Package"]){
ulib <- NULL # option to define path for user defined library -> change NULL to desired path
source("https://drive.switch.ch/index.php/s/tdaTpPUM7rH1P4X/download") # install MultiPanel
# alternatively you may install from the included .tgz file
# install.packages("MultiPanel.tgz", repos = NULL, type="source", lib=ulib, INSTALL_opts="--no-multiarch")
}
# Load CRAN packages
library(igraph) # to do network "stuff", works with version 1.2.4.2
# library(jtools) # for "easy" model output (function summ()), works with version 2.1.1
# library(rsq) # for R-squared (function rsq()), works with version 2.0
# library(vegan) # for Mantel tests (FST by instream distance), works with version 2.5-6
# library(MuMIn) # for model selection using dredge(), works with version 1.43.17
# Load custom packages and check for updates
if (internal){
# Load lab internal package for publication figure preparation
library(SwissRiverPlot) # to plot maps of Switzerland, works with version 0.4-2
update_SRP()
}else{
library(OpenSwissRiverPlot) # to plot maps of Switzerland, works with version 0.4-0
update_OSRP()
}
library(MultiPanel) # to plot multipanel figures, works with version 0.6-3
update_MultiPanel()
#* Figures preparation ####
##*** Figure format ####
pdf=F # set to TRUE if figures should be prepared as PDF
##*** Size ####
fig.width=12 # standard figure width in inches
fig.height=9 # standard figure height in inches
##*** Labels ####
# Plotmath labels for the two cryptic species and the simulation data.
label_A <- expression(italic(G.~fossarum)~"type A") # italic G. fossarum A
label_B <- expression(italic(G.~fossarum)~"type B") # italic G. fossarum B
label_mod <- "Simulation data"
# Simulation parameter levels: dispersal rate (d), upstream dispersal weight
# (W) and carrying-capacity scaling (K); D/W/K hold the matching folder names.
D_label <- c("0.001","0.01","0.1")
W_label <- c("0","0.5","1")
K_label <- c("0","1")
D <- paste0("disp_",D_label)
W <- paste0("w_up_",W_label)
K <- paste0("K_scale_",K_label)
dlab <- c(as.expression(bquote(italic(d)~"="~.(D_label[[1]]))), # d = 000.1
as.expression(bquote(italic(d)~"="~.(D_label[[2]]))), # d = 00.1
as.expression(bquote(italic(d)~"="~.(D_label[[3]])))) # d = 0.1
wlab <- c(as.expression(bquote(italic(W)~"="~.(W_label[[1]]))), # W = 0.0
as.expression(bquote(italic(W)~"="~.(W_label[[2]]))), # W = 0.5
as.expression(bquote(italic(W)~"="~.(W_label[[3]])))) # W = 1.0
klab_short <- c(as.expression(bquote(italic(K)~"="~.(K_label[[1]]))), # K = 0
as.expression(bquote(italic(K)~"="~.(K_label[[2]])))) # K = 1
klab <- c("Scaling: No","Scaling: Yes")
labs_short <- expand.grid(K_label, W_label, D_label)
# labs <- expand.grid(klab_short, wlab, dlab)
# labs_comb <- sprintf('%s; %s; %s', labs[,3], labs[,2], labs[,1])
# Combined parameter labels for all 18 d/W/K scenarios (ordered d, then W, then K).
labs_comb <- c(as.expression(bquote(italic(d)~"="~.(D_label[[1]])*";"~italic(W)~"="~.(W_label[[1]])*";"~italic(K)~"="~.(K_label[[1]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[1]])*";"~italic(W)~"="~.(W_label[[1]])*";"~italic(K)~"="~.(K_label[[2]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[1]])*";"~italic(W)~"="~.(W_label[[2]])*";"~italic(K)~"="~.(K_label[[1]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[1]])*";"~italic(W)~"="~.(W_label[[2]])*";"~italic(K)~"="~.(K_label[[2]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[1]])*";"~italic(W)~"="~.(W_label[[3]])*";"~italic(K)~"="~.(K_label[[1]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[1]])*";"~italic(W)~"="~.(W_label[[3]])*";"~italic(K)~"="~.(K_label[[2]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[2]])*";"~italic(W)~"="~.(W_label[[1]])*";"~italic(K)~"="~.(K_label[[1]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[2]])*";"~italic(W)~"="~.(W_label[[1]])*";"~italic(K)~"="~.(K_label[[2]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[2]])*";"~italic(W)~"="~.(W_label[[2]])*";"~italic(K)~"="~.(K_label[[1]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[2]])*";"~italic(W)~"="~.(W_label[[2]])*";"~italic(K)~"="~.(K_label[[2]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[2]])*";"~italic(W)~"="~.(W_label[[3]])*";"~italic(K)~"="~.(K_label[[1]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[2]])*";"~italic(W)~"="~.(W_label[[3]])*";"~italic(K)~"="~.(K_label[[2]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[3]])*";"~italic(W)~"="~.(W_label[[1]])*";"~italic(K)~"="~.(K_label[[1]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[3]])*";"~italic(W)~"="~.(W_label[[1]])*";"~italic(K)~"="~.(K_label[[2]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[3]])*";"~italic(W)~"="~.(W_label[[2]])*";"~italic(K)~"="~.(K_label[[1]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[3]])*";"~italic(W)~"="~.(W_label[[2]])*";"~italic(K)~"="~.(K_label[[2]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[3]])*";"~italic(W)~"="~.(W_label[[3]])*";"~italic(K)~"="~.(K_label[[1]]))),
as.expression(bquote(italic(d)~"="~.(D_label[[3]])*";"~italic(W)~"="~.(W_label[[3]])*";"~italic(K)~"="~.(K_label[[2]]))))
# Axis labels for the population-genetic summary statistics.
lab_Ar <- "Mean allelic richness"
lab_Ho <- "Mean observed heterozygosity"
lab_Hs <- "Expected heterozygosity"
lab_Ar_short <- "Mean Ar"
lab_Ho_short <- "Mean Ho"
lab_Hs_short <- "He"
# Labels for the perpendicular-offset fit measures.
measure1 <- "SPO = Sum of perpendicular offsets"
measure1_short <- "SPO"
measure2 <- "MPO = Median of perpendicular offsets"
measure2_short <- "MPO"
measure3 <- "Perpendicular offset"
measure3a <- "perpendicular offsets"
measure4 <- "DMPO"
# Subpanel labels and text sizes/rounding for figure annotations.
lab_sub <- c("(a)","(b)","(c)","(d)","(e)","(f)")
sum_digits <- 1
sum_cex <- 1.4
median_digits <- 3
median_cex <- 1.4
##*** Colors ####
# Colors for two species
col_Gfos_A <- "#bf812d" # RGB 191, 129, 45; yellowish or orange
col_Gfos_B <- "#35978f" # RGB 53, 151, 143; turquoise
# Color for Gammarus fossarum complex
col_Gfos <- "#9970ab"
# Color for model data
colMod <- rgb(70,70,70,max=255)
# Colors for waterways
col_water <- "darkgrey"
col_rhine <- "lightgrey"
# Two-digit hex alpha values appended to hex colours for transparency.
alpha1 <- "CC" # 80% alpha, check https://gist.github.com/lopspower/03fb1cc0ac9f32ef38f4 for transparency code
alpha2 <- "80" # 50% alpha, check https://gist.github.com/lopspower/03fb1cc0ac9f32ef38f4 for transparency code
white_transparent <- paste0("#FFFFFF",alpha2) # Transparent white
# Preparation for heatmap style color palette
col_pal <- c('dark red','white','navy blue')
col_fun <- colorRamp(col_pal)
# Convert 0-255 red/green/blue components to a "#RRGGBB" hex colour string.
rgb2hex <- function(r, g, b) {
  rgb(r, g, b, maxColorValue = 255)
}
# Discretise the dark-red/white/navy ramp into paletteLength colours.
paletteLength <- 100 # how finely should the color ramp be divided
my_palette <- colorRampPalette(col_pal)(n = paletteLength)
#* Functions ####
#### Function for GLM and prediction appending to data
# Fit a GLM (optionally followed by backward stepwise selection), append the
# fitted values and +/- `sign` * SE confidence bounds to `data` as new columns,
# and return the augmented data together with the model object(s).
#
# Arguments:
#   model  - model formula (anything formula() accepts)
#   data   - data.frame the model is fitted on; predictions are row-aligned
#   name   - label used to build the new column names, e.g. "glm_<name>_fit"
#   family - GLM family passed on to glm()
#   sign   - multiplier for the standard error (e.g. 1.96 for ~95% CI)
#   step   - if TRUE, run backward selection via step() and predict from the
#            selected model; columns are then prefixed "sglm_" instead of "glm_"
#
# Returns a list: [[1]] augmented data, [[2]] the full GLM fit, and — when
# step = TRUE — [[3]] the step-selected fit.
glm.bind <- function(model, data, name, family, sign, step = FALSE){
  full_fit <- glm(formula(model), data, family = family)
  if (step){
    sel_fit <- step(full_fit) # backward selection
    pred <- predict(sel_fit, type = "response", se.fit = TRUE)
    prefix <- paste0("sglm_", name)
  } else {
    pred <- predict(full_fit, type = "response", se.fit = TRUE)
    prefix <- paste0("glm_", name)
  }
  # Column order matches the original implementation: fit, lower, upper.
  bounds <- cbind(pred$fit,
                  pred$fit - (sign * pred$se.fit),
                  pred$fit + (sign * pred$se.fit))
  colnames(bounds) <- paste0(prefix, c("_fit", "_lwr", "_upr"))
  data <- cbind(data, bounds)
  if (step){
    list(data, full_fit, sel_fit)
  } else {
    list(data, full_fit)
  }
}
# Linear-model analogue of glm.bind(): fit an LM (optionally followed by
# backward stepwise selection), append the fitted values and +/- `sign` * SE
# confidence bounds to `data` as new columns, and return the augmented data
# together with the model object(s).
#
# Arguments:
#   model - model formula (anything formula() accepts)
#   data  - data.frame the model is fitted on; predictions are row-aligned
#   name  - label used to build the new column names, e.g. "lm_<name>_fit"
#   sign  - multiplier for the standard error (e.g. 1.96 for ~95% CI)
#   step  - if TRUE, run backward selection via step() and predict from the
#           selected model; columns are then prefixed "slm_" instead of "lm_"
#
# Returns a list: [[1]] augmented data, [[2]] the full LM fit, and — when
# step = TRUE — [[3]] the step-selected fit.
lm.bind <- function(model, data, name, sign, step = FALSE){
  full_fit <- lm(formula(model), data)
  if (step){
    sel_fit <- step(full_fit) # backward selection
    pred <- predict(sel_fit, type = "response", se.fit = TRUE)
    prefix <- paste0("slm_", name)
  } else {
    pred <- predict(full_fit, type = "response", se.fit = TRUE)
    prefix <- paste0("lm_", name)
  }
  # Column order matches the original implementation: fit, lower, upper.
  bounds <- cbind(pred$fit,
                  pred$fit - (sign * pred$se.fit),
                  pred$fit + (sign * pred$se.fit))
  colnames(bounds) <- paste0(prefix, c("_fit", "_lwr", "_upr"))
  data <- cbind(data, bounds)
  if (step){
    list(data, full_fit, sel_fit)
  } else {
    list(data, full_fit)
  }
}
#### Function for GLM plots
# wrapper function to prepare the figures of the GLM output, including confidence intervals (as defined in glm.bind(sign=x))
#
# Plots the response column `x` against predictor column `y` of `dat`,
# separately for the two species (dat$spec == "A"/"B"), overlaying the fitted
# line and the "<model>_lwr"/"<model>_upr" confidence band columns appended by
# glm.bind()/lm.bind(). `model` is the column-name prefix (e.g. "lm_Ar_full").
# Default colours and the legend labels come from globals defined above
# (col_Gfos_A, col_Gfos_B, label_A, label_B). Called for its plotting side
# effects; modifies par(mar) without restoring it.
GLMplot <- function(x,y,dat,model,xlabel,ylabel,ylim=NULL,axislog="", col1=col_Gfos_A, col2=col_Gfos_B, pt.cex=1, CI_border=T, trans=0.3, xrev=F, xax=NULL, yax=NULL, pointtrans=F, cex.lab=2, cex.axis=1.5, legend=T, cex.legend=1.5){
# Semi-transparent versions of the two species colours (for CI bands/points).
col1trans <- rgb(col2rgb(col1)[1,]/255,col2rgb(col1)[2,]/255,col2rgb(col1)[3,]/255,trans)
col2trans <- rgb(col2rgb(col2)[1,]/255,col2rgb(col2)[2,]/255,col2rgb(col2)[3,]/255,trans)
# Build formulas "x ~ y" for the raw data and for the fit/upper/lower columns.
form <- reformulate(y, response = x)
formfit <- reformulate(y, response = paste0(model,"_fit"))
formupr <- reformulate(y, response = paste0(model,"_upr"))
formlwr <- reformulate(y, response = paste0(model,"_lwr"))
xcol <- which(colnames(dat)==x)
ycol <- which(colnames(dat)==y)
lwrcol <- which(colnames(dat)==paste0(model,"_lwr"))
uprcol <- which(colnames(dat)==paste0(model,"_upr"))
# Sort by the predictor so lines/polygons are drawn left-to-right.
DATAordered <- dat[order(dat[,ycol]),]
left <- min(DATAordered[,ycol])
right <- max(DATAordered[,ycol])
if (xrev==T){
xrange <- c(right,left)
}else{
xrange <- c(left,right)
}
if (pointtrans==T){
col1point <- col1trans
col2point <- col2trans
}else{
col1point <- col1
col2point <- col2
}
par(mar=c(3.1+cex.lab, 3.1+cex.lab, 0.5, 0.5))
plot(form, dat, type = "n", las = 1, bty = "l",
xlab=xlabel,
ylab=ylabel,
xlim=xrange,
ylim=ylim,
log=axislog,
xaxt=xax, yaxt=yax, cex.lab=cex.lab, cex.axis=cex.axis)
# Confidence bands per species (polygon traces lower bound back along upper).
polygon(c(rev(DATAordered[,ycol][DATAordered$spec=="A"]), DATAordered[,ycol][DATAordered$spec=="A"]),
c(rev(DATAordered[,lwrcol][DATAordered$spec=="A"]), DATAordered[,uprcol][DATAordered$spec=="A"]),
col = col1trans, border = NA)
polygon(c(rev(DATAordered[,ycol][DATAordered$spec=="B"]), DATAordered[,ycol][DATAordered$spec=="B"]),
c(rev(DATAordered[,lwrcol][DATAordered$spec=="B"]), DATAordered[,uprcol][DATAordered$spec=="B"]),
col = col2trans, border = NA)
# Data points and fitted lines per species (dashed CI borders if requested).
points(form, data = subset(DATAordered, spec == "A"), pch = 16, col = col1point, cex=pt.cex)
points(form, data = subset(DATAordered, spec == "B"), pch = 16, col = col2point, cex=pt.cex)
lines(formfit, data = subset(DATAordered, spec == "A"), lwd = 2.5, col=col1)
if(CI_border){lines(formupr, data = subset(DATAordered, spec == "A"), lwd = 2, lty=2, col=col1)}
if(CI_border){lines(formlwr, data = subset(DATAordered, spec == "A"), lwd = 2, lty=2, col=col1)}
lines(formfit, data = subset(DATAordered, spec == "B"), lwd = 2.5, col=col2)
if(CI_border){lines(formupr, data = subset(DATAordered, spec == "B"), lwd = 2, lty=2, col=col2)}
if(CI_border){lines(formlwr, data = subset(DATAordered, spec == "B"), lwd = 2, lty=2, col=col2)}
if (legend){
legend("bottomright",c(label_A,label_B),pch = 16, col = c(col1,col2), bty="n", cex=cex.legend)
}
}
#### Function to calculate Euclidean distance
# Straight-line (L2) distance between two numeric vectors of equal length.
# (Based on https://stackoverflow.com/a/24747155 by user Shambho.)
euc.dist <- function(x1, x2){
  delta <- x1 - x2
  sqrt(sum(delta * delta))
}
#### Function to find endpoint for a perpendicular segment from the point (x0,y0) to the line
# Returns the original point (x0, y0) together with the foot of the
# perpendicular dropped onto the line y = a + b*x, as a named list
# (x0, y0, x1, y1). (Based on https://stackoverflow.com/a/30399576 by MrFlick.)
perp.segment.coord <- function(x0, y0, a = 0, b = 1){
  # Orthogonal projection of (x0, y0) onto the line defined by intercept a
  # and slope b (e.g. from an lm fit).
  foot_x <- (x0 + b * y0 - a * b) / (1 + b^2)
  foot_y <- a + b * foot_x
  list(x0 = x0, y0 = y0, x1 = foot_x, y1 = foot_y)
}
#### Function to calculate decimal variant of ceiling()
# Round `x` upwards at the given number of decimal places (vectorised).
# (Based on https://stackoverflow.com/a/59861612/6380381 by user Ferroao.)
ceiling_dec <- function(x, decimals = 1) {
  scale_factor <- 10^decimals
  ceiling(x * scale_factor) / scale_factor
}
#### Function to specify decimals
# Format `x` as a character string with exactly `k` decimal places. With
# formatC = TRUE the number is formatted via formatC(), otherwise via
# format(); surrounding whitespace is stripped in both cases.
# (Adapted from https://stackoverflow.com/a/12135122/6380381.)
specify_decimal <- function(x, k, formatC = TRUE){
  rounded <- round(x, digits = k)
  formatted <- if (formatC) {
    formatC(rounded, digits = k, format = "f")
  } else {
    format(rounded, nsmall = k)
  }
  trimws(formatted)
}
#### DATA PREPARATION -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ####
# Extract node-level attributes (upstream distance, catchment size, network
# centralities) from the igraph network `net` for the simulation sites and the
# two empirical site sets, matching by vertex name.
updist_Mod <- updist[match(modsite,V(net)$name)]
catch_Mod <- V(net)$Total_Catch[match(modsite,V(net)$name)]
catch_A_red <- V(net)$Total_Catch[match(microsite_A_red,V(net)$name)] # prepare for Gfos A as well (not preexisting as such)
catch_B <- V(net)$Total_Catch[match(microsite_B,V(net)$name)] # prepare for Gfos B as well (not preexisting as such)
betw_undir_Mod <- betw_undir[match(modsite,V(net)$name)]
clos_undir_Mod <- clos_undir[match(modsite,V(net)$name)]
degree_undir_Mod <- degree_undir$res[match(modsite,V(net)$name)]
# Collate the simulated population-genetic summaries (Ar, Ho, Hs) for all 18
# d/W/K parameter combinations into one table per statistic, with sites sorted
# by decreasing upstream distance. With existing_data = TRUE, the pre-collated
# tables are loaded from PopGenData.RData instead.
if(!existing_data){
##*** PopGen table preparation ####
# One row per empirical site; column 1 = upstream distance, columns 2..19 = one
# scenario each.
Ar_modelled <- data.frame(matrix(nrow=length(empiricaldata), ncol=length(D)*length(W)*length(K)+1))
rownames(Ar_modelled) <- adj.mat.names[empiricaldata]
Ho_modelled <- data.frame(matrix(nrow=length(empiricaldata), ncol=length(D)*length(W)*length(K)+1))
rownames(Ho_modelled) <- adj.mat.names[empiricaldata]
Hs_modelled <- data.frame(matrix(nrow=length(empiricaldata), ncol=length(D)*length(W)*length(K)+1))
rownames(Hs_modelled) <- adj.mat.names[empiricaldata]
r <- 0
for (d in 1:length(D)){ # looping over dispersal rates
for (w in 1:length(W)){ # looping over dispersal directionalities
for (k in 1:length(K)){ # looping over carrying capacities
r <- r+1
label_Mod <- paste0("D=",D_label[d],", W_up=",W_label[w],", K=",K_label[k])
# Loads scenario-specific objects (meanAr_Mod, meanHo_Mod, meanHs_Mod, ...).
load(paste0(DF,"/02_Data_prep/",prep_folder,"/IndPopGenData_",D[d],"_",W[w],"_",K[k],".Rdata"))
orderMod <- order(updist_Mod, decreasing = T)
if (r==1){
match_Mod <- match_Mod[orderMod]
}
#*** Mean Ar by upstream distance ####
updist_Mod_plot <- updist_Mod[orderMod]
meanAr_Mod_updist <- meanAr_Mod[orderMod]
orderA_red <- order(updist_A_red, decreasing = T)
updist_A_red_plot <- updist_A_red[orderA_red]
meanAr_A_red_updist <- meanAr_A_red[orderA_red]
orderB <- order(updist_B, decreasing = T)
updist_B_plot <- updist_B[orderB]
meanAr_B_updist <- meanAr_B[orderB]
#*** Mean Ho by upstream distance ####
updist_Mod_plot <- updist_Mod[orderMod]
meanHo_Mod_updist <- meanHo_Mod[orderMod]
orderA_red <- order(updist_A_red, decreasing = T)
updist_A_red_plot <- updist_A_red[orderA_red]
meanHo_A_red_updist <- meanHo_A_red[orderA_red]
orderB <- order(updist_B, decreasing = T)
updist_B_plot <- updist_B[orderB]
meanHo_B_updist <- meanHo_B[orderB]
#*** He by upstream distance ####
updist_Mod_plot <- updist_Mod[orderMod]
meanHs_Mod_updist <- meanHs_Mod[orderMod]
orderA_red <- order(updist_A_red, decreasing = T)
updist_A_red_plot <- updist_A_red[orderA_red]
meanHs_A_red_updist <- meanHs_A_red[orderA_red]
orderB <- order(updist_B, decreasing = T)
updist_B_plot <- updist_B[orderB]
meanHs_B_updist <- meanHs_B[orderB]
# save modelled popgen values to table
if (r==1){
Ar_modelled[,1] <- updist_Mod_plot
colnames(Ar_modelled)[1] <- "upstream_distance"
}
Ar_modelled[,1+r] <- meanAr_Mod_updist
colnames(Ar_modelled)[r+1] <- paste0(D[d],"_",W[w],"_",K[k])
# save modelled popgen values to table
if (r==1){
Ho_modelled[,1] <- updist_Mod_plot
colnames(Ho_modelled)[1] <- "upstream_distance"
}
Ho_modelled[,1+r] <- meanHo_Mod_updist
colnames(Ho_modelled)[r+1] <- paste0(D[d],"_",W[w],"_",K[k])
# save modelled popgen values to table
if (r==1){
Hs_modelled[,1] <- updist_Mod_plot
colnames(Hs_modelled)[1] <- "upstream_distance"
}
Hs_modelled[,1+r] <- meanHs_Mod_updist
colnames(Hs_modelled)[r+1] <- paste0(D[d],"_",W[w],"_",K[k])
} # end looping over carrying capacities
} # end looping over dispersal directionalities
} # end looping over dispersal rates
}else{
load("PopGenData.RData")
}
#### EXPLANATORY VARIABLES -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ####
# check for correlations
# Kendall correlations among candidate predictors (responses and redundant
# columns excluded).
expl.var1 <- DATA[,!(colnames(DATA) %in% c("meanAr","meanHo","meanHs","network_match","spec","log_updist","log_catch","clos_undir","clos_dir"))]
cor.var1 <- cor(expl.var1, method = "kendall")
# transform explanatory variables
# Log-transform selected predictors in place; -Inf (from log(0)) is reset to 0.
if(log_trans){
DATA[,c("betw_undir","betw_dir","degree","catch")] <- log(DATA[c("betw_undir","betw_dir","degree","catch")])
DATA[DATA==-Inf] <- 0
}
# Correlations among the final (possibly transformed) predictor set.
expl.var <- DATA[,!(colnames(DATA) %in% c("meanAr","meanHo","meanHs","degree","betw_undir","clos_dir","network_match","spec","log_updist","log_catch","clos_undir","std_clos_dir"))]
cor.var <- cor(expl.var, method = "kendall")
#### MODELLING EMPIRICAL DATA -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ####
##*** Model settings ####
#### Define ranges for predict function
# Evenly spaced grids over the observed range of each predictor (1000 steps
# for centralities; 1000-m steps for the distance variables), per species.
range_betw_undir_B <- seq(min(betw_undir_B),max(betw_undir_B),(max(betw_undir_B)-min(betw_undir_B))/1000)
range_betw_undir_A_red <- seq(min(betw_undir_A_red),max(betw_undir_A_red),(max(betw_undir_A_red)-min(betw_undir_A_red))/1000)
range_clos_undir_B <- seq(min(clos_undir_B),max(clos_undir_B),(max(clos_undir_B)-min(clos_undir_B))/1000)
range_clos_undir_A_red <- seq(min(clos_undir_A_red),max(clos_undir_A_red),(max(clos_undir_A_red)-min(clos_undir_A_red))/1000)
range_degree_undir_B <- seq(min(degree_undir_B),max(degree_undir_B),(max(degree_undir_B)-min(degree_undir_B))/1000)
range_degree_undir_A_red <- seq(min(degree_undir_A_red),max(degree_undir_A_red),(max(degree_undir_A_red)-min(degree_undir_A_red))/1000)
range_betw_dir_B <- seq(min(betw_dir_B),max(betw_dir_B),(max(betw_dir_B)-min(betw_dir_B))/1000)
range_betw_dir_A_red <- seq(min(betw_dir_A_red),max(betw_dir_A_red),(max(betw_dir_A_red)-min(betw_dir_A_red))/1000)
range_clos_dir_B <- seq(min(clos_dir_B),max(clos_dir_B),(max(clos_dir_B)-min(clos_dir_B))/1000)
range_clos_dir_A_red <- seq(min(clos_dir_A_red),max(clos_dir_A_red),(max(clos_dir_A_red)-min(clos_dir_A_red))/1000)
range_updist_B <- seq(1,max(updist_B),1000)
range_updist_A_red <- seq(1,max(updist_A_red),1000)
range_dist_B <- seq(1,max(dist_B),1000)
range_dist_A_red <- seq(1,max(dist_A_red),1000)
# Quick distributional check of the (possibly log-transformed) catchment size.
hist(DATA$catch)
##*** Models meanAr ####
#### Initial model exploration
# Compare a full five-way interaction model against a purely additive model
# for mean allelic richness; candidate models are reduced by step()/dredge()
# and the winners are compared by AIC below.
shapiro.test(DATA$meanAr)
# Interaction model
int.mod.Ar <- lm(meanAr ~ updist*betw_dir*std_clos_undir*catch*spec, DATA, na.action = "na.fail")
summary(int.mod.Ar)
opar <- par(mfrow=c(2,2))
plot(int.mod.Ar)
par(opar)
step(int.mod.Ar)
# best model keeps some interactions
# Hard-coded step() results; which formula applies depends on log_trans.
if(log_trans){
int.sel.mod.Ar <- lm(meanAr ~ updist + betw_dir + std_clos_undir + catch +
spec + updist:betw_dir + updist:std_clos_undir + betw_dir:std_clos_undir +
updist:catch + betw_dir:catch + std_clos_undir:catch + updist:spec +
betw_dir:spec + std_clos_undir:spec + catch:spec + updist:betw_dir:std_clos_undir +
updist:betw_dir:catch + updist:std_clos_undir:catch + betw_dir:std_clos_undir:catch +
updist:betw_dir:spec + updist:catch:spec + betw_dir:catch:spec +
updist:betw_dir:std_clos_undir:catch + updist:betw_dir:catch:spec,
data = DATA, na.action = "na.fail")
}else{
int.sel.mod.Ar <- lm(meanAr ~ updist + betw_dir + std_clos_undir + catch +
spec + updist:betw_dir + updist:std_clos_undir + betw_dir:std_clos_undir +
updist:catch + betw_dir:catch + std_clos_undir:catch + updist:spec +
betw_dir:spec + std_clos_undir:spec + catch:spec + updist:betw_dir:std_clos_undir +
updist:betw_dir:catch + betw_dir:std_clos_undir:catch + updist:betw_dir:spec +
updist:std_clos_undir:spec + betw_dir:std_clos_undir:spec +
updist:catch:spec + betw_dir:catch:spec + std_clos_undir:catch:spec +
updist:betw_dir:catch:spec + betw_dir:std_clos_undir:catch:spec,
data = DATA, na.action = "na.fail")
}
# Linear model
lin.mod.Ar <- lm(meanAr ~ updist+betw_dir+std_clos_undir+catch+spec, DATA, na.action = "na.fail")
summary(lin.mod.Ar)
opar <- par(mfrow=c(2,2))
plot(lin.mod.Ar)
par(opar)
MuMIn::dredge(lin.mod.Ar)
step(lin.mod.Ar)
# best model: ctc+upd (untransformed); btw_dir+spc+upd (log-transformed)
if(log_trans){
sel.mod.Ar <- lm(meanAr ~ updist+betw_dir+std_clos_undir, DATA, na.action = "na.fail")
}else{
sel.mod.Ar <- lm(meanAr ~ updist+catch, DATA, na.action = "na.fail")
}
# Comparison
AIC(int.sel.mod.Ar)
AIC(sel.mod.Ar)
AIC(glm(meanAr ~ updist+catch+spec, DATA, family="Gamma"))
# linear model outperforms interaction model
summary(sel.mod.Ar)
opar <- par(mfrow=c(2,2))
plot(sel.mod.Ar)
par(opar)
car::vif(sel.mod.Ar)
# Fit the Ar models via lm.bind(): each call appends fitted values and CI
# columns to DATA (prefixed "lm_<name>_"/"slm_<name>_") and returns the fits.
#### Full LM of allelic richness without interactions
model <- lm.bind(meanAr ~ updist+betw_dir+std_clos_undir+catch+spec, DATA, "Ar_full", critval, step=T)
DATA <- model[[1]]
lm_Ar_full <- model[[2]]
summary(lm_Ar_full)
car::vif(lm_Ar_full)
slm_Ar_full <- model[[3]]
summary(slm_Ar_full)
car::vif(slm_Ar_full) # should be the same as above (car::vif(sel.mod.Ar))
#### LM of allelic richness by upstream distance * species
AIC(lm(meanAr ~ updist+spec, DATA))
AIC(lm(meanAr ~ updist, DATA))
model <- lm.bind(meanAr ~ updist, DATA, "Ar_updist", critval, step=T)
DATA <- model[[1]]
lm_Ar_updist <- model[[2]]
slm_Ar_updist <- model[[3]]
#### LM of allelic richness by undirected closeness centrality
AIC(lm(meanAr ~ std_clos_undir+spec, DATA))
AIC(lm(meanAr ~ std_clos_undir, DATA))
model <- lm.bind(meanAr ~ std_clos_undir, DATA, "Ar_clos", critval, step=T)
DATA <- model[[1]]
lm_Ar_clos <- model[[2]]
slm_Ar_clos <- model[[3]]
#### LM of allelic richness by directed betweenness centrality
AIC(lm(meanAr ~ betw_dir+spec, DATA))
AIC(lm(meanAr ~ betw_dir, DATA))
model <- lm.bind(meanAr ~ betw_dir, DATA, "Ar_betw_dir", critval, step=T)
DATA <- model[[1]]
lm_Ar_betw_dir <- model[[2]]
slm_Ar_betw_dir <- model[[3]]
##*** Models meanHo ####
# Same exploration workflow as for meanAr, now for mean observed
# heterozygosity: interaction vs. additive model, reduced via step()/dredge()
# and compared by AIC.
shapiro.test(DATA$meanHo)
# Interaction model
int.mod.Ho <- lm(meanHo ~ updist*betw_dir*std_clos_undir*catch*spec, DATA, na.action = "na.fail")
summary(int.mod.Ho)
opar <- par(mfrow=c(2,2))
plot(int.mod.Ho)
par(opar)
step(int.mod.Ho)
# best model keeps some interactions
# Hard-coded step() results; which formula applies depends on log_trans.
if(log_trans){
int.sel.mod.Ho <- lm(meanHo ~ updist * betw_dir * std_clos_undir * catch *
spec, data = DATA, na.action = "na.fail")
}else{
int.sel.mod.Ho <- lm(meanHo ~ updist + betw_dir + std_clos_undir + catch +
spec + updist:betw_dir + updist:std_clos_undir + betw_dir:std_clos_undir +
updist:catch + betw_dir:catch + std_clos_undir:catch + updist:spec +
betw_dir:spec + std_clos_undir:spec + catch:spec + updist:betw_dir:std_clos_undir +
betw_dir:std_clos_undir:catch + updist:betw_dir:spec + std_clos_undir:catch:spec,
data = DATA, na.action = "na.fail")
}
# Linear model
lin.mod.Ho <- lm(meanHo ~ updist+betw_dir+std_clos_undir+catch+spec, DATA, na.action = "na.fail")
summary(lin.mod.Ho)
opar <- par(mfrow=c(2,2))
plot(lin.mod.Ho)
par(opar)
MuMIn::dredge(lin.mod.Ho)
step(lin.mod.Ho)
# best model: btw_dir+spc+std_cls_und (untransformed); btw_dir+spc+std_cls_und (log-transformed)
if(log_trans){
sel.mod.Ho <- lm(meanHo ~ betw_dir+std_clos_undir, DATA, na.action = "na.fail")
}else{
sel.mod.Ho <- lm(meanHo ~ betw_dir+std_clos_undir+spec, DATA, na.action = "na.fail")
}
# Comparison
AIC(int.sel.mod.Ho)
AIC(sel.mod.Ho)
# AIC(glm(meanHo ~ betw_dir+std_clos_undir+spec, DATA, family= "quasibinomial"))
# linear model outperforms interaction model
summary(int.sel.mod.Ho)
opar <- par(mfrow=c(2,2))
plot(int.sel.mod.Ho)
par(opar)
car::vif(int.sel.mod.Ho)
summary(sel.mod.Ho)
opar <- par(mfrow=c(2,2))
plot(sel.mod.Ho)
par(opar)
car::vif(sel.mod.Ho)
# Fit the Ho models via lm.bind(); fitted values and CI columns are appended
# to DATA (prefixed "lm_<name>_"/"slm_<name>_").
#### Full LM of mean observed heterozygosity without interactions
model <- lm.bind(meanHo ~ updist+betw_dir+std_clos_undir+catch+spec, DATA, "Ho_full", critval, step=T)
DATA <- model[[1]]
lm_Ho_full <- model[[2]]
summary(lm_Ho_full)
car::vif(lm_Ho_full)
slm_Ho_full <- model[[3]]
summary(slm_Ho_full)
car::vif(slm_Ho_full) # should be the same as above (car::vif(sel.mod.Ho))
#### LM of observed heterozygosity by upstream distance * species
AIC(lm(meanHo ~ updist+spec, DATA))
AIC(lm(meanHo ~ updist, DATA))
model <- lm.bind(meanHo ~ updist, DATA, "Ho_updist", critval, step=F)
DATA <- model[[1]]
lm_Ho_updist <- model[[2]]
# slm_Ho_updist <- model[[3]]
#### LM of observed heterozygosity by closeness centrality * species
AIC(lm(meanHo ~ std_clos_undir+spec, DATA))
AIC(lm(meanHo ~ std_clos_undir, DATA))
model <- lm.bind(meanHo ~ std_clos_undir, DATA, "Ho_clos", critval, step=T)
DATA <- model[[1]]
lm_Ho_clos <- model[[2]]
slm_Ho_clos <- model[[3]]
#### LM of observed heterozygosity by directed betweenness centrality * species
AIC(lm(meanHo ~ betw_dir+spec, DATA))
AIC(lm(meanHo ~ betw_dir, DATA))
model <- lm.bind(meanHo ~ betw_dir, DATA, "Ho_betw_dir", critval, step=T)
DATA <- model[[1]]
lm_Ho_betw_dir <- model[[2]]
slm_Ho_betw_dir <- model[[3]]
##*** Models He ####
# Same selection workflow as for Ho, now for mean expected heterozygosity (Hs).
# Normality check of the response before fitting.
shapiro.test(DATA$meanHs)
# Interaction model (full factorial of all five predictors).
int.mod.Hs <- lm(meanHs ~ updist*betw_dir*std_clos_undir*catch*spec, DATA, na.action = "na.fail")
summary(int.mod.Hs)
opar <- par(mfrow=c(2,2))
plot(int.mod.Hs)
par(opar)
step(int.mod.Hs)
# best model keeps some interactions
# Selected interaction model, hard-coded from the interactive step() output;
# which terms survive depends on whether the data were log-transformed.
if(log_trans){
int.sel.mod.Hs <- lm(meanHs ~ betw_dir + spec + betw_dir:spec, data = DATA, na.action = "na.fail")
}else{
int.sel.mod.Hs <- lm(meanHs ~ catch, data = DATA, na.action = "na.fail")
}
# Linear model (main effects only).
lin.mod.Hs <- lm(meanHs ~ updist+betw_dir+std_clos_undir+catch+spec, DATA, na.action = "na.fail")
summary(lin.mod.Hs)
opar <- par(mfrow=c(2,2))
plot(lin.mod.Hs)
par(opar)
MuMIn::dredge(lin.mod.Hs)
step(lin.mod.Hs)
# best model: ctc (untransformed); (Int) (log-transformed); ctc+sps (log-transformed, step)
if(log_trans){
sel.mod.Hs <- lm(meanHs ~ 1, DATA, na.action = "na.fail")
}else{
sel.mod.Hs <- lm(meanHs ~ catch, DATA, na.action = "na.fail")
}
# Comparison of interaction vs linear selected models.
AIC(int.sel.mod.Hs)
AIC(sel.mod.Hs)
# AIC(glm(meanHs ~ catch, DATA, family= "quasibinomial"))
# linear model outperforms interaction model
summary(sel.mod.Hs)
opar <- par(mfrow=c(2,2))
plot(sel.mod.Hs)
par(opar)
car::vif(sel.mod.Hs)
#### Full LM of expected heterozygosity without interactions
# Fit the full main-effects model for Hs via the project helper lm.bind
# (returns updated data, full lm, and stepwise lm — based on usage here).
model <- lm.bind(meanHs ~ updist+betw_dir+std_clos_undir+catch+spec, DATA, "Hs_full", critval, step=T)
DATA <- model[[1]]
lm_Hs_full <- model[[2]]
summary(lm_Hs_full)
car::vif(lm_Hs_full)
slm_Hs_full <- model[[3]]
summary(slm_Hs_full)
car::vif(slm_Hs_full) # should be the same as above (car::vif(sel.mod.Hs))
#### LM of expected heterozygosity by upstream distance * species
# Compare AIC with and without the species term before fitting the final
# single-predictor model (no stepwise selection, hence step=F).
AIC(lm(meanHs ~ updist+spec, DATA))
AIC(lm(meanHs ~ updist, DATA))
# BUG FIX: the response was meanHo (copied from the Ho section) although this
# section models expected heterozygosity and labels the result "Hs_updist".
model <- lm.bind(meanHs ~ updist, DATA, "Hs_updist", critval, step=F)
DATA <- model[[1]]
lm_Hs_updist <- model[[2]]
# slm_Hs_updist <- model[[3]]
#### LM of expected heterozygosity by closeness centrality * species
# As for Ho: AIC comparison with/without species, then single-predictor fit.
AIC(lm(meanHs ~ std_clos_undir+spec, DATA))
AIC(lm(meanHs ~ std_clos_undir, DATA))
model <- lm.bind(meanHs ~ std_clos_undir, DATA, "Hs_clos", critval, step=F)
DATA <- model[[1]]
lm_Hs_clos <- model[[2]]
# slm_Hs_clos <- model[[3]]
#### LM of expected heterozygosity by directed betweenness centrality * species
AIC(lm(meanHs ~ betw_dir+spec, DATA))
AIC(lm(meanHs ~ betw_dir, DATA))
model <- lm.bind(meanHs ~ betw_dir, DATA, "Hs_betw_dir", critval, step=T)
DATA <- model[[1]]
lm_Hs_betw_dir <- model[[2]]
slm_Hs_betw_dir <- model[[3]]
##*** Models Fst ####
#### Spatial distance between populations with Fos A and between populations with Fos B
# Instream (graph) distances between sampled populations, per species.
# NOTE(review): weights=E(net) passes an edge *sequence*, not a numeric edge
# attribute (e.g. E(net)$weight) — confirm igraph coerces this as intended.
DIST_B <- distances(net, v=V(net)[match(microsite_B,V(net)$name)], to=V(net)[match(microsite_B,V(net)$name)], weights=E(net))
DIST_A_red <- distances(net, v=V(net)[match(microsite_A_red,V(net)$name)], to=V(net)[match(microsite_A_red,V(net)$name)], weights=E(net))
if(fst){
#### Mantel test of genetic differentiation by instream distance
mantel_A_red <- vegan::mantel(FST_A_red, DIST_A_red, method="pearson", permutations=1000)
mantel_B <- vegan::mantel(FST_B, DIST_B, method="pearson", permutations=1000)
#### LM of genetic differentiation by spatial distance
model_dist_fst_B <- lm(fst_B~dist_B)
summary(model_dist_fst_B)
opar <- par(mfrow=c(2,2))
plot(model_dist_fst_B)
par(opar)
# Predicted Fst over the observed distance range (for plotting).
resp_dist_fst_B <- predict(model_dist_fst_B, list(dist_B = range_dist_B), type="response")
model_dist_fst_A_red <- lm(fst_A_red~dist_A_red)
summary(model_dist_fst_A_red)
opar <- par(mfrow=c(2,2))
plot(model_dist_fst_A_red)
par(opar)
resp_dist_fst_A_red <- predict(model_dist_fst_A_red, list(dist_A_red = range_dist_A_red), type="response")
# Joint (both-species) models on the pairwise table DISTDATA.
shapiro.test(DISTDATA$nonneg_fst)
int.mod.Fst <- lm(nonneg_fst ~ dist*spec, DISTDATA, na.action = "na.fail")
summary(int.mod.Fst)
opar <- par(mfrow=c(2,2))
plot(int.mod.Fst)
par(opar)
MuMIn::dredge(int.mod.Fst)
step(int.mod.Fst)
# interaction model outperforms linear model
car::vif(int.mod.Fst)
lin.mod.Fst <- lm(nonneg_fst ~ dist+spec, DISTDATA, na.action = "na.fail")
summary(lin.mod.Fst)
opar <- par(mfrow=c(2,2))
plot(lin.mod.Fst)
par(opar)
MuMIn::dredge(lin.mod.Fst)
car::vif(lin.mod.Fst)
log.mod.Fst <- lm(nonneg_fst ~ log(dist)*spec, DISTDATA, na.action = "na.fail")
# Grid search over power transformations of distance (dist^p, p in [0,1]);
# the power minimizing AIC is used for the final model.
power <- seq(0,1,0.01)
AICpower <- c()
for (i in 1:length(power)){
pow.mod.Fst <- lm(nonneg_fst ~ I(dist^power[i])*spec, DISTDATA, na.action = "na.fail")
AICpower[i] <- AIC(pow.mod.Fst)
}
plot(AICpower~power, xlab="power term", ylab="AIC")
pow.mod.Fst <- lm(nonneg_fst ~ I(dist^power[which.min(AICpower)])*spec, DISTDATA, na.action = "na.fail")
# Compare all candidate functional forms.
AIC(int.mod.Fst)
AIC(lin.mod.Fst)
AIC(log.mod.Fst)
AIC(pow.mod.Fst)
summary(pow.mod.Fst)
#### Full LM of genetic differentiation (power-transformed distance * species)
model <- lm.bind(nonneg_fst ~ I(dist^power[which.min(AICpower)])*spec, DISTDATA, "fst_power", critval, step=T)
DISTDATA <- model[[1]]
lm_fst_power <- model[[2]]
summary(lm_fst_power)
car::vif(lm_fst_power)
slm_fst_power <- model[[3]]
summary(slm_fst_power)
car::vif(slm_fst_power)
}
#### PERPENDICULAR DISTANCES -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ####
##*** Preparing matrices####
# Pre-allocate containers for perpendicular (orthogonal) offsets of empirical
# vs modelled diversity. For each metric (Ar, Ho, Hs) and each species (A, B):
# a plain and a signed ("directed") offset vector, a per-model sum, and
# per-model histogram matrices (one column per modelled parameter set).
orthdist_Ar_A <- orthdist_Ar_A_directed <- sum_orthdist_Ar_A <- vector()
hist_Ar_A <- hist_Ar_A_directed <- matrix(nrow=length(modsite_GfosA), ncol=ncol(Ar_modelled)-1)
orthdist_Ar_B <- orthdist_Ar_B_directed <- sum_orthdist_Ar_B <- vector()
hist_Ar_B <- hist_Ar_B_directed <- matrix(nrow=length(modsite_GfosB), ncol=ncol(Ar_modelled)-1)
orthdist_Ho_A <- orthdist_Ho_A_directed <- sum_orthdist_Ho_A <- vector()
hist_Ho_A <- hist_Ho_A_directed <- matrix(nrow=length(modsite_GfosA), ncol=ncol(Ho_modelled)-1)
orthdist_Ho_B <- orthdist_Ho_B_directed <- sum_orthdist_Ho_B <- vector()
hist_Ho_B <- hist_Ho_B_directed <- matrix(nrow=length(modsite_GfosB), ncol=ncol(Ho_modelled)-1)
orthdist_Hs_A <- orthdist_Hs_A_directed <- sum_orthdist_Hs_A <- vector()
hist_Hs_A <- hist_Hs_A_directed <- matrix(nrow=length(modsite_GfosA), ncol=ncol(Hs_modelled)-1)
orthdist_Hs_B <- orthdist_Hs_B_directed <- sum_orthdist_Hs_B <- vector()
hist_Hs_B <- hist_Hs_B_directed <- matrix(nrow=length(modsite_GfosB), ncol=ncol(Hs_modelled)-1)
##*** Calculating distances####
# For every modelled parameter set (columns 2..ncol of the *_modelled
# matrices), compute the perpendicular offset of each (empirical, modelled)
# point from the 1:1 line, per metric (Ar, Ho, Hs) and species (A, B).
# perp.segment.coord / euc.dist are project helpers; the "directed" offset is
# signed by whether the point lies above or below the 1:1 line.
for (i in 2:ncol(Ar_modelled)){
# Modelled values at the sites of each species for this parameter set.
Ar_Mod_A <- Ar_modelled[,i][rownames(Ar_modelled)%in%modsite_GfosA]
Ar_Mod_B <- Ar_modelled[,i][rownames(Ar_modelled)%in%modsite_GfosB]
Ho_Mod_A <- Ho_modelled[,i][rownames(Ho_modelled)%in%modsite_GfosA]
Ho_Mod_B <- Ho_modelled[,i][rownames(Ho_modelled)%in%modsite_GfosB]
Hs_Mod_A <- Hs_modelled[,i][rownames(Hs_modelled)%in%modsite_GfosA]
Hs_Mod_B <- Hs_modelled[,i][rownames(Hs_modelled)%in%modsite_GfosB]
for (j in 1:length(meanAr_A_red_updist)){
point <- cbind(meanAr_A_red_updist,Ar_Mod_A)[j,]
seg <- unlist(perp.segment.coord(point[1],point[2]))
orthdist_Ar_A[j] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
orthdist_Ar_A_directed[j] <- sign(seg[2]-seg[4])*orthdist_Ar_A[j]
}
for (k in 1:length(meanAr_B_updist)){
point <- cbind(meanAr_B_updist,Ar_Mod_B)[k,]
seg <- unlist(perp.segment.coord(point[1],point[2]))
orthdist_Ar_B[k] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
orthdist_Ar_B_directed[k] <- sign(seg[2]-seg[4])*orthdist_Ar_B[k]
}
for (j in 1:length(meanHo_A_red_updist)){
point <- cbind(meanHo_A_red_updist,Ho_Mod_A)[j,]
seg <- unlist(perp.segment.coord(point[1],point[2]))
orthdist_Ho_A[j] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
orthdist_Ho_A_directed[j] <- sign(seg[2]-seg[4])*orthdist_Ho_A[j]
}
for (k in 1:length(meanHo_B_updist)){
point <- cbind(meanHo_B_updist,Ho_Mod_B)[k,]
seg <- unlist(perp.segment.coord(point[1],point[2]))
orthdist_Ho_B[k] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
orthdist_Ho_B_directed[k] <- sign(seg[2]-seg[4])*orthdist_Ho_B[k]
}
for (j in 1:length(meanHs_A_red_updist)){
point <- cbind(meanHs_A_red_updist,Hs_Mod_A)[j,]
seg <- unlist(perp.segment.coord(point[1],point[2]))
orthdist_Hs_A[j] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
# BUG FIX: previously multiplied the sign by orthdist_Ho_A[j] (Ho magnitude)
# instead of the Hs magnitude computed on the line above.
orthdist_Hs_A_directed[j] <- sign(seg[2]-seg[4])*orthdist_Hs_A[j]
}
for (k in 1:length(meanHs_B_updist)){
point <- cbind(meanHs_B_updist,Hs_Mod_B)[k,]
seg <- unlist(perp.segment.coord(point[1],point[2]))
orthdist_Hs_B[k] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
# BUG FIX: previously multiplied the sign by orthdist_Ho_B[k] (Ho magnitude)
# instead of the Hs magnitude computed on the line above.
orthdist_Hs_B_directed[k] <- sign(seg[2]-seg[4])*orthdist_Hs_B[k]
}
# Aggregate: total offset per parameter set, and per-site columns for the
# histogram figures (plain and directed variants).
sum_orthdist_Ar_A[[i-1]] <- sum(orthdist_Ar_A)
sum_orthdist_Ar_B[[i-1]] <- sum(orthdist_Ar_B)
sum_orthdist_Ho_A[[i-1]] <- sum(orthdist_Ho_A)
sum_orthdist_Ho_B[[i-1]] <- sum(orthdist_Ho_B)
sum_orthdist_Hs_A[[i-1]] <- sum(orthdist_Hs_A)
sum_orthdist_Hs_B[[i-1]] <- sum(orthdist_Hs_B)
hist_Ar_A[,i-1] <- orthdist_Ar_A
hist_Ar_B[,i-1] <- orthdist_Ar_B
hist_Ho_A[,i-1] <- orthdist_Ho_A
hist_Ho_B[,i-1] <- orthdist_Ho_B
hist_Hs_A[,i-1] <- orthdist_Hs_A
hist_Hs_B[,i-1] <- orthdist_Hs_B
hist_Ar_A_directed[,i-1] <- orthdist_Ar_A_directed
hist_Ar_B_directed[,i-1] <- orthdist_Ar_B_directed
hist_Ho_A_directed[,i-1] <- orthdist_Ho_A_directed
hist_Ho_B_directed[,i-1] <- orthdist_Ho_B_directed
hist_Hs_A_directed[,i-1] <- orthdist_Hs_A_directed
hist_Hs_B_directed[,i-1] <- orthdist_Hs_B_directed
}
# Label each per-parameter-set sum with the corresponding model-run column
# name (the first column of the *_modelled matrices is skipped, as in the
# distance-calculation loop above).
names(sum_orthdist_Ar_A) <- names(sum_orthdist_Ar_B) <- colnames(Ar_modelled)[-1]
names(sum_orthdist_Ho_A) <- names(sum_orthdist_Ho_B) <- colnames(Ho_modelled)[-1]
names(sum_orthdist_Hs_A) <- names(sum_orthdist_Hs_B) <- colnames(Hs_modelled)[-1]
#### VALUES -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ####
# Summary statistics reported in the manuscript; printed to the console.
nrow(A_red)+nrow(B) # Number of used individuals
if (internal){
nrow(Gfos[which(Gfos$type!="K"),])
}
# Descriptive statistics of mean allelic richness per species.
min(meanAr_A_red)
max(meanAr_A_red)
mean(meanAr_A_red)
median(meanAr_A_red)
sd(meanAr_A_red)
min(meanAr_B)
max(meanAr_B)
mean(meanAr_B)
median(meanAr_B)
sd(meanAr_B)
# Descriptive statistics of mean observed heterozygosity per species.
min(meanHo_A_red)
max(meanHo_A_red)
mean(meanHo_A_red)
median(meanHo_A_red)
sd(meanHo_A_red)
min(meanHo_B)
max(meanHo_B)
mean(meanHo_B)
median(meanHo_B)
sd(meanHo_B)
# Descriptive statistics of mean expected heterozygosity per species.
min(meanHs_A_red)
max(meanHs_A_red)
mean(meanHs_A_red)
median(meanHs_A_red)
sd(meanHs_A_red)
min(meanHs_B)
max(meanHs_B)
mean(meanHs_B)
median(meanHs_B)
sd(meanHs_B)
# Fst summaries and Mantel results (explicit print() since inside a block).
if(fst){
print(min(fst_A_red))
print(max(fst_A_red))
print(mean(fst_A_red))
print(median(fst_A_red))
print(sd(fst_A_red))
print(min(fst_B))
print(max(fst_B))
print(mean(fst_B))
print(median(fst_B))
print(sd(fst_B))
print(mantel_A_red)
print(mantel_B)
}
# Get range of scaled carrying capacities (from C++ file, lines 166, 215)
# Extract total catchments sizes from graph object (since included here, otherwise use catch_area.in directly)
total_catch <- V(net)$Total_Catch
# K scaling according to sqrt(catchment size)
sqrt(total_catch)
sum_sqrt_catch_size <- sum(sqrt(total_catch))
# Scale so carrying capacities average 1000 per node (mirrors the simulation).
K_scaled <- round(sqrt(total_catch) * (1000*length(total_catch)/sum_sqrt_catch_size ))
range(K_scaled)
#### FIGURES -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ####
dir.create(paste0(WD,"/Analysis_",output), showWarnings=F)
##** Preparing multipanel figures ####
# Layout parameters consumed by the project helpers multipanel.dimensions()
# and multipanel.layout() (Figs 3 and 4) — see those helpers for semantics.
pla <- list(
a=2, # columns of subplots
b=1, # rows of subplots
x=3, # number of rows
y=3, # number of cols
sub1=F,
sub2=T,
main=F,
main_t=NULL,
sub1_t=NULL,
sub2_t=klab,
x_t=wlab,
y_t=dlab,
main_c=NULL,
sub1_c=3,
sub2_c=2.5,
x_c=2,
y_c=2)
# Assemble the margin-label texts and their cex values in panel order.
lab <- c(pla$sub1_t,rep(c(pla$sub2_t),ifelse(pla$sub2,pla$b,0)),rep(pla$x_t,pla$b),rep(pla$y_t,pla$a))
lab_cex <- c(rep(pla$sub1_c,length(pla$sub1_t)),rep(c(pla$sub2_c),ifelse(pla$sub2,pla$a*pla$b,0)),rep(pla$x_c,pla$b*length(pla$x_t)),rep(pla$y_c,pla$a*length(pla$y_t)))
##** FIG 1 ####
## Empirical data maps
# 2x3 grid of maps (Ar/Ho/Hs x species A/B) plus margin label panels.
if(pdf){
pdf(paste0(WD,"/Analysis_",output,"/Fig1.pdf"), width=1.5*fig.width, height=(3*fig.width)/4.5)
}else{
png(paste0(WD,"/Analysis_",output,"/Fig1.png"), width=1.5*fig.width, height=(3*fig.width)/4.5, units="in", res=300)
}
# Panels 1-6 are the maps; 7-12 are label strips drawn last (plot.new/text).
nf <- layout(matrix(c(12, 7, 8, 9,
10, 1, 2, 3,
11, 4, 5, 6), nrow=3, byrow=T),
widths=c(0.5,3,3,3),
heights=c(0.5,2,2),
respect=T)
# layout.show(nf)
figmar <- c(4.5,4.5,0.5,0)
figmgp <- c(1.7,0.3,0)
# Threshold factor deciding when point labels switch to white text.
col_switch <- 1/2
# #### Map of mean allelic richness in G. fossarum A
par(mar=figmar, mgp=figmgp, tcl=0.2, xaxs="i", yaxs="i")
if (internal){
river_plot(north_arrow = F, overview_map = F, scalebar=T, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, lines_rhone=F, plot_ticino=F, lines_ticino=F, plot_inn=F, lines_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}else{
river_plot(north_arrow = F, overview_map = F, scalebar=T, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, plot_ticino=F, plot_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}
values <- (meanAr_A_red-min(meanAr_A_red))/ (max(meanAr_A_red)-min(meanAr_A_red)) # transform meanAr values to range [0,1] for heatmap plotting
# Draw one colored site marker per sampling location, labelled with the
# rounded value; label color flips to white near the color-scale extremes.
if(!any(is.na(values))){
for(i in 1:length(match_A_red)){
points(site_coord$x[match_A_red[i]], site_coord$y[match_A_red[i]], col=rgb2hex(col_fun(values))[i], pch=19, cex=2.5)
l1 <- mean(meanAr_A_red)+(col_switch)*(max(meanAr_A_red)-median(meanAr_A_red))
l2 <- mean(meanAr_A_red)-(col_switch)*(median(meanAr_A_red)-min(meanAr_A_red))
text(site_coord$x[match_A_red[i]], site_coord$y[match_A_red[i]], round(meanAr_A_red[i],1), col=ifelse(meanAr_A_red[i]>l1|meanAr_A_red[i]<l2,"white","black"), cex=0.8)
}
}
# Inset overview map, north arrow and color-gradient legend (project helpers).
overview_map(xl = 495000,xr = 545000,yt = 280000, yb = 230000)
north_arrow(x=825000, y=80000)
gradient.legend(meanAr_A_red, val.cex = 1, palette=col_pal)
mtext(side = 3, text = lab_sub[1], line = 0.5, adj=0, cex = 1.5)
#### Map of mean observed heterozygosity in G. fossarum A
# Same drawing pattern as the Ar panel above, for meanHo_A_red.
par(mar=figmar, mgp=figmgp, tcl=0.2, xaxs="i", yaxs="i")
if (internal){
river_plot(north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, lines_rhone=F, plot_ticino=F, lines_ticino=F, plot_inn=F, lines_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}else{
river_plot(north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, plot_ticino=F, plot_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}
# Coordinate-system annotation (Swiss CH1903/LV03 grid).
mtext(side = 2, text = expression(paste(bold("CH1903"), " / ", bold("LV03"))), line = 1 + 1, cex = 1)
values <- (meanHo_A_red-min(meanHo_A_red))/ (max(meanHo_A_red)-min(meanHo_A_red)) # transform meanHo values to range [0,1] for heatmap plotting
if(!any(is.na(values))){
for(i in 1:length(match_A_red)){
points(site_coord$x[match_A_red[i]], site_coord$y[match_A_red[i]], col=rgb2hex(col_fun(values))[i], pch=19, cex=2.5)
l1 <- mean(meanHo_A_red)+(col_switch)*(max(meanHo_A_red)-median(meanHo_A_red))
l2 <- mean(meanHo_A_red)-(col_switch)*(median(meanHo_A_red)-min(meanHo_A_red))
text(site_coord$x[match_A_red[i]], site_coord$y[match_A_red[i]], round(meanHo_A_red[i],1), col=ifelse(meanHo_A_red[i]>l1|meanHo_A_red[i]<l2,"white","black"), cex=0.8)
}
}
gradient.legend(meanHo_A_red, val.cex=1, palette=col_pal)
mtext(side = 3, text = lab_sub[3], line = 0.5, adj=0, cex = 1.5)
#### Map of expected heterozygosity in G. fossarum A
# Same drawing pattern as the Ar panel, for meanHs_A_red.
par(mar=figmar, mgp=figmgp, tcl=0.2, xaxs="i", yaxs="i")
if (internal){
river_plot(north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, lines_rhone=F, plot_ticino=F, lines_ticino=F, plot_inn=F, lines_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}else{
river_plot(north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, plot_ticino=F, plot_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}
mtext(side = 2, text = expression(paste(bold("CH1903"), " / ", bold("LV03"))), line = 1 + 1, cex = 1)
values <- (meanHs_A_red-min(meanHs_A_red))/ (max(meanHs_A_red)-min(meanHs_A_red)) # transform meanHs values to range [0,1] for heatmap plotting
if(!any(is.na(values))){
for(i in 1:length(match_A_red)){
points(site_coord$x[match_A_red[i]], site_coord$y[match_A_red[i]], col=rgb2hex(col_fun(values))[i], pch=19, cex=2.5)
l1 <- mean(meanHs_A_red)+(col_switch)*(max(meanHs_A_red)-median(meanHs_A_red))
l2 <- mean(meanHs_A_red)-(col_switch)*(median(meanHs_A_red)-min(meanHs_A_red))
text(site_coord$x[match_A_red[i]], site_coord$y[match_A_red[i]], round(meanHs_A_red[i],1), col=ifelse(meanHs_A_red[i]>l1|meanHs_A_red[i]<l2,"white","black"), cex=0.8)
}
}
gradient.legend(meanHs_A_red, val.cex=1, palette=col_pal)
mtext(side = 3, text = lab_sub[5], line = 0.5, adj=0, cex = 1.5)
#### Map of mean allelic richness in G. fossarum B
# Same drawing pattern as the species-A panels, for meanAr_B / match_B sites.
par(mar=figmar, mgp=figmgp, tcl=0.2, xaxs="i", yaxs="i")
if (internal){
river_plot(north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, lines_rhone=F, plot_ticino=F, lines_ticino=F, plot_inn=F, lines_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}else{
river_plot(north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, plot_ticino=F, plot_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}
mtext(side = 1, text = expression(paste(bold("CH1903"), " / ", bold("LV03"))), line = 1 + 1, cex = 1)
values <- (meanAr_B-min(meanAr_B))/ (max(meanAr_B)-min(meanAr_B)) # transform meanAr values to range [0,1] for heatmap plotting
if(!any(is.na(values))){
for(i in 1:length(match_B)){
points(site_coord$x[match_B[i]], site_coord$y[match_B[i]], col=rgb2hex(col_fun(values))[i], pch=19, cex=2.5)
l1 <- mean(meanAr_B)+(col_switch)*(max(meanAr_B)-median(meanAr_B))
l2 <- mean(meanAr_B)-(col_switch)*(median(meanAr_B)-min(meanAr_B))
text(site_coord$x[match_B[i]], site_coord$y[match_B[i]], round(meanAr_B[i],1), col=ifelse(meanAr_B[i]>l1|meanAr_B[i]<l2,"white","black"), cex=0.8)
}
}
gradient.legend(meanAr_B, val.cex = 1, palette=col_pal)
mtext(side = 3, text = lab_sub[2], line = 0.5, adj=0, cex = 1.5)
#### Map of mean observed heterozygosity in G. fossarum B
# Same drawing pattern as the other panels, for meanHo_B / match_B sites.
par(mar=figmar, mgp=figmgp, tcl=0.2, xaxs="i", yaxs="i")
if (internal){
river_plot(north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, lines_rhone=F, plot_ticino=F, lines_ticino=F, plot_inn=F, lines_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}else{
river_plot(north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, plot_ticino=F, plot_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}
mtext(side = 1, text = expression(paste(bold("CH1903"), " / ", bold("LV03"))), line = 1 + 1, cex = 1)
mtext(side = 2, text = expression(paste(bold("CH1903"), " / ", bold("LV03"))), line = 1 + 1, cex = 1)
values <- (meanHo_B-min(meanHo_B))/ (max(meanHo_B)-min(meanHo_B)) # transform meanHo values to range [0,1] for heatmap plotting
# FIX: added the NA guard used by the four sibling panels, so a single NA in
# the rescaled values skips the site markers instead of erroring mid-figure.
if(!any(is.na(values))){
for(i in 1:length(match_B)){
points(site_coord$x[match_B[i]], site_coord$y[match_B[i]], col=rgb2hex(col_fun(values))[i], pch=19, cex=2.5)
l1 <- mean(meanHo_B)+(col_switch)*(max(meanHo_B)-median(meanHo_B))
l2 <- mean(meanHo_B)-(col_switch)*(median(meanHo_B)-min(meanHo_B))
text(site_coord$x[match_B[i]], site_coord$y[match_B[i]], round(meanHo_B[i],1), col=ifelse(meanHo_B[i]>l1|meanHo_B[i]<l2,"white","black"), cex=0.8)
}
}
gradient.legend(meanHo_B, val.cex = 1, palette=col_pal)
mtext(side = 3, text = lab_sub[4], line = 0.5, adj=0, cex = 1.5)
#### Map of expected heterozygosity in G. fossarum B
# Same drawing pattern as the other panels, for meanHs_B / match_B sites.
par(mar=figmar, mgp=figmgp, tcl=0.2, xaxs="i", yaxs="i")
if (internal){
river_plot(north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, lines_rhone=F, plot_ticino=F, lines_ticino=F, plot_inn=F, lines_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}else{
river_plot(north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, col_water=col_water, plot_rhone=F, plot_ticino=F, plot_inn=F, lakes=TRUE, rivers=TRUE, axes="no_label", river_nr=FALSE)
}
mtext(side = 1, text = expression(paste(bold("CH1903"), " / ", bold("LV03"))), line = 1 + 1, cex = 1)
mtext(side = 2, text = expression(paste(bold("CH1903"), " / ", bold("LV03"))), line = 1 + 1, cex = 1)
values <- (meanHs_B-min(meanHs_B))/ (max(meanHs_B)-min(meanHs_B)) # transform meanHs values to range [0,1] for heatmap plotting
# FIX: added the NA guard used by the four sibling panels, so a single NA in
# the rescaled values skips the site markers instead of erroring mid-figure.
if(!any(is.na(values))){
for(i in 1:length(match_B)){
points(site_coord$x[match_B[i]], site_coord$y[match_B[i]], col=rgb2hex(col_fun(values))[i], pch=19, cex=2.5)
l1 <- mean(meanHs_B)+(col_switch)*(max(meanHs_B)-median(meanHs_B))
l2 <- mean(meanHs_B)-(col_switch)*(median(meanHs_B)-min(meanHs_B))
text(site_coord$x[match_B[i]], site_coord$y[match_B[i]], round(meanHs_B[i],1), col=ifelse(meanHs_B[i]>l1|meanHs_B[i]<l2,"white","black"), cex=0.8)
}
}
gradient.legend(meanHs_B, val.cex = 1, palette=col_pal)
mtext(side = 3, text = lab_sub[6], line = 0.5, adj=0, cex = 1.5)
# Margin label panels (layout slots 7-12): column titles (Ar, Ho, Hs) on top,
# rotated species labels on the left; then close the Fig. 1 device.
par(mar=c(0,0,0,0),mgp=c(3,1,0))
plot.new()
text(0.5,0.5, lab_Ar ,adj=c(0.5,0.5), cex=2.5)
plot.new()
text(0.5,0.5, lab_Ho ,adj=c(0.5,0.5), cex=2.5)
plot.new()
text(0.5,0.5, lab_Hs ,adj=c(0.5,0.5), cex=2.5)
plot.new()
text(1,0.5,label_A,adj=c(0.5,0), cex=2.5, srt=90)
plot.new()
text(1,0.5,label_B,adj=c(0.5,0), cex=2.5, srt=90)
dev.off()
##** FIG 2 ####
# 2x3 panel of diversity ~ predictor regressions (GLMplot is a project
# helper that plots data points plus the named fitted model with CI).
if(pdf){
pdf(paste0(WD,"/Analysis_",output,"/Fig2.pdf"), width=22.5, height=10)
}else{
png(paste0(WD,"/Analysis_",output,"/Fig2.png"), width=22.5, height=10, units="in", res=300)
}
par(mfrow=c(2,3))
# Shared y-range across the four heterozygosity panels so they are comparable.
ylim <- c(min(meanHo_A_red,meanHo_B,meanHs_A_red,meanHs_B),max(meanHo_A_red,meanHo_B,meanHs_A_red,meanHs_B))
#### meanAr~updist
# Custom x-axis converts meters to km tick labels; xrev reverses the axis.
GLMplot("meanAr","updist",DATA,model="slm_Ar_updist",pt.cex=2, CI_border=F,xlabel="Upstream distance [km]",ylabel=lab_Ar, xrev=T, xax="n", cex.lab=2, cex.axis=1.5, cex.legend=1.5)
axis(1, c(0,50,100,150,200,250,300), at=c(0,50000,100000,150000,200000,250000,300000), cex.axis=1.5)
text(300000, par("usr")[4], lab_sub[1], cex=2, adj=c(0.5,1))
#### meanHo~updist
GLMplot("meanHo","updist",DATA,model="lm_Ho_updist",ylim=ylim,pt.cex=2, CI_border=F,xlabel="Upstream distance [km]",ylabel=lab_Ho, xrev=T, xax="n", cex.lab=2, cex.axis=1.5, legend=F)
axis(1, c(0,50,100,150,200,250,300), at=c(0,50000,100000,150000,200000,250000,300000), cex.axis=1.5)
text(300000, par("usr")[4], lab_sub[3], cex=2, adj=c(0.5,1))
#### meanHs~updist
GLMplot("meanHs","updist",DATA,model="lm_Hs_updist",ylim=ylim,pt.cex=2, CI_border=F,xlabel="Upstream distance [km]",ylabel=lab_Hs, xrev=T, xax="n", cex.lab=2, cex.axis=1.5, legend=F)
axis(1, c(0,50,100,150,200,250,300), at=c(0,50000,100000,150000,200000,250000,300000), cex.axis=1.5)
text(300000, par("usr")[4], lab_sub[5], cex=2, adj=c(0.5,1))
#### meanAr~std_clos_undir
GLMplot("meanAr","std_clos_undir",DATA,model="slm_Ar_clos",pt.cex=2, CI_border=F,xlabel="Standardized closeness centrality",ylabel=lab_Ar, legend=F)
text(0, par("usr")[4], lab_sub[2], cex=2, adj=c(0.5,1))
#### meanHo~std_clos_undir
GLMplot("meanHo","std_clos_undir",DATA,model="slm_Ho_clos",ylim=ylim,pt.cex=2, CI_border=F,xlabel="Standardized closeness centrality",ylabel=lab_Ho, legend=F)
text(0, par("usr")[4], lab_sub[4], cex=2, adj=c(0.5,1))
# dev.off()
#### meanHs~std_clos_undir
GLMplot("meanHs","std_clos_undir",DATA,model="lm_Hs_clos",ylim=ylim,pt.cex=2, CI_border=F,xlabel="Standardized closeness centrality",ylabel=lab_Hs, legend=F)
text(0, par("usr")[4], lab_sub[6], cex=2, adj=c(0.5,1))
dev.off()
##** FIG 3 ####
##** Modelled data maps
#*** Mean Ar maps
# One map per modelled parameter set (columns 2.. of Ar_modelled), arranged
# with the project multipanel helpers; main title row disabled.
main_3=F
mp_dim <- multipanel.dimensions(main.col=pla$a,main.row=pla$b,sub.row=pla$x,sub.col=pla$y,sub1=pla$sub1,sub2=pla$sub2, main=main_3, h.main=0.5, w.legend=0, h.sub2=0.3, w.axis=0.7, h.axis=0, spacer.sub.col=0.5)
if(pdf){
pdf(paste0(WD,"/Analysis_",output,"/Fig3.pdf"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]))
}else{
png(paste0(WD,"/Analysis_",output,"/Fig3.png"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]), units="in", res=600)
}
par(mar=c(0,0,0,0))
nf <- multipanel.layout(main.col=pla$a,main.row=pla$b,sub.row=pla$x,sub.col=pla$y,sub1=pla$sub1,sub2=pla$sub2, main=main_3, h.main=0.5, w.legend=0, h.sub2=0.3, w.axis=0.7, h.axis=0, spacer.sub.col=0.5)
for (j in 2:ncol(Ar_modelled)){
if (internal){
river_plot(width_country=0.5, lwd_rivers=0.5, lwd_lakes=0.5, xlimit=c(495000,825000), col_water=col_water, north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, plot_rhone=F, lines_rhone=F, plot_ticino=F, lines_ticino=F, plot_inn=F, lines_inn=F, lakes=TRUE, rivers=TRUE, axes="none", river_nr=T)
}else{
river_plot(width_country=0.5, lwd_rivers=0.5, lwd_lakes=0.5, xlimit=c(495000,825000), col_water=col_water, north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, plot_rhone=F, plot_ticino=F, plot_inn=F, lakes=TRUE, rivers=TRUE, axes="none", river_nr=F)
}
values <- (Ar_modelled[,j]-min(Ar_modelled[,j]))/ (max(Ar_modelled[,j])-min(Ar_modelled[,j])) # transform meanAr values to range [0,1] for heatmap plotting
if(!any(is.na(values))){
for(i in 1:length(match_Mod)){
points(site_coord$x[match_Mod[i]], site_coord$y[match_Mod[i]], bg=rgb2hex(col_fun(values))[i], pch=21, lwd=0.5, cex=1)
}
}
# Column headers are drawn only over the first row of each panel column;
# the hard-coded j indices encode the panel grid positions.
if (j %in% c(2,3)){
mtext(lab[6], side=3, line=-1, cex=1.3)
}
if (j %in% c(8,9)){
mtext(lab[7], side=3, line=-1, cex=1.3)
}
if (j %in% c(14,15)){
mtext(lab[8], side=3, line=-1, cex=1.3)
}
gradient.legend(Ar_modelled[,j],alpha=1, val.midpoint=F, round=1, val.cex=1, val.gap = 0.5, title.gap=0.1, xl = 505000, xr = 635000, yb = 20000, yt = 40000, horizontal=T)
}
if(main_3){
plot.new()
text(0.5,0.5,"Simulated mean allelic richness",adj=c(0.5,0.5), cex=3)
}
# Margin label panels for the multipanel grid.
for (i in 1:5){
plot.new()
if (i %in% c(1:2)){
text(0.5,1,lab[i],adj=c(0.5,1), cex=lab_cex[i])
}
if (i %in% c(3:5)){
text(0.5,0.5,lab[i],adj=c(0.5,0.5), cex=lab_cex[i])
}
}
dev.off()
##** FIG 4 ####
##** Histogram of orthogonal distance to 1:1 line
# One histogram panel per modelled parameter set, overlaying species A and B
# offset distributions with their medians.
main_4=F
mp_dim <- multipanel.dimensions(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=main_4, h.main=0.5)
if(pdf){
pdf(paste0(WD,"/Analysis_",output,"/Fig4.pdf"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]))
}else{
png(paste0(WD,"/Analysis_",output,"/Fig4.png"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]), units="in", res=600)
}
nf <- multipanel.layout(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=main_4, h.main=0.5)
for (i in 1:ncol(hist_Ar_A)){
par(mar=c(0,0,0,0))
# Median lines are clipped at yclip; the plot region extends to ylim so the
# median text boxes fit above the clipped area.
yclip <- 30
ylim <- 40
# First hist() establishes the plot region (axes suppressed).
hist(hist_Ar_A[,i],
breaks=seq(0,ceiling(max(hist_Ar_A,hist_Ar_B, na.rm=T))-0.5,0.5),
xlim=c(0,ceiling(max(hist_Ar_A,hist_Ar_B, na.rm=T))),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_A,
main="")
clip(0,ceiling(max(hist_Ar_A,hist_Ar_B, na.rm=T)), 0, yclip)
abline(v=median(hist_Ar_A[,i], na.rm=T),col=col_Gfos_A)
abline(v=median(hist_Ar_B[,i], na.rm=T),col=col_Gfos_B)
clip(0,ceiling(max(hist_Ar_A,hist_Ar_B, na.rm=T)), 0, ylim)
textbox(ceiling(max(hist_Ar_A,hist_Ar_B, na.rm=T)),30,paste0(measure2_short," = ",formatC(round(median(hist_Ar_A[,i], na.rm=T),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_A, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
textbox(ceiling(max(hist_Ar_A,hist_Ar_B, na.rm=T)),25,paste0(measure2_short," = ",formatC(round(median(hist_Ar_B[,i], na.rm=T),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_B, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
# Redraw both histograms on top of the median lines (add=T).
hist(hist_Ar_A[,i],
breaks=seq(0,ceiling(max(hist_Ar_A,hist_Ar_B, na.rm=T))-0.5,0.5),
xlim=c(0,ceiling(max(hist_Ar_A,hist_Ar_B, na.rm=T))),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_A,
main="", add=T)
hist(hist_Ar_B[,i],
breaks=seq(0,ceiling(max(hist_Ar_A,hist_Ar_B, na.rm=T))-0.5,0.5),
xlim=c(0,ceiling(max(hist_Ar_A,hist_Ar_B, na.rm=T))),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_B,
main="",
add=T)
# Headers/axes only on selected panels; the hard-coded i indices encode the
# panel positions in the multipanel grid.
if (i %in% c(1,2)){
mtext(lab[6], side=3, line=0.8, cex=1.3)
}
if (i %in% c(7,8)){
mtext(lab[7], side=3, line=0.8, cex=1.3)
}
if (i %in% c(13,14)){
mtext(lab[8], side=3, line=0.8, cex=1.3)
}
if (i %in% c(3)){
mtext("Frequencies [counts]", side=2, line=1, cex=1)
}
if (i %in% c(11,12)){
mtext(measure3, side=1, line=3, cex=1)
}
if (i %in% c(5:6,11:12,17:18)){
axis(1)
}
if (i %in% c(1,3,5)){
axis(2, at=seq(0,yclip,5), labels=F)
}
if (i %in% c(2,4,6)){
axis(2, at=seq(0,yclip,5))
}
if (i %in% c(13:18)){
axis(4, at=seq(0,yclip,5), labels=F)
}
}
if(main_4){
plot.new()
text(0.5,0.7,paste0(lab_Ar,": Distribution of ",measure3a),adj=c(0.5,0.5),cex=3)
}
# Margin label panels.
for (i in 1:length(lab)){
plot.new()
if (i %in% c(1:2)){
text(0.5,1,lab[i],adj=c(0.5,1), cex=lab_cex[i])
}
if (i %in% c(3:5)){
text(0,0.5,lab[i],adj=c(0,0.5), cex=lab_cex[i])
}
}
# Add example plot
# Inset: empirical vs modelled Ar for the LAST parameter set, illustrating the
# perpendicular-offset construction. Note this reuses orthdist_Ar_A/_B as left
# by the final iteration of the distance-calculation loop above, which matches
# i = ncol(Ar_modelled) used here.
par(mar=c(0,6,16,0.5), pty="s")
i <- ncol(Ar_modelled)
Ar_Mod_A <- Ar_modelled[,i][rownames(Ar_modelled)%in%modsite_GfosA]
Ar_Mod_B <- Ar_modelled[,i][rownames(Ar_modelled)%in%modsite_GfosB]
# Empty square plot with equal axes so the 1:1 line is at 45 degrees.
plot(Ar_Mod_A~meanAr_A_red_updist, type="n",
xlim=c(min(Ar_modelled[,-1], na.rm=T),max(Ar_modelled[,-1], na.rm=T)),
ylim=c(min(Ar_modelled[,-1], na.rm=T),max(Ar_modelled[,-1], na.rm=T)),
xaxt="s",
yaxt="n",
xlab="",
ylab="",
asp=1)
# Grey callout drawn outside the plot region (clipping disabled via xpd=T).
par(xpd=T)
polygon(c(-4.5,-4.5,par("usr")[3],par("usr")[3]),c(par("usr")[1],9,par("usr")[4],par("usr")[1]), col="lightgrey", border=NA)
segments(-4.5,par("usr")[1],par("usr")[3],par("usr")[1], lty=1, col="grey", lwd=1.5)
segments(-4.5,9,par("usr")[3],par("usr")[4], lty=1, col="grey", lwd=1.5)
text(-2.5,18,"Example plot for\nperpendicular offsets", cex=2, adj=0, col="darkgrey")
text(-2.5,15,"See Fig. S10 for all plots",cex=1, adj=0, col="darkgrey")
par(xpd=F)
mtext(expression(bold("Model data:")*" Ar"), side=2, line=2, cex=0.8)
mtext(expression(bold("Empirical data:")*" Ar"), side=1, line=2.5, cex=0.8)
box()
axis(3,labels=F)
axis(2)
abline(0,1, lwd=1, lty=2) # add 1:1 line
points(Ar_Mod_A~meanAr_A_red_updist,col=col_Gfos_A, pch=16)
points(Ar_Mod_B~meanAr_B_updist,col=col_Gfos_B, pch=16)
# Dashed perpendicular segments from each point to the 1:1 line.
for (j in 1:length(meanAr_A_red_updist)){
point <- cbind(meanAr_A_red_updist,Ar_Mod_A)[j,]
seg <- unlist(perp.segment.coord(point[1],point[2]))
segments(seg[1],seg[2],seg[3],seg[4], col=col_Gfos_A, lty=2, lwd=0.5)
}
for (k in 1:length(meanAr_B_updist)){
point <- cbind(meanAr_B_updist,Ar_Mod_B)[k,]
seg <- unlist(perp.segment.coord(point[1],point[2]))
segments(seg[1],seg[2],seg[3],seg[4], col=col_Gfos_B, lty=2, lwd=0.5)
}
# NOTE(review): unlike the plot limits above, these max/min calls omit
# na.rm=T — if Ar_modelled contains NAs the text placement becomes NA; confirm.
text(max(Ar_modelled[,-1]),min(Ar_modelled[,-1])+1.5,paste0(measure1_short, " = ",formatC(round(sum(orthdist_Ar_A),sum_digits),digits=sum_digits, format="f")), adj=1, col=col_Gfos_A)
text(max(Ar_modelled[,-1]),min(Ar_modelled[,-1])+0.5,paste0(measure1_short," = ",formatC(round(sum(orthdist_Ar_B),sum_digits),digits=sum_digits, format="f")), adj=1, col=col_Gfos_B)
par(xpd=T)
legend(-5,28,c(label_A,label_B),pch = 16, col = c(col_Gfos_A,col_Gfos_B), bty="n", cex=2)
# text(0,0,"SOSO = Sum of squared orthogonals", adj=0)
par(xpd=F)
dev.off()
##** FIG 5 ####
##** Fst by instream distance (power function) by species, showing IBD
# Genetic differentiation (pairwise Nei Fst) against instream distance with a
# power-function model fit; only produced when Fst analyses are enabled.
if (fst) {
  fig5_base <- paste0(WD, "/Analysis_", output, "/Fig5")
  # Open the requested graphics device: vector PDF or 300 dpi PNG.
  if (pdf) {
    pdf(paste0(fig5_base, ".pdf"), width = 8, height = 6)
  } else {
    png(paste0(fig5_base, ".png"), width = 8, height = 6, units = "in", res = 300)
  }
  GLMplot("fst", "dist", dat = DISTDATA, model = "slm_fst_power",
          CI_border = F, xlabel = "Instream distance [km]",
          ylabel = expression('Genetic diff. [Pairwise Nei F'[ST]*']'),
          xax = "n", pointtrans = T)
  # Distances are stored in metres; relabel the x-axis ticks in kilometres.
  km_ticks <- c(0, 50, 100, 150, 200, 250)
  axis(1, km_ticks, at = km_ticks * 1000, cex.axis = 1.5)
  dev.off()
}
#### SUPP INFO -+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ####
dir.create(paste0(WD,"/Analysis_",output,"/SuppFigs"), showWarnings=F)
##** FIG S1 ####
# create an overview map (once at the end of looping over parameter space)
# Shows the river network, the modelled network vertices, microsatellite
# sampling sites per clade, and presence/absence records.
if(pdf){
  pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS1.pdf"), width=8, height=6)
}else{
  png(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS1.png"), width=8, height=6, units="in", res=300)
}
par(mar=c(0,0,0,0))
# Flag modelled sites (2) vs all other vertices (1) on the igraph network.
V(net)$modsite <- ifelse(V(net)$name%in%modsite,2,1)
# NOTE(review): the original `if (internal){...}else{...}` here called
# river_plot() with byte-identical arguments in both branches (unlike the
# FIG S5/S6 maps, where the branches differ), so the branch was redundant and
# has been collapsed. If the two cases were meant to differ, restore the
# conditional with the intended arguments.
river_plot(overview_map = F, col_rhine=col_rhine, col_rhone=NA, col_ticino = NA, col_inn=NA, col_water = col_water, axes="none")
# Overlay the modelled network on top of the base map (add=T, no rescaling).
plot(net, layout=net_layout, edge.arrow.size=0, edge.width=2.5, edge.color="blue",
     vertex.color=c(adjustcolor("white",0),"dark red")[V(net)$modsite], vertex.size=ifelse(V(net)$modsite==2,1,1), vertex.frame.color=col_water, vertex.shape="none",
     vertex.label=NA,
     rescale=F, xlim=c(min(net_layout[,1]), max(net_layout[,1])), ylim = c(min(net_layout[,2]), max(net_layout[,2])), asp = 0, add=T) # look at the existing/preloaded data
cex_small=0.5
cex_big=0.75
if (internal){
  points(Rhine$chx[Rhine$sample==0], Rhine$chy[Rhine$sample==0], pch=21, col="black", bg="white", cex=cex_small, lwd=0.75) # plot empty samples
  points(Rhine$chx[Rhine$sample==1], Rhine$chy[Rhine$sample==1], pch=21, col="black", bg="black", cex=cex_small, lwd=0.75) # plot amphipod data
  points(Gfos$chx[Gfos$type=="K"], Gfos$chy[Gfos$type=="K"], pch=21, col="black", bg=col_Gfos, cex=cex_big) # plot Gammarus fossarum complex
  points(Gfos$chx[Gfos$type=="A"], Gfos$chy[Gfos$type=="A"], pch=21, col="black", bg=col_Gfos_A, cex=cex_big) # plot Gammarus fossarum type A
  points(Gfos$chx[Gfos$type=="B"], Gfos$chy[Gfos$type=="B"], pch=21, col="black", bg=col_Gfos_B, cex=cex_big) # plot Gammarus fossarum type B
  points(Gfos$chx[Gfos$type=="C"], Gfos$chy[Gfos$type=="C"], pch=21, col="black", bg=col_Gfos, cex=cex_big) # plot Gammarus fossarum type C
}
# Microsat sites as triangles; sites with both clades (gendata==3) get a B-coloured
# triangle with a smaller A-coloured triangle drawn on top.
points(site_coord$x, site_coord$y, col=c(1,1,0)[site_coord$gendata], bg=c(col_Gfos_A,col_Gfos_B,0)[site_coord$gendata], cex=1.5, pch=24)
points(site_coord$x[site_coord$gendata==3], site_coord$y[site_coord$gendata==3], col=1, bg=col_Gfos_B, cex=1.5, pch=24)
points(site_coord$x[site_coord$gendata==3], site_coord$y[site_coord$gendata==3], col=col_Gfos_A, cex=0.75, pch=17)
# Two stacked legends at the same anchor build the combined triangle symbols.
text(465000,315000,"Microsat data (> 15 ind.)", adj=0)
legend(465000,315000, c(label_A,label_B,"Both"), col=c(1,1,1), pt.bg=c(col_Gfos_A,col_Gfos_B,col_Gfos_B), pt.cex=1.5, pch=24, bty="n")
legend(465000,315000, c("","",""), col=c(col_Gfos_A,col_Gfos_B,col_Gfos_A), pt.cex=0.75, pch=17, bty="n")
text(465000,265000,"Presence/Absence data", adj=0, cex=0.8)
legend(465000,265000, cex=0.8, c(expression(italic(G. ~ fossarum) ~ "complex"), label_A,label_B, "Amphipods present", "Amphipods absent"), col=c(1,1,1,1,1), pt.bg=c(col_Gfos,col_Gfos_A,col_Gfos_B,"black","white"), pt.cex=c(cex_big,cex_big,cex_big,cex_small,cex_small), pch=21, bty="n")
dev.off()
##** FIG S2 ####
##** Correlation plot all explanatory variables
# Pairwise scatterplots with Kendall rank correlations across the full set of
# candidate explanatory variables (`expl.var1`).
figS2_base <- paste0(WD, "/Analysis_", output, "/SuppFigs/FigS2")
if (pdf) {
  pdf(paste0(figS2_base, ".pdf"), width = 8, height = 6)
} else {
  png(paste0(figS2_base, ".png"), width = 8, height = 6, units = "in", res = 300)
}
PerformanceAnalytics::chart.Correlation(expl.var1, method = "kendall")
dev.off()
##** FIG S3 ####
##** Correlation plot selected and transformed explanatory variables
# Same correlation chart as Fig. S2, but for the reduced/transformed
# explanatory-variable set (`expl.var`).
figS3_base <- paste0(WD, "/Analysis_", output, "/SuppFigs/FigS3")
if (pdf) {
  pdf(paste0(figS3_base, ".pdf"), width = 8, height = 6)
} else {
  png(paste0(figS3_base, ".png"), width = 8, height = 6, units = "in", res = 300)
}
PerformanceAnalytics::chart.Correlation(expl.var, method = "kendall")
dev.off()
##** FIG S4 ####
#Perpendicular offset example
# Toy datasets illustrating how Pearson correlation compares with the
# perpendicular-offset summaries used in this study: SPO (sum of offsets),
# MPO (median offset) and DMPO (directed median offset, signed by whether a
# point lies above or below the 1:1 line).
x <- seq(1,10,1)
y1 <- seq(1,10,1)
y2 <- seq(3,12,1)
y3 <- seq(-1,8,1)
y4 <- seq(2,20,2)
y5 <- seq(20,2,-2)
# y6: near-constant values in shuffled order -> near-zero correlation.
# NOTE(review): sample() is used without set.seed(), so y6 and its printed
# statistics differ between runs.
y6 <- sample(seq(5.499,5.501,0.00001),10)
y6 <- y6[order(y6)[c(1,10,3,8,5,6,7,4,9,2)]]
cor1 <- cor(x,y1)
cor2 <- cor(x,y2)
cor3 <- cor(x,y3)
cor4 <- cor(x,y4)
cor5 <- cor(x,y5)
cor6 <- abs(cor(x,y6))
# Per-dataset offset accumulators, filled point-by-point in the loops below.
orthdist_y1 <- c()
orthdist_y1_directed <- c()
orthdist_y2 <- c()
orthdist_y2_directed <- c()
orthdist_y3 <- c()
orthdist_y3_directed <- c()
orthdist_y4 <- c()
orthdist_y4_directed <- c()
orthdist_y5 <- c()
orthdist_y5_directed <- c()
orthdist_y6 <- c()
orthdist_y6_directed <- c()
# all combined in one plot
# NOTE(review): no pdf()/png() device is opened for this combined plot, so it
# is drawn on whatever device is currently active (Rplots.pdf in a
# non-interactive run). Presumably an exploratory figure — confirm it is
# intentional that it is not written to the SuppFigs folder.
col1 <- "darkgreen"
col2 <- "steelblue"
col3 <- "red"
col4 <- "orange"
col5 <- "purple"
col6 <- "gold"
plot(x,y1, xlim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)), ylim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)),
     asp=1, type="n", xlab="Empirical data", ylab="Simulated data")
abline(0,1, lwd=1, lty=2)
# For each dataset: draw the points, then compute and draw the perpendicular
# offset of every point to the 1:1 line (perp.segment.coord returns the foot
# of the perpendicular; euc.dist its length; the sign encodes above/below).
points(x,y1,col=col1, pch=16)
for (i in 1:length(x)){
  point <- cbind(x,y1)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y1[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y1_directed[i] <- sign(seg[2]-seg[4])*orthdist_y1[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col1, lty=2, lwd=0.5)
}
points(x,y2,col=col2, pch=16)
for (i in 1:length(x)){
  point <- cbind(x,y2)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y2[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y2_directed[i] <- sign(seg[2]-seg[4])*orthdist_y2[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col2, lty=2, lwd=0.5)
}
points(x,y3,col=col3, pch=16)
for (i in 1:length(x)){
  point <- cbind(x,y3)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y3[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y3_directed[i] <- sign(seg[2]-seg[4])*orthdist_y3[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col3, lty=2, lwd=0.5)
}
points(x,y4,col=col4, pch=16)
for (i in 1:length(x)){
  point <- cbind(x,y4)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y4[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y4_directed[i] <- sign(seg[2]-seg[4])*orthdist_y4[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col4, lty=2, lwd=0.5)
}
points(x,y5,col=col5, pch=16)
for (i in 1:length(x)){
  point <- cbind(x,y5)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y5[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y5_directed[i] <- sign(seg[2]-seg[4])*orthdist_y5[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col5, lty=2, lwd=0.5)
}
points(x,y6,col=col6, pch=16)
for (i in 1:length(x)){
  point <- cbind(x,y6)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y6[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y6_directed[i] <- sign(seg[2]-seg[4])*orthdist_y6[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col6, lty=2, lwd=0.5)
}
# One annotation row per dataset: correlation and the three offset summaries.
text(8,0,paste0("Cor: ", specify_decimal(cor1,1)), col=col1)
text(15,0,paste0("SPO: ", specify_decimal(sum(orthdist_y1),1)), col=col1)
text(20,0,paste0("MPO: ", specify_decimal(median(orthdist_y1),1)),col=col1)
text(25,0,paste0("DMPO: ", specify_decimal(median(orthdist_y1_directed),1)), col=col1)
text(8,1,paste0("Cor: ", specify_decimal(cor2,1)), col=col2)
text(15,1,paste0("SPO: ", specify_decimal(sum(orthdist_y2),1)), col=col2)
text(20,1,paste0("MPO: ", specify_decimal(median(orthdist_y2),1)),col=col2)
text(25,1,paste0("DMPO: ", specify_decimal(median(orthdist_y2_directed),1)), col=col2)
text(8,2,paste0("Cor: ", specify_decimal(cor3,1)), col=col3)
text(15,2,paste0("SPO: ", specify_decimal(sum(orthdist_y3),1)), col=col3)
text(20,2,paste0("MPO: ", specify_decimal(median(orthdist_y3),1)),col=col3)
text(25,2,paste0("DMPO: ", specify_decimal(median(orthdist_y3_directed),1)), col=col3)
text(8,3,paste0("Cor: ", specify_decimal(cor4,1)), col=col4)
text(15,3,paste0("SPO: ", specify_decimal(sum(orthdist_y4),1)), col=col4)
text(20,3,paste0("MPO: ", specify_decimal(median(orthdist_y4),1)),col=col4)
text(25,3,paste0("DMPO: ", specify_decimal(median(orthdist_y4_directed),1)), col=col4)
text(8,4,paste0("Cor: ", specify_decimal(cor5,1)), col=col5)
text(15,4,paste0("SPO: ", specify_decimal(sum(orthdist_y5),1)), col=col5)
text(20,4,paste0("MPO: ", specify_decimal(median(orthdist_y5),1)),col=col5)
text(25,4,paste0("DMPO: ", specify_decimal(median(orthdist_y5_directed),1)), col=col5)
text(8,5,paste0("Cor: ", specify_decimal(cor6,1)), col=col6)
text(15,5,paste0("SPO: ", specify_decimal(sum(orthdist_y6),1)), col=col6)
text(20,5,paste0("MPO: ", specify_decimal(median(orthdist_y6),1)),col=col6)
text(25,5,paste0("DMPO: ", specify_decimal(median(orthdist_y6_directed),1)), col=col6)
# six plots
# Publication version of Fig. S4: the same six toy datasets, one per panel, in
# a 3-column x 2-row multipanel layout. Panels fill column-wise, so the labels
# are drawn in the order (a), (d), (b), (e), (c), (f).
if(pdf){
  pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS4.pdf"), width=9, height=6)
}else{
  png(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS4.png"), width=9, height=6, units="in", res=300)
}
nf <- multipanel.layout(main.col=1,main.row=1,sub.col=3,sub.row=2,sub1=F,sub2=F,main=pla$main, w.legend=0.05, w.axis=0.3, h.axis=0.3)
# Re-create the toy datasets (identical to the exploratory section above).
x <- seq(1,10,1)
y1 <- seq(1,10,1)
y2 <- seq(3,12,1)
y3 <- seq(-1,8,1)
y4 <- seq(2,20,2)
y5 <- seq(20,2,-2)
# Resample y6 until its |correlation| with x is < 0.05.
# NOTE(review): cor6 carries over from the exploratory section above; if it was
# already < 0.05 there, this loop never runs and the earlier y6 is reused.
# cor1..cor5 are likewise reused from above (valid since y1..y5 are identical).
while(cor6>=0.05){
  y6 <- sample(seq(5.499,5.501,0.00001),10)
  y6 <- y6[order(y6)[c(1,10,3,8,5,6,7,4,9,2)]]
  cor6 <- abs(cor(x,y6))
}
par(mar=c(0,0,0,0))
# Shared annotation positions and styling for all six panels; all datasets use
# the same colour in this version.
x_text <- 20
y_text_1 <- 6
y_text_2 <- 4
y_text_3 <- 2
y_text_4 <- 0
cex_perp <- 2
col_perp <- "steelblue"
col1 <- col_perp
col2 <- col_perp
col3 <- col_perp
col4 <- col_perp
col5 <- col_perp
col6 <- col_perp
lwd_perp <- 1
pt.cex <- 2
# Panel (a): y1 (perfect 1:1 agreement).
plot(x,y1, xlim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)), ylim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)),
     asp=1, type="n", xlab="Empirical data", ylab="Simulated data", xaxt="n", yaxt="n")
abline(0,1, lwd=1, lty=2)
points(x,y1,col=col1, pch=16, cex=pt.cex)
for (i in 1:length(x)){
  point <- cbind(x,y1)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y1[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y1_directed[i] <- sign(seg[2]-seg[4])*orthdist_y1[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col1, lty=2, lwd=lwd_perp)
}
text(par("usr")[1]+((par("usr")[2]-par("usr")[1])/40),par("usr")[4]-((par("usr")[4]-par("usr")[3])/40), "(a)", adj=c(0,1), cex=cex_perp)
text(x_text,y_text_1,paste0("Cor: ", specify_decimal(cor1,1)), col=col1, adj=1, cex=cex_perp)
text(x_text,y_text_2,paste0("SPO: ", specify_decimal(sum(orthdist_y1),1)), col=col1, adj=1, cex=cex_perp)
text(x_text,y_text_3,paste0("MPO: ", specify_decimal(median(orthdist_y1),1)),col=col1, adj=1, cex=cex_perp)
text(x_text,y_text_4,paste0("DMPO: ", specify_decimal(median(orthdist_y1_directed),1)), col=col1, adj=1, cex=cex_perp)
axis(2)
# Panel (d): y4 (steeper slope).
plot(x,y4, xlim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)), ylim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)),
     asp=1, type="n", xlab="Empirical data", ylab="Simulated data", xaxt="n", yaxt="n")
abline(0,1, lwd=1, lty=2)
points(x,y4,col=col4, pch=16, cex=pt.cex)
for (i in 1:length(x)){
  point <- cbind(x,y4)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y4[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y4_directed[i] <- sign(seg[2]-seg[4])*orthdist_y4[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col4, lty=2, lwd=lwd_perp)
}
text(par("usr")[1]+((par("usr")[2]-par("usr")[1])/40),par("usr")[4]-((par("usr")[4]-par("usr")[3])/40), "(d)", adj=c(0,1), cex=cex_perp)
text(x_text,y_text_1,paste0("Cor: ", specify_decimal(cor4,1)), col=col4, adj=1, cex=cex_perp)
text(x_text,y_text_2,paste0("SPO: ", specify_decimal(sum(orthdist_y4),1)), col=col4, adj=1, cex=cex_perp)
text(x_text,y_text_3,paste0("MPO: ", specify_decimal(median(orthdist_y4),1)),col=col4, adj=1, cex=cex_perp)
text(x_text,y_text_4,paste0("DMPO: ", specify_decimal(median(orthdist_y4_directed),1)), col=col4, adj=1, cex=cex_perp)
axis(1)
axis(2)
# Panel (b): y2 (shifted up).
plot(x,y2, xlim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)), ylim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)),
     asp=1, type="n", xlab="Empirical data", ylab="Simulated data", xaxt="n", yaxt="n")
abline(0,1, lwd=1, lty=2)
points(x,y2,col=col2, pch=16, cex=pt.cex)
for (i in 1:length(x)){
  point <- cbind(x,y2)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y2[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y2_directed[i] <- sign(seg[2]-seg[4])*orthdist_y2[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col2, lty=2, lwd=lwd_perp)
}
text(par("usr")[1]+((par("usr")[2]-par("usr")[1])/40),par("usr")[4]-((par("usr")[4]-par("usr")[3])/40), "(b)", adj=c(0,1), cex=cex_perp)
text(x_text,y_text_1,paste0("Cor: ", specify_decimal(cor2,1)), col=col2, adj=1, cex=cex_perp)
text(x_text,y_text_2,paste0("SPO: ", specify_decimal(sum(orthdist_y2),1)), col=col2, adj=1, cex=cex_perp)
text(x_text,y_text_3,paste0("MPO: ", specify_decimal(median(orthdist_y2),1)),col=col2, adj=1, cex=cex_perp)
text(x_text,y_text_4,paste0("DMPO: ", specify_decimal(median(orthdist_y2_directed),1)), col=col2, adj=1, cex=cex_perp)
# Panel (e): y5 (negative slope).
plot(x,y5, xlim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)), ylim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)),
     asp=1, type="n", xlab="Empirical data", ylab="Simulated data", xaxt="n", yaxt="n")
abline(0,1, lwd=1, lty=2)
points(x,y5,col=col5, pch=16, cex=pt.cex)
for (i in 1:length(x)){
  point <- cbind(x,y5)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y5[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y5_directed[i] <- sign(seg[2]-seg[4])*orthdist_y5[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col5, lty=2, lwd=lwd_perp)
}
text(par("usr")[1]+((par("usr")[2]-par("usr")[1])/40),par("usr")[4]-((par("usr")[4]-par("usr")[3])/40), "(e)", adj=c(0,1), cex=cex_perp)
text(x_text,y_text_1,paste0("Cor: ", specify_decimal(cor5,1)), col=col5, adj=1, cex=cex_perp)
text(x_text,y_text_2,paste0("SPO: ", specify_decimal(sum(orthdist_y5),1)), col=col5, adj=1, cex=cex_perp)
text(x_text,y_text_3,paste0("MPO: ", specify_decimal(median(orthdist_y5),1)),col=col5, adj=1, cex=cex_perp)
text(x_text,y_text_4,paste0("DMPO: ", specify_decimal(median(orthdist_y5_directed),1)), col=col5, adj=1, cex=cex_perp)
axis(1)
# Panel (c): y3 (shifted down).
plot(x,y3, xlim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)), ylim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)),
     asp=1, type="n", xlab="Empirical data", ylab="Simulated data", xaxt="n", yaxt="n")
abline(0,1, lwd=1, lty=2)
points(x,y3,col=col3, pch=16, cex=pt.cex)
for (i in 1:length(x)){
  point <- cbind(x,y3)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y3[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y3_directed[i] <- sign(seg[2]-seg[4])*orthdist_y3[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col3, lty=2, lwd=lwd_perp)
}
text(par("usr")[1]+((par("usr")[2]-par("usr")[1])/40),par("usr")[4]-((par("usr")[4]-par("usr")[3])/40), "(c)", adj=c(0,1), cex=cex_perp)
text(x_text,y_text_1,paste0("Cor: ", specify_decimal(cor3,1)), col=col3, adj=1, cex=cex_perp)
text(x_text,y_text_2,paste0("SPO: ", specify_decimal(sum(orthdist_y3),1)), col=col3, adj=1, cex=cex_perp)
text(x_text,y_text_3,paste0("MPO: ", specify_decimal(median(orthdist_y3),1)),col=col3, adj=1, cex=cex_perp)
text(x_text,y_text_4,paste0("DMPO: ", specify_decimal(median(orthdist_y3_directed),1)), col=col3, adj=1, cex=cex_perp)
# Panel (f): y6 (near-zero correlation).
plot(x,y6, xlim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)), ylim=c(min(x,y1,y2,y3,y4,y5,y6),max(x,y1,y2,y3,y4,y5,y6)),
     asp=1, type="n", xlab="Empirical data", ylab="Simulated data", xaxt="n", yaxt="n")
abline(0,1, lwd=1, lty=2)
points(x,y6,col=col6, pch=16, cex=pt.cex)
for (i in 1:length(x)){
  point <- cbind(x,y6)[i,]
  seg <- unlist(perp.segment.coord(point[1],point[2]))
  orthdist_y6[i] <- euc.dist(c(seg[1],seg[2]),c(seg[3],seg[4]))
  orthdist_y6_directed[i] <- sign(seg[2]-seg[4])*orthdist_y6[i]
  segments(seg[1],seg[2],seg[3],seg[4], col=col6, lty=2, lwd=lwd_perp)
}
text(par("usr")[1]+((par("usr")[2]-par("usr")[1])/40),par("usr")[4]-((par("usr")[4]-par("usr")[3])/40), "(f)", adj=c(0,1), cex=cex_perp)
text(x_text,y_text_1,paste0("Cor: ", specify_decimal(cor6,1)), col=col6, adj=1, cex=cex_perp)
text(x_text,y_text_2,paste0("SPO: ", specify_decimal(sum(orthdist_y6),1)), col=col6, adj=1, cex=cex_perp)
text(x_text,y_text_3,paste0("MPO: ", specify_decimal(median(orthdist_y6),1)),col=col6, adj=1, cex=cex_perp)
text(x_text,y_text_4,paste0("DMPO: ", specify_decimal(median(orthdist_y6_directed),1)), col=col6, adj=1, cex=cex_perp)
axis(1)
# Dedicated margin panels carrying the shared x/y axis titles.
plot.new()
text(0.5,0.5,"Simulated data", cex=2, srt = 90)
plot.new()
text(0.5,0.5,"Simulated data", cex=2, srt = 90)
plot.new()
text(0.5,0.5,"Empirical data", cex=2)
plot.new()
text(0.5,0.5,"Empirical data", cex=2)
plot.new()
text(0.5,0.5,"Empirical data", cex=2)
dev.off()
##** FIG S5 ####
#*** Mean Ho maps
# One map per simulated-parameter combination (columns 2..ncol of Ho_modelled),
# colouring the modelled sites by their simulated observed heterozygosity.
mp_dim <- multipanel.dimensions(main.col=pla$a,main.row=pla$b,sub.row=pla$x,sub.col=pla$y,sub1=pla$sub1,sub2=pla$sub2, main=T, h.main=0.5, w.legend=0, h.sub2=0.3, w.axis=0.7, h.axis=0, spacer.sub.col=0.5)
if(pdf){
  pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS5.pdf"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]))
}else{
  png(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS5.png"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]), units="in", res=600)
}
par(mar=c(0,0,0,0))
nf <- multipanel.layout(main.col=pla$a,main.row=pla$b,sub.row=pla$x,sub.col=pla$y,sub1=pla$sub1,sub2=pla$sub2, main=T, h.main=0.5, w.legend=0, h.sub2=0.3, w.axis=0.7, h.axis=0, spacer.sub.col=0.5)
for (j in 2:ncol(Ho_modelled)){
  # Base map; the `internal` variant additionally draws the Rhone/Ticino/Inn
  # outlines and river numbers.
  if (internal){
    river_plot(width_country=0.5, lwd_rivers=0.5, xlimit=c(495000,825000), col_water=col_water, north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, plot_rhone=F, lines_rhone=F, plot_ticino=F, lines_ticino=F, plot_inn=F, lines_inn=F, lakes=TRUE, rivers=TRUE, axes="none", river_nr=T)
  }else{
    river_plot(width_country=0.5, lwd_rivers=0.5, xlimit=c(495000,825000), col_water=col_water, north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, plot_rhone=F, plot_ticino=F, plot_inn=F, lakes=TRUE, rivers=TRUE, axes="none", river_nr=F)
  }
  values <- (Ho_modelled[,j]-min(Ho_modelled[,j]))/ (max(Ho_modelled[,j])-min(Ho_modelled[,j])) # rescale column j of Ho values to [0,1] for heatmap plotting
  # NOTE(review): min()/max() are used without na.rm, so a single NA in the
  # column makes all of `values` NA and the panel is left without site points.
  if(!any(is.na(values))){
    # Hoisted out of the loop below: the colour vector is loop-invariant and
    # was previously recomputed on every iteration.
    site_cols <- rgb2hex(col_fun(values))
    for(i in seq_along(match_Mod)){
      points(site_coord$x[match_Mod[i]], site_coord$y[match_Mod[i]], bg=site_cols[i], pch=21, lwd=0.5, cex=1)
    }
  }
  # Column headers over the first panel of each main column.
  if (j %in% c(2,3)){
    mtext(lab[6], side=3, line=-1, cex=1.3)
  }
  if (j %in% c(8,9)){
    mtext(lab[7], side=3, line=-1, cex=1.3)
  }
  if (j %in% c(14,15)){
    mtext(lab[8], side=3, line=-1, cex=1.3)
  }
  gradient.legend(Ho_modelled[,j],alpha=1, val.midpoint=F, round=2, val.cex=1, val.gap = 0.5, title.gap=0.1, xl = 505000, xr = 635000, yb = 20000, yt = 40000, horizontal=T)
}
# Figure title and outer row/column labels drawn in their own layout panels.
plot.new()
text(0.5,0.5,"Simulated observed heterozygosity",adj=c(0.5,0.5), cex=3)
for (i in 1:5){
  plot.new()
  if (i %in% c(1:2)){
    text(0.5,1,lab[i],adj=c(0.5,1), cex=lab_cex[i])
  }
  if (i %in% c(3:5)){
    text(0.5,0.5,lab[i],adj=c(0.5,0.5), cex=lab_cex[i])
  }
}
dev.off()
##** FIG S6 ####
#*** Mean Hs maps
# One map per simulated-parameter combination (columns 2..ncol of Hs_modelled),
# colouring the modelled sites by their simulated expected heterozygosity.
mp_dim <- multipanel.dimensions(main.col=pla$a,main.row=pla$b,sub.row=pla$x,sub.col=pla$y,sub1=pla$sub1,sub2=pla$sub2, main=T, h.main=0.5, w.legend=0, h.sub2=0.3, w.axis=0.7, h.axis=0, spacer.sub.col=0.5)
if(pdf){
  pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS6.pdf"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]))
}else{
  png(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS6.png"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]), units="in", res=600)
}
par(mar=c(0,0,0,0))
nf <- multipanel.layout(main.col=pla$a,main.row=pla$b,sub.row=pla$x,sub.col=pla$y,sub1=pla$sub1,sub2=pla$sub2, main=T, h.main=0.5, w.legend=0, h.sub2=0.3, w.axis=0.7, h.axis=0, spacer.sub.col=0.5)
for (j in 2:ncol(Hs_modelled)){
  # Base map; the `internal` variant additionally draws the Rhone/Ticino/Inn
  # outlines and river numbers.
  if (internal){
    river_plot(width_country=0.5, lwd_rivers=0.5, xlimit=c(495000,825000), col_water=col_water, north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, plot_rhone=F, lines_rhone=F, plot_ticino=F, lines_ticino=F, plot_inn=F, lines_inn=F, lakes=TRUE, rivers=TRUE, axes="none", river_nr=T)
  }else{
    river_plot(width_country=0.5, lwd_rivers=0.5, xlimit=c(495000,825000), col_water=col_water, north_arrow = F, overview_map = F, scalebar=F, arrows = F, border_outline=F, width_border=2, col_rhine = col_rhine, plot_rhone=F, plot_ticino=F, plot_inn=F, lakes=TRUE, rivers=TRUE, axes="none", river_nr=F)
  }
  values <- (Hs_modelled[,j]-min(Hs_modelled[,j]))/ (max(Hs_modelled[,j])-min(Hs_modelled[,j])) # rescale column j of Hs values to [0,1] for heatmap plotting
  # NOTE(review): min()/max() are used without na.rm, so a single NA in the
  # column makes all of `values` NA and the panel is left without site points.
  if(!any(is.na(values))){
    # Hoisted out of the loop below: the colour vector is loop-invariant and
    # was previously recomputed on every iteration.
    site_cols <- rgb2hex(col_fun(values))
    for(i in seq_along(match_Mod)){
      points(site_coord$x[match_Mod[i]], site_coord$y[match_Mod[i]], bg=site_cols[i], pch=21, lwd=0.5, cex=1)
    }
  }
  # Column headers over the first panel of each main column.
  if (j %in% c(2,3)){
    mtext(lab[6], side=3, line=-1, cex=1.3)
  }
  if (j %in% c(8,9)){
    mtext(lab[7], side=3, line=-1, cex=1.3)
  }
  if (j %in% c(14,15)){
    mtext(lab[8], side=3, line=-1, cex=1.3)
  }
  gradient.legend(Hs_modelled[,j],alpha=1, val.midpoint=F, round=2, val.cex=1, val.gap = 0.5, title.gap=0.1, xl = 505000, xr = 635000, yb = 20000, yt = 40000, horizontal=T)
}
# Figure title and outer row/column labels drawn in their own layout panels.
plot.new()
text(0.5,0.5,"Simulated expected heterozygosity",adj=c(0.5,0.5), cex=3)
for (i in 1:5){
  plot.new()
  if (i %in% c(1:2)){
    text(0.5,1,lab[i],adj=c(0.5,1), cex=lab_cex[i])
  }
  if (i %in% c(3:5)){
    text(0.5,0.5,lab[i],adj=c(0.5,0.5), cex=lab_cex[i])
  }
}
dev.off()
##** W: Model performance ####
# Shared x-axis maxima for the offset histograms (Figs S7/S8): Ar rounded up to
# the next integer, Ho/Hs rounded up to the next 0.1.
maxhist_Ar <- ceiling(max(hist_Ar_A, hist_Ar_B, na.rm = T))
maxhist_Ho <- ceiling(10 * max(hist_Ho_A, hist_Ho_B, na.rm = T)) / 10
maxhist_Hs <- ceiling(10 * max(hist_Hs_A, hist_Hs_B, na.rm = T)) / 10
# Column groups of the hist_* offset matrices corresponding to the three
# W settings (w = 0 baseline, w = 0.5, w = 1.0).
idx_w00 <- c(1, 2, 7, 8, 13, 14)
idx_w05 <- c(3, 4, 9, 10, 15, 16)
idx_w10 <- c(5, 6, 11, 12, 17, 18)
w00_Ar_A <- hist_Ar_A[, idx_w00]
w05_Ar_A <- hist_Ar_A[, idx_w05]
w10_Ar_A <- hist_Ar_A[, idx_w10]
w00_Ar_B <- hist_Ar_B[, idx_w00]
w05_Ar_B <- hist_Ar_B[, idx_w05]
w10_Ar_B <- hist_Ar_B[, idx_w10]
w00_Ho_A <- hist_Ho_A[, idx_w00]
w05_Ho_A <- hist_Ho_A[, idx_w05]
w10_Ho_A <- hist_Ho_A[, idx_w10]
w00_Ho_B <- hist_Ho_B[, idx_w00]
w05_Ho_B <- hist_Ho_B[, idx_w05]
w10_Ho_B <- hist_Ho_B[, idx_w10]
w00_Hs_A <- hist_Hs_A[, idx_w00]
w05_Hs_A <- hist_Hs_A[, idx_w05]
w10_Hs_A <- hist_Hs_A[, idx_w10]
w00_Hs_B <- hist_Hs_B[, idx_w00]
w05_Hs_B <- hist_Hs_B[, idx_w05]
w10_Hs_B <- hist_Hs_B[, idx_w10]
# Per-column statistic (sum -> SPO, median -> MPO) of the baseline minus each
# alternative setting; negative entries mean the alternative fits better.
w_perf_diff <- function(base, alt1, alt2, stat) {
  c(apply(base, 2, stat) - apply(alt1, 2, stat),
    apply(base, 2, stat) - apply(alt2, 2, stat))
}
diffSPO_w00both_Ar_A <- w_perf_diff(w00_Ar_A, w05_Ar_A, w10_Ar_A, sum)
diffSPO_w00both_Ar_B <- w_perf_diff(w00_Ar_B, w05_Ar_B, w10_Ar_B, sum)
diffSPO_w00both_Ho_A <- w_perf_diff(w00_Ho_A, w05_Ho_A, w10_Ho_A, sum)
diffSPO_w00both_Ho_B <- w_perf_diff(w00_Ho_B, w05_Ho_B, w10_Ho_B, sum)
diffSPO_w00both_Hs_A <- w_perf_diff(w00_Hs_A, w05_Hs_A, w10_Hs_A, sum)
diffSPO_w00both_Hs_B <- w_perf_diff(w00_Hs_B, w05_Hs_B, w10_Hs_B, sum)
diffMPO_w00both_Ar_A <- w_perf_diff(w00_Ar_A, w05_Ar_A, w10_Ar_A, median)
diffMPO_w00both_Ar_B <- w_perf_diff(w00_Ar_B, w05_Ar_B, w10_Ar_B, median)
diffMPO_w00both_Ho_A <- w_perf_diff(w00_Ho_A, w05_Ho_A, w10_Ho_A, median)
diffMPO_w00both_Ho_B <- w_perf_diff(w00_Ho_B, w05_Ho_B, w10_Ho_B, median)
diffMPO_w00both_Hs_A <- w_perf_diff(w00_Hs_A, w05_Hs_A, w10_Hs_A, median)
diffMPO_w00both_Hs_B <- w_perf_diff(w00_Hs_B, w05_Hs_B, w10_Hs_B, median)
# Pool all MPO and SPO comparisons and count how often a w > 0 setting improved
# (negative difference) on the w = 0 baseline.
list_w00both <- c(diffMPO_w00both_Ar_A, diffMPO_w00both_Ar_B,
                  diffMPO_w00both_Ho_A, diffMPO_w00both_Ho_B,
                  diffMPO_w00both_Hs_A, diffMPO_w00both_Hs_B,
                  diffSPO_w00both_Ar_A, diffSPO_w00both_Ar_B,
                  diffSPO_w00both_Ho_A, diffSPO_w00both_Ho_B,
                  diffSPO_w00both_Hs_A, diffSPO_w00both_Hs_B)
total_comparison_w00both <- length(list_w00both)
improved_fit_w00both <- sum(list_w00both < 0)
# Model performance W: share of comparisons improved by w > 0
improved_fit_w00both/total_comparison_w00both
##** FIG S7 ####
##** Perpendicular offset histogram comparison for W
# 3x3 grid: rows = W settings (w = 0, 0.5, 1.0), columns = Ar, Ho, Hs.
# Clade A histograms are drawn first, clade B overplotted; dashed vertical
# lines mark the per-clade medians (over a white underlay for visibility).
# Note: hist() on the w*_*_* matrices treats each matrix as one flattened
# numeric vector (all columns pooled).
if(pdf){
  pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS7.pdf"), width=8, height=6)
}else{
  png(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS7.png"), width=8, height=6, units="in", res=300)
}
op <- par(mfrow = c(3,3),
          oma = c(5,5,2,0) + 0.1,
          mar = c(0,0,2,1) + 0.1)
# Row 1: w = 0 baseline.
hist(w00_Ar_A, col=col_Gfos_A, cex.axis=2, xaxt="n", main="", ylim=c(0,70), xlim = c(0,maxhist_Ar), breaks=seq(0,maxhist_Ar,0.25))
hist(w00_Ar_B, col=col_Gfos_B, add=T, breaks=seq(0,maxhist_Ar,0.25))
axis(1, labels=F, cex.axis=2)
mtext(side = 3, text = lab_Ar_short, line = 1, adj=0.5, cex = 1.5)
abline(v=median(w00_Ar_A, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w00_Ar_A, na.rm=T), col=col_Gfos_A, lty=2, lwd=2)
abline(v=median(w00_Ar_B, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w00_Ar_B, na.rm=T), col=col_Gfos_B, lty=2, lwd=2)
textbox(4,65, txt = lab[3], txt.adj=0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
legend(2,50, c(expression("Median" ~ italic(G. ~ fossarum) ~ "type A"),expression("Median" ~ italic(G. ~ fossarum) ~ "type B")),
       lty=2, col=c(col_Gfos_A,col_Gfos_B), bty="n", cex=0.85, lwd=2)
hist(w00_Ho_A, col=col_Gfos_A, yaxt="n", xaxt="n", main="", ylim=c(0,70), xlim = c(0,maxhist_Ho), breaks=seq(0,maxhist_Ho,0.025))
hist(w00_Ho_B, col=col_Gfos_B, add=T, breaks=seq(0,maxhist_Ho,0.025))
axis(1, labels=F, cex.axis=2)
axis(2, labels=F, cex.axis=2)
mtext(side = 3, text = lab_Ho_short, line = 1, adj=0.5, cex = 1.5)
abline(v=median(w00_Ho_A, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w00_Ho_A, na.rm=T), col=col_Gfos_A, lty=2, lwd=2)
abline(v=median(w00_Ho_B, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w00_Ho_B, na.rm=T), col=col_Gfos_B, lty=2, lwd=2)
textbox(0.3,65, txt = lab[3], txt.adj=0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
hist(w00_Hs_A, col=col_Gfos_A, yaxt="n", xaxt="n", main="", ylim=c(0,70), xlim = c(0,maxhist_Hs), breaks=seq(0,maxhist_Hs,0.025))
hist(w00_Hs_B, col=col_Gfos_B, add=T, breaks=seq(0,maxhist_Hs,0.025))
axis(1, labels=F, cex.axis=2)
axis(2, labels=F, cex.axis=2)
mtext(side = 3, text = lab_Hs_short, line = 1, adj=0.5, cex = 1.5)
abline(v=median(w00_Hs_A, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w00_Hs_A, na.rm=T), col=col_Gfos_A, lty=2, lwd=2)
abline(v=median(w00_Hs_B, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w00_Hs_B, na.rm=T), col=col_Gfos_B, lty=2, lwd=2)
textbox(0.3,65, txt = lab[3], txt.adj=0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
# Row 2: w = 0.5.
hist(w05_Ar_A, col=col_Gfos_A, cex.axis=2, xaxt="n", main="", ylim=c(0,70), xlim = c(0,maxhist_Ar), breaks=seq(0,maxhist_Ar,0.25))
hist(w05_Ar_B, col=col_Gfos_B, add=T, breaks=seq(0,maxhist_Ar,0.25))
axis(1, labels=F, cex.axis=2)
abline(v=median(w05_Ar_A, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w05_Ar_A, na.rm=T), col=col_Gfos_A, lty=2, lwd=2)
abline(v=median(w05_Ar_B, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w05_Ar_B, na.rm=T), col=col_Gfos_B, lty=2, lwd=2)
textbox(4,65, txt = lab[4], txt.adj=0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
hist(w05_Ho_A, col=col_Gfos_A, yaxt="n", xaxt="n", main="", ylim=c(0,70), xlim = c(0,maxhist_Ho), breaks=seq(0,maxhist_Ho,0.025))
hist(w05_Ho_B, col=col_Gfos_B, add=T, breaks=seq(0,maxhist_Ho,0.025))
axis(1, labels=F, cex.axis=2)
axis(2, labels=F, cex.axis=2)
abline(v=median(w05_Ho_A, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w05_Ho_A, na.rm=T), col=col_Gfos_A, lty=2, lwd=2)
abline(v=median(w05_Ho_B, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w05_Ho_B, na.rm=T), col=col_Gfos_B, lty=2, lwd=2)
textbox(0.3,65, txt = lab[4], txt.adj=0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
hist(w05_Hs_A, col=col_Gfos_A, yaxt="n", xaxt="n", main="", ylim=c(0,70), xlim = c(0,maxhist_Hs), breaks=seq(0,maxhist_Hs,0.025))
hist(w05_Hs_B, col=col_Gfos_B, add=T, breaks=seq(0,maxhist_Hs,0.025))
axis(1, labels=F, cex.axis=2)
axis(2, labels=F, cex.axis=2)
abline(v=median(w05_Hs_A, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w05_Hs_A, na.rm=T), col=col_Gfos_A, lty=2, lwd=2)
abline(v=median(w05_Hs_B, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w05_Hs_B, na.rm=T), col=col_Gfos_B, lty=2, lwd=2)
textbox(0.3,65, txt = lab[4], txt.adj=0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
# Row 3: w = 1.0 (bottom row carries the visible x-axis labels).
hist(w10_Ar_A, col=col_Gfos_A, cex.axis=2, xaxt="n", main="", ylim=c(0,70), xlim = c(0,maxhist_Ar), breaks=seq(0,maxhist_Ar,0.25))
hist(w10_Ar_B, col=col_Gfos_B, add=T, breaks=seq(0,maxhist_Ar,0.25))
axis(1, labels=T, cex.axis=2, padj=0.5)
abline(v=median(w10_Ar_A, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w10_Ar_A, na.rm=T), col=col_Gfos_A, lty=2, lwd=2)
abline(v=median(w10_Ar_B, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w10_Ar_B, na.rm=T), col=col_Gfos_B, lty=2, lwd=2)
textbox(4,65, txt = lab[5], txt.adj=0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
hist(w10_Ho_A, col=col_Gfos_A, yaxt="n", xaxt="n", main="", ylim=c(0,70), xlim = c(0,maxhist_Ho), breaks=seq(0,maxhist_Ho,0.025))
hist(w10_Ho_B, col=col_Gfos_B, add=T, breaks=seq(0,maxhist_Ho,0.025))
axis(1, labels=T, cex.axis=2, padj=0.5)
axis(2, labels=F, cex.axis=2)
abline(v=median(w10_Ho_A, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w10_Ho_A, na.rm=T), col=col_Gfos_A, lty=2, lwd=2)
abline(v=median(w10_Ho_B, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w10_Ho_B, na.rm=T), col=col_Gfos_B, lty=2, lwd=2)
textbox(0.3,65, txt = lab[5], txt.adj=0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
hist(w10_Hs_A, col=col_Gfos_A, yaxt="n", xaxt="n", main="", ylim=c(0,70), xlim = c(0,maxhist_Hs), breaks=seq(0,maxhist_Hs,0.025))
hist(w10_Hs_B, col=col_Gfos_B, add=T, breaks=seq(0,maxhist_Hs,0.025))
axis(1, labels=T, cex.axis=2, padj=0.5)
axis(2, labels=F, cex.axis=2)
abline(v=median(w10_Hs_A, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w10_Hs_A, na.rm=T), col=col_Gfos_A, lty=2, lwd=2)
abline(v=median(w10_Hs_B, na.rm=T), col=white_transparent, lwd=2)
abline(v=median(w10_Hs_B, na.rm=T), col=col_Gfos_B, lty=2, lwd=2)
textbox(0.3,65, txt = lab[5], txt.adj=0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
# NOTE(review): the next expression is computed and discarded (no assignment,
# no side effect) — presumably a leftover from sizing the textbox labels.
strwidth(lab[5]) * 2
title(xlab = "Perpendicular offset",
      ylab = "Frequency",
      outer = TRUE, line = 3.5, cex.lab=2)
dev.off()
##** d: Model performance ####
# Column groups of the hist_* offset matrices corresponding to the three
# dispersal settings (d = 0.001 baseline, d = 0.01, d = 0.1).
idx_d0001 <- 1:6
idx_d001 <- 7:12
idx_d01 <- 13:18
d0001_Ar_A <- hist_Ar_A[, idx_d0001]
d001_Ar_A <- hist_Ar_A[, idx_d001]
d01_Ar_A <- hist_Ar_A[, idx_d01]
d0001_Ar_B <- hist_Ar_B[, idx_d0001]
d001_Ar_B <- hist_Ar_B[, idx_d001]
d01_Ar_B <- hist_Ar_B[, idx_d01]
d0001_Ho_A <- hist_Ho_A[, idx_d0001]
d001_Ho_A <- hist_Ho_A[, idx_d001]
d01_Ho_A <- hist_Ho_A[, idx_d01]
d0001_Ho_B <- hist_Ho_B[, idx_d0001]
d001_Ho_B <- hist_Ho_B[, idx_d001]
d01_Ho_B <- hist_Ho_B[, idx_d01]
d0001_Hs_A <- hist_Hs_A[, idx_d0001]
d001_Hs_A <- hist_Hs_A[, idx_d001]
d01_Hs_A <- hist_Hs_A[, idx_d01]
d0001_Hs_B <- hist_Hs_B[, idx_d0001]
d001_Hs_B <- hist_Hs_B[, idx_d001]
d01_Hs_B <- hist_Hs_B[, idx_d01]
# Per-column statistic (sum -> SPO, median -> MPO) of the baseline minus each
# alternative setting; negative entries mean the alternative fits better.
d_perf_diff <- function(base, alt1, alt2, stat) {
  c(apply(base, 2, stat) - apply(alt1, 2, stat),
    apply(base, 2, stat) - apply(alt2, 2, stat))
}
diffSPO_d0001both_Ar_A <- d_perf_diff(d0001_Ar_A, d001_Ar_A, d01_Ar_A, sum)
diffSPO_d0001both_Ar_B <- d_perf_diff(d0001_Ar_B, d001_Ar_B, d01_Ar_B, sum)
diffSPO_d0001both_Ho_A <- d_perf_diff(d0001_Ho_A, d001_Ho_A, d01_Ho_A, sum)
diffSPO_d0001both_Ho_B <- d_perf_diff(d0001_Ho_B, d001_Ho_B, d01_Ho_B, sum)
diffSPO_d0001both_Hs_A <- d_perf_diff(d0001_Hs_A, d001_Hs_A, d01_Hs_A, sum)
diffSPO_d0001both_Hs_B <- d_perf_diff(d0001_Hs_B, d001_Hs_B, d01_Hs_B, sum)
diffMPO_d0001both_Ar_A <- d_perf_diff(d0001_Ar_A, d001_Ar_A, d01_Ar_A, median)
diffMPO_d0001both_Ar_B <- d_perf_diff(d0001_Ar_B, d001_Ar_B, d01_Ar_B, median)
diffMPO_d0001both_Ho_A <- d_perf_diff(d0001_Ho_A, d001_Ho_A, d01_Ho_A, median)
diffMPO_d0001both_Ho_B <- d_perf_diff(d0001_Ho_B, d001_Ho_B, d01_Ho_B, median)
diffMPO_d0001both_Hs_A <- d_perf_diff(d0001_Hs_A, d001_Hs_A, d01_Hs_A, median)
diffMPO_d0001both_Hs_B <- d_perf_diff(d0001_Hs_B, d001_Hs_B, d01_Hs_B, median)
# Pool all MPO and SPO comparisons and count how often a larger d improved
# (negative difference) on the d = 0.001 baseline.
list_d0001both <- c(diffMPO_d0001both_Ar_A, diffMPO_d0001both_Ar_B,
                    diffMPO_d0001both_Ho_A, diffMPO_d0001both_Ho_B,
                    diffMPO_d0001both_Hs_A, diffMPO_d0001both_Hs_B,
                    diffSPO_d0001both_Ar_A, diffSPO_d0001both_Ar_B,
                    diffSPO_d0001both_Ho_A, diffSPO_d0001both_Ho_B,
                    diffSPO_d0001both_Hs_A, diffSPO_d0001both_Hs_B)
total_comparison_d0001both <- length(list_d0001both)
improved_fit_d0001both <- sum(list_d0001both < 0)
# Model performance d: share of comparisons improved by a larger d
improved_fit_d0001both/total_comparison_d0001both
##** FIG S8 ####
##** Perpendicular offset histogram comparison for d
## 3x3 grid: rows = dispersal level (d0001 / d001 / d01), columns = genetic
## measure (Ar / Ho / Hs); each panel overlays lineage A and lineage B.

# Dashed median guides for both lineages; each line is drawn twice (white
# underlay, then colored dashes) so it stays visible over the histogram bars.
draw_medians_d <- function(vals_A, vals_B) {
  abline(v = median(vals_A, na.rm = TRUE), col = white_transparent, lwd = 2)
  abline(v = median(vals_A, na.rm = TRUE), col = col_Gfos_A, lty = 2, lwd = 2)
  abline(v = median(vals_B, na.rm = TRUE), col = white_transparent, lwd = 2)
  abline(v = median(vals_B, na.rm = TRUE), col = col_Gfos_B, lty = 2, lwd = 2)
}
if (pdf) {
  pdf(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS8.pdf"), width = 8, height = 6)
} else {
  png(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS8.png"), width = 8, height = 6, units = "in", res = 300)
}
op <- par(mfrow = c(3, 3),
          oma = c(5, 5, 2, 0) + 0.1,
          mar = c(0, 0, 2, 1) + 0.1)
## Row 1: d0001
hist(d0001_Ar_A, col = col_Gfos_A, cex.axis = 2, xaxt = "n", main = "", ylim = c(0, 70), xlim = c(0, maxhist_Ar), breaks = seq(0, maxhist_Ar, 0.25))
hist(d0001_Ar_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Ar, 0.25))
axis(1, labels = FALSE, cex.axis = 2)
mtext(side = 3, text = lab_Ar_short, line = 1, adj = 0.5, cex = 1.5)
draw_medians_d(d0001_Ar_A, d0001_Ar_B)
textbox(4, 65, txt = lab[6], txt.adj = 0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
legend(2, 50, c(expression("Median" ~ italic(G. ~ fossarum) ~ "type A"), expression("Median" ~ italic(G. ~ fossarum) ~ "type B")),
       lty = 2, col = c(col_Gfos_A, col_Gfos_B), bty = "n", cex = 0.85, lwd = 2)
hist(d0001_Ho_A, col = col_Gfos_A, yaxt = "n", xaxt = "n", main = "", ylim = c(0, 70), xlim = c(0, maxhist_Ho), breaks = seq(0, maxhist_Ho, 0.025))
hist(d0001_Ho_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Ho, 0.025))
axis(1, labels = FALSE, cex.axis = 2)
axis(2, labels = FALSE, cex.axis = 2)
mtext(side = 3, text = lab_Ho_short, line = 1, adj = 0.5, cex = 1.5)
draw_medians_d(d0001_Ho_A, d0001_Ho_B)
textbox(0.3, 65, txt = lab[6], txt.adj = 0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
hist(d0001_Hs_A, col = col_Gfos_A, yaxt = "n", xaxt = "n", main = "", ylim = c(0, 70), xlim = c(0, maxhist_Hs), breaks = seq(0, maxhist_Hs, 0.025))
hist(d0001_Hs_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Hs, 0.025))
axis(1, labels = FALSE, cex.axis = 2)
axis(2, labels = FALSE, cex.axis = 2)
mtext(side = 3, text = lab_Hs_short, line = 1, adj = 0.5, cex = 1.5)
draw_medians_d(d0001_Hs_A, d0001_Hs_B)
textbox(0.3, 65, txt = lab[6], txt.adj = 0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
## Row 2: d001
hist(d001_Ar_A, col = col_Gfos_A, cex.axis = 2, xaxt = "n", main = "", ylim = c(0, 70), xlim = c(0, maxhist_Ar), breaks = seq(0, maxhist_Ar, 0.25))
hist(d001_Ar_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Ar, 0.25))
axis(1, labels = FALSE, cex.axis = 2)
draw_medians_d(d001_Ar_A, d001_Ar_B)
textbox(4, 65, txt = lab[7], txt.adj = 0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
hist(d001_Ho_A, col = col_Gfos_A, yaxt = "n", xaxt = "n", main = "", ylim = c(0, 70), xlim = c(0, maxhist_Ho), breaks = seq(0, maxhist_Ho, 0.025))
hist(d001_Ho_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Ho, 0.025))
axis(1, labels = FALSE, cex.axis = 2)
axis(2, labels = FALSE, cex.axis = 2)
draw_medians_d(d001_Ho_A, d001_Ho_B)
textbox(0.3, 65, txt = lab[7], txt.adj = 0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
hist(d001_Hs_A, col = col_Gfos_A, yaxt = "n", xaxt = "n", main = "", ylim = c(0, 70), xlim = c(0, maxhist_Hs), breaks = seq(0, maxhist_Hs, 0.025))
hist(d001_Hs_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Hs, 0.025))
axis(1, labels = FALSE, cex.axis = 2)
axis(2, labels = FALSE, cex.axis = 2)
draw_medians_d(d001_Hs_A, d001_Hs_B)
textbox(0.3, 65, txt = lab[7], txt.adj = 0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
## Row 3: d01 (bottom row carries the visible x-axis labels)
hist(d01_Ar_A, col = col_Gfos_A, cex.axis = 2, xaxt = "n", main = "", ylim = c(0, 70), xlim = c(0, maxhist_Ar), breaks = seq(0, maxhist_Ar, 0.25))
hist(d01_Ar_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Ar, 0.25))
axis(1, labels = TRUE, cex.axis = 2, padj = 0.5)
draw_medians_d(d01_Ar_A, d01_Ar_B)
textbox(4, 65, txt = lab[8], txt.adj = 0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
hist(d01_Ho_A, col = col_Gfos_A, yaxt = "n", xaxt = "n", main = "", ylim = c(0, 70), xlim = c(0, maxhist_Ho), breaks = seq(0, maxhist_Ho, 0.025))
hist(d01_Ho_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Ho, 0.025))
axis(1, labels = TRUE, cex.axis = 2, padj = 0.5)
axis(2, labels = FALSE, cex.axis = 2)
draw_medians_d(d01_Ho_A, d01_Ho_B)
textbox(0.3, 65, txt = lab[8], txt.adj = 0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
hist(d01_Hs_A, col = col_Gfos_A, yaxt = "n", xaxt = "n", main = "", ylim = c(0, 70), xlim = c(0, maxhist_Hs), breaks = seq(0, maxhist_Hs, 0.025))
hist(d01_Hs_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Hs, 0.025))
axis(1, labels = TRUE, cex.axis = 2, padj = 0.5)
axis(2, labels = FALSE, cex.axis = 2)
draw_medians_d(d01_Hs_A, d01_Hs_B)
textbox(0.3, 65, txt = lab[8], txt.adj = 0.5, txt.cex = 2, frm.brd = NA, frm.col = white_transparent)
title(xlab = "Perpendicular offset",
      ylab = "Frequency",
      outer = TRUE, line = 3.5, cex.lab = 2)
dev.off()
##** K: Model performance ####
## Odd columns of each hist_* offset matrix belong to the K0 scenario, even
## columns to the K1 scenario.
k0_cols <- seq(1, 17, by = 2)
k1_cols <- seq(2, 18, by = 2)
k0_Ar_A <- hist_Ar_A[, k0_cols]
k1_Ar_A <- hist_Ar_A[, k1_cols]
k0_Ar_B <- hist_Ar_B[, k0_cols]
k1_Ar_B <- hist_Ar_B[, k1_cols]
k0_Ho_A <- hist_Ho_A[, k0_cols]
k1_Ho_A <- hist_Ho_A[, k1_cols]
k0_Ho_B <- hist_Ho_B[, k0_cols]
k1_Ho_B <- hist_Ho_B[, k1_cols]
k0_Hs_A <- hist_Hs_A[, k0_cols]
k1_Hs_A <- hist_Hs_A[, k1_cols]
k0_Hs_B <- hist_Hs_B[, k0_cols]
k1_Hs_B <- hist_Hs_B[, k1_cols]
# Column-wise differences of summed (SPO) and median (MPO) perpendicular
# offsets between the K0 and K1 scenarios.
spo_diff <- function(ref, alt) apply(ref, 2, sum) - apply(alt, 2, sum)
mpo_diff <- function(ref, alt) apply(ref, 2, median) - apply(alt, 2, median)
diffSPO_k0k1_Ar_A <- spo_diff(k0_Ar_A, k1_Ar_A)
diffSPO_k0k1_Ar_B <- spo_diff(k0_Ar_B, k1_Ar_B)
diffSPO_k0k1_Ho_A <- spo_diff(k0_Ho_A, k1_Ho_A)
diffSPO_k0k1_Ho_B <- spo_diff(k0_Ho_B, k1_Ho_B)
diffSPO_k0k1_Hs_A <- spo_diff(k0_Hs_A, k1_Hs_A)
diffSPO_k0k1_Hs_B <- spo_diff(k0_Hs_B, k1_Hs_B)
diffMPO_k0k1_Ar_A <- mpo_diff(k0_Ar_A, k1_Ar_A)
diffMPO_k0k1_Ar_B <- mpo_diff(k0_Ar_B, k1_Ar_B)
diffMPO_k0k1_Ho_A <- mpo_diff(k0_Ho_A, k1_Ho_A)
diffMPO_k0k1_Ho_B <- mpo_diff(k0_Ho_B, k1_Ho_B)
diffMPO_k0k1_Hs_A <- mpo_diff(k0_Hs_A, k1_Hs_A)
diffMPO_k0k1_Hs_B <- mpo_diff(k0_Hs_B, k1_Hs_B)
# A negative difference means K0 fits better than K1.
list_k0k1 <- c(diffMPO_k0k1_Ar_A, diffMPO_k0k1_Ar_B,
               diffMPO_k0k1_Ho_A, diffMPO_k0k1_Ho_B,
               diffMPO_k0k1_Hs_A, diffMPO_k0k1_Hs_B,
               diffSPO_k0k1_Ar_A, diffSPO_k0k1_Ar_B,
               diffSPO_k0k1_Ho_A, diffSPO_k0k1_Ho_B,
               diffSPO_k0k1_Hs_A, diffSPO_k0k1_Hs_B)
total_comparison_k0k1 <- length(list_k0k1)
improved_fit_k0k1 <- sum(list_k0k1 < 0)
# Model performance K: fraction of comparisons in which K0 improves the fit
improved_fit_k0k1/total_comparison_k0k1
##** FIG S9 ####
##** Perpendicular offset histogram comparison for K
## 2x3 grid: rows = carrying-capacity scenario (K0 / K1), columns = genetic
## measure (Ar / Ho / Hs). scal.fact shrinks text/tick sizes for this figure.
scal.fact <- 0.75
# Dashed median guides for both lineages; each line is drawn twice (white
# underlay, then colored dashes) so it stays visible over the histogram bars.
draw_medians_K <- function(vals_A, vals_B) {
  abline(v = median(vals_A, na.rm = TRUE), col = white_transparent, lwd = 2)
  abline(v = median(vals_A, na.rm = TRUE), col = col_Gfos_A, lty = 2, lwd = 2)
  abline(v = median(vals_B, na.rm = TRUE), col = white_transparent, lwd = 2)
  abline(v = median(vals_B, na.rm = TRUE), col = col_Gfos_B, lty = 2, lwd = 2)
}
if (pdf) {
  pdf(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS9.pdf"), width = 8, height = 6 * scal.fact)
} else {
  png(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS9.png"), width = 8, height = 6 * scal.fact, units = "in", res = 300)
}
# NOTE(review): 3 * scal.fact = 2.25 is not an integer; par() coerces it, which
# here yields the intended 2 rows for the two K panels -- confirm intended.
op <- par(mfrow = c(3 * scal.fact, 3),
          oma = c(5 * scal.fact, 5 * scal.fact, 2 * scal.fact, 0) + 0.1,
          mar = c(0, 0, 2 * scal.fact, 1 * scal.fact) + 0.1)
## Row 1: K0
hist(k0_Ar_A, col = col_Gfos_A, tcl = -0.5 * scal.fact, cex.axis = 2 * scal.fact, xaxt = "n", yaxt = "n", main = "", ylim = c(0, 90), xlim = c(0, maxhist_Ar), breaks = seq(0, maxhist_Ar, 0.25))
hist(k0_Ar_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Ar, 0.25))
axis(1, labels = FALSE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact)
axis(2, labels = TRUE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact, padj = 0.5)
mtext(side = 3, text = lab_Ar_short, line = 1, adj = 0.5, cex = 1.5)
draw_medians_K(k0_Ar_A, k0_Ar_B)
textbox(4, 85, txt = lab[1], txt.adj = 0.5, txt.cex = 2 * scal.fact, frm.brd = NA, frm.col = white_transparent)
legend(2, 70, c(expression("Median" ~ italic(G. ~ fossarum) ~ "type A"), expression("Median" ~ italic(G. ~ fossarum) ~ "type B")),
       lty = 2, col = c(col_Gfos_A, col_Gfos_B), bty = "n", cex = 0.85, lwd = 2)
hist(k0_Ho_A, col = col_Gfos_A, tcl = -0.5 * scal.fact, cex.axis = 2 * scal.fact, xaxt = "n", yaxt = "n", main = "", ylim = c(0, 90), xlim = c(0, maxhist_Ho), breaks = seq(0, maxhist_Ho, 0.025))
hist(k0_Ho_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Ho, 0.025))
axis(1, labels = FALSE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact)
axis(2, labels = FALSE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact)
mtext(side = 3, text = lab_Ho_short, line = 1, adj = 0.5, cex = 1.5)
draw_medians_K(k0_Ho_A, k0_Ho_B)
textbox(0.3, 85, txt = lab[1], txt.adj = 0.5, txt.cex = 2 * scal.fact, frm.brd = NA, frm.col = white_transparent)
hist(k0_Hs_A, col = col_Gfos_A, tcl = -0.5 * scal.fact, cex.axis = 2 * scal.fact, xaxt = "n", yaxt = "n", main = "", ylim = c(0, 90), xlim = c(0, maxhist_Hs), breaks = seq(0, maxhist_Hs, 0.025))
hist(k0_Hs_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Hs, 0.025))
axis(1, labels = FALSE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact)
axis(2, labels = FALSE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact)
mtext(side = 3, text = lab_Hs_short, line = 1, adj = 0.5, cex = 1.5)
draw_medians_K(k0_Hs_A, k0_Hs_B)
textbox(0.3, 85, txt = lab[1], txt.adj = 0.5, txt.cex = 2 * scal.fact, frm.brd = NA, frm.col = white_transparent)
## Row 2: K1 (bottom row carries the visible x-axis labels)
hist(k1_Ar_A, col = col_Gfos_A, tcl = -0.5 * scal.fact, cex.axis = 2 * scal.fact, xaxt = "n", yaxt = "n", main = "", ylim = c(0, 90), xlim = c(0, maxhist_Ar), breaks = seq(0, maxhist_Ar, 0.25))
hist(k1_Ar_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Ar, 0.25))
axis(1, labels = TRUE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact, padj = 0)
axis(2, labels = TRUE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact, padj = 0.5)
draw_medians_K(k1_Ar_A, k1_Ar_B)
textbox(4, 85, txt = lab[2], txt.adj = 0.5, txt.cex = 2 * scal.fact, frm.brd = NA, frm.col = white_transparent)
hist(k1_Ho_A, col = col_Gfos_A, tcl = -0.5 * scal.fact, cex.axis = 2 * scal.fact, xaxt = "n", yaxt = "n", main = "", ylim = c(0, 90), xlim = c(0, maxhist_Ho), breaks = seq(0, maxhist_Ho, 0.025))
hist(k1_Ho_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Ho, 0.025))
axis(1, labels = TRUE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact, padj = 0)
axis(2, labels = FALSE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact)
draw_medians_K(k1_Ho_A, k1_Ho_B)
textbox(0.3, 85, txt = lab[2], txt.adj = 0.5, txt.cex = 2 * scal.fact, frm.brd = NA, frm.col = white_transparent)
hist(k1_Hs_A, col = col_Gfos_A, tcl = -0.5 * scal.fact, cex.axis = 2 * scal.fact, xaxt = "n", yaxt = "n", main = "", ylim = c(0, 90), xlim = c(0, maxhist_Hs), breaks = seq(0, maxhist_Hs, 0.025))
hist(k1_Hs_B, col = col_Gfos_B, add = TRUE, breaks = seq(0, maxhist_Hs, 0.025))
axis(1, labels = TRUE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact, padj = 0)
axis(2, labels = FALSE, cex.axis = 2 * scal.fact, tcl = -0.5 * scal.fact)
draw_medians_K(k1_Hs_A, k1_Hs_B)
textbox(0.3, 85, txt = lab[2], txt.adj = 0.5, txt.cex = 2 * scal.fact, frm.brd = NA, frm.col = white_transparent)
title(xlab = "Perpendicular offset",
      ylab = "Frequency",
      outer = TRUE, line = 3.5 * scal.fact, cex.lab = 2 * scal.fact)
dev.off()
##** FIG S10 ####
##** Orthogonal distance to 1:1 line
## One panel per parameter combination: modelled vs empirical mean allelic
## richness, with perpendicular offsets to the 1:1 line drawn for every site.
mp_dim <- multipanel.dimensions(main.col = pla$a, main.row = pla$b, pla$x, pla$y, sub1 = pla$sub1, sub2 = pla$sub2, main = pla$main)
if (pdf) {
  pdf(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS10.pdf"), width = fig.width, height = fig.width * (mp_dim[2] / mp_dim[1]))
} else {
  png(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS10.png"), width = fig.width, height = fig.width * (mp_dim[2] / mp_dim[1]), units = "in", res = 600)
}
nf <- multipanel.layout(main.col = pla$a, main.row = pla$b, pla$x, pla$y, sub1 = pla$sub1, sub2 = pla$sub2, main = pla$main)
## Shared axis extents (hoisted out of the loop; identical every iteration).
ax_min_Ar <- min(Ar_modelled[, -1], na.rm = TRUE)
ax_max_Ar <- max(Ar_modelled[, -1], na.rm = TRUE)
for (i in 2:ncol(Ar_modelled)) {  # column 1 is skipped (not a model run)
  Ar_Mod_A <- Ar_modelled[, i][rownames(Ar_modelled) %in% modsite_GfosA]
  Ar_Mod_B <- Ar_modelled[, i][rownames(Ar_modelled) %in% modsite_GfosB]
  par(mar = c(0, 0, 0, 0))
  plot(Ar_Mod_A ~ meanAr_A_red_updist, type = "n",
       xlim = c(ax_min_Ar, ax_max_Ar),
       ylim = c(ax_min_Ar, ax_max_Ar),
       xaxt = ifelse(i %in% c(6, 7, 12, 13, 18, 19), "s", "n"),
       yaxt = ifelse(i %in% c(3, 5, 7), "s", "n"),
       asp = 1) # create empty plot
  if (i %in% c(14, 16, 18)) {
    axis(4, labels = FALSE)
  }
  if (i %in% c(2, 4, 6)) {
    axis(2, labels = FALSE)
  }
  if (i %in% c(2, 3)) {
    mtext(lab[6], side = 3, line = 0.8, cex = 1.4)
  }
  if (i %in% c(8, 9)) {
    mtext(lab[7], side = 3, line = 0.8, cex = 1.4)
  }
  if (i %in% c(14, 15)) {
    mtext(lab[8], side = 3, line = 0.8, cex = 1.4)
  }
  if (i %in% c(4)) {
    mtext(expression(bold("Model data: ")*" Mean allelic richness"), side = 2, line = 1, cex = 1)
  }
  if (i %in% c(12, 13)) {
    mtext(expression(bold("Empirical data:")*" Mean allelic richness"), side = 1, line = 3, cex = 1)
  }
  abline(0, 1, lwd = 1, lty = 2) # add 1:1 line
  points(Ar_Mod_A ~ meanAr_A_red_updist, col = col_Gfos_A, pch = 16)
  points(Ar_Mod_B ~ meanAr_B_updist, col = col_Gfos_B, pch = 16)
  # Perpendicular offset segments from each site to the 1:1 line.
  for (j in seq_along(meanAr_A_red_updist)) {
    point <- cbind(meanAr_A_red_updist, Ar_Mod_A)[j, ]
    seg <- unlist(perp.segment.coord(point[1], point[2]))
    segments(seg[1], seg[2], seg[3], seg[4], col = col_Gfos_A, lty = 2, lwd = 0.5)
  }
  for (k in seq_along(meanAr_B_updist)) {
    point <- cbind(meanAr_B_updist, Ar_Mod_B)[k, ]
    seg <- unlist(perp.segment.coord(point[1], point[2]))
    segments(seg[1], seg[2], seg[3], seg[4], col = col_Gfos_B, lty = 2, lwd = 0.5)
  }
  # Per-panel summed orthogonal distance for each lineage (bottom right).
  textbox(ax_max_Ar, ax_min_Ar + 2, paste0(measure1_short, " = ", formatC(round(sum_orthdist_Ar_A[[i - 1]], sum_digits), digits = sum_digits, format = "f")), txt.cex = sum_cex, txt.adj = 1, txt.col = col_Gfos_A, frm.col = white_transparent, frm.brd = NA, frm.siz = 0.2)
  textbox(ax_max_Ar, ax_min_Ar + 0.5, paste0(measure1_short, " = ", formatC(round(sum_orthdist_Ar_B[[i - 1]], sum_digits), digits = sum_digits, format = "f")), txt.cex = sum_cex, txt.adj = 1, txt.col = col_Gfos_B, frm.col = white_transparent, frm.brd = NA, frm.siz = 0.2)
}
## Margin label and legend panels.
for (i in seq_along(lab)) {
  plot.new()
  if (i %in% c(1:2)) {
    text(0.5, 1, lab[i], adj = c(0.5, 1), cex = lab_cex[i])
  }
  if (i %in% c(3:5)) {
    text(0, 0.5, lab[i], adj = c(0, 0.5), cex = lab_cex[i])
  }
}
plot.new()
legend("topleft", c(label_A, label_B), pch = 16, col = c(col_Gfos_A, col_Gfos_B), bty = "n", cex = 2)
text(0, 0, measure1, adj = 0)
dev.off()
##** TABLE S11 ####
## Rank all parameter combinations by summed perpendicular offset (SPO) per
## lineage (smaller = better model fit) and export the ranking.
## The column sums and orderings are computed once instead of once per output
## vector (the original recomputed apply()/order() eight times).
spo_Ar_A <- apply(hist_Ar_A, 2, sum)
spo_Ar_B <- apply(hist_Ar_B, 2, sum)
ord_Ar_A <- order(spo_Ar_A)
ord_Ar_B <- order(spo_Ar_B)
SPOrank_Ar_A <- labs_comb[ord_Ar_A]
SPOrank_Ar_B <- labs_comb[ord_Ar_B]
SPOrank_spo_Ar_A <- spo_Ar_A[ord_Ar_A]
SPOrank_d_Ar_A <- labs_short[, 3][ord_Ar_A]
SPOrank_W_Ar_A <- labs_short[, 2][ord_Ar_A]
SPOrank_K_Ar_A <- labs_short[, 1][ord_Ar_A]
SPOrank_spo_Ar_B <- spo_Ar_B[ord_Ar_B]
SPOrank_d_Ar_B <- labs_short[, 3][ord_Ar_B]
SPOrank_W_Ar_B <- labs_short[, 2][ord_Ar_B]
SPOrank_K_Ar_B <- labs_short[, 1][ord_Ar_B]
SPOrank_Ar <- data.frame("Ar_A_SPO" = SPOrank_spo_Ar_A, "Ar_A_d" = SPOrank_d_Ar_A, "Ar_A_W" = SPOrank_W_Ar_A, "Ar_A_K" = SPOrank_K_Ar_A,
                         "Ar_B_SPO" = SPOrank_spo_Ar_B, "Ar_B_d" = SPOrank_d_Ar_B, "Ar_B_W" = SPOrank_W_Ar_B, "Ar_B_K" = SPOrank_K_Ar_B)
write.csv2(SPOrank_Ar, paste0(WD, "/Analysis_", output, "/SuppFigs/Table_S11.csv"))
##** FIG S12 ####
###** Histogram of orthogonal distance to 1:1 line
## One panel per parameter combination: distribution of per-site perpendicular
## offsets (mean observed heterozygosity), lineages A and B overlaid.
main_5 <- FALSE  # no overall figure title
mp_dim <- multipanel.dimensions(main.col = pla$a, main.row = pla$b, pla$x, pla$y, sub1 = pla$sub1, sub2 = pla$sub2, main = main_5, h.main = 0.5)
if (pdf) {
  pdf(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS12.pdf"), width = fig.width, height = fig.width * (mp_dim[2] / mp_dim[1]))
} else {
  png(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS12.png"), width = fig.width, height = fig.width * (mp_dim[2] / mp_dim[1]), units = "in", res = 600)
}
nf <- multipanel.layout(main.col = pla$a, main.row = pla$b, pla$x, pla$y, sub1 = pla$sub1, sub2 = pla$sub2, main = main_5, h.main = 0.5)
## Panel constants and shared extents (hoisted: identical every iteration).
xmax_Ho <- ceiling(max(hist_Ho_A, hist_Ho_B, na.rm = TRUE) / 0.1) * 0.1  # break-grid upper end
xr_Ho <- round(max(hist_Ho_A, hist_Ho_B, na.rm = TRUE), 1)               # clip / textbox right edge
yclip <- 28  # top of drawn y ticks and of the clip region for median lines
ylim <- 35   # panel y-limit (leaves headroom for the median textboxes)
for (i in seq_len(ncol(hist_Ho_A))) {
  par(mar = c(0, 0, 0, 0))
  hist(hist_Ho_A[, i],
       breaks = seq(0, xmax_Ho, 0.05),
       xlim = c(0, xmax_Ho),
       ylim = c(0, ylim),
       xaxt = "n",
       yaxt = "n",
       col = col_Gfos_A,
       main = "")
  # Restrict drawing so the median lines stop below the textbox area.
  clip(0, xr_Ho, 0, yclip)
  abline(v = median(hist_Ho_A[, i], na.rm = TRUE), col = col_Gfos_A)
  abline(v = median(hist_Ho_B[, i], na.rm = TRUE), col = col_Gfos_B)
  clip(0, xr_Ho, 0, ylim)
  textbox(xr_Ho, 30, paste0(measure2_short, " = ", formatC(round(median(hist_Ho_A[, i], na.rm = TRUE), median_digits), digits = median_digits, format = "f")), txt.cex = median_cex, txt.col = col_Gfos_A, txt.adj = 1, frm.col = "white", frm.brd = "white", frm.siz = 0.2)
  textbox(xr_Ho, 25, paste0(measure2_short, " = ", formatC(round(median(hist_Ho_B[, i], na.rm = TRUE), median_digits), digits = median_digits, format = "f")), txt.cex = median_cex, txt.col = col_Gfos_B, txt.adj = 1, frm.col = "white", frm.brd = "white", frm.siz = 0.2)
  # Redraw both histograms on top of the median lines and textbox frames.
  hist(hist_Ho_A[, i],
       breaks = seq(0, xmax_Ho, 0.05),
       xlim = c(0, xmax_Ho),
       ylim = c(0, ylim),
       xaxt = "n",
       yaxt = "n",
       col = col_Gfos_A,
       main = "", add = TRUE)
  hist(hist_Ho_B[, i],
       breaks = seq(0, xmax_Ho, 0.05),
       xlim = c(0, xmax_Ho),
       ylim = c(0, ylim),
       xaxt = "n",
       yaxt = "n",
       col = col_Gfos_B,
       main = "",
       add = TRUE)
  if (i %in% c(1, 2)) {
    mtext(lab[6], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(7, 8)) {
    mtext(lab[7], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(13, 14)) {
    mtext(lab[8], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(3)) {
    mtext("Frequencies [counts]", side = 2, line = 1, cex = 1)
  }
  if (i %in% c(11, 12)) {
    mtext(measure3, side = 1, line = 3, cex = 1)
  }
  if (i %in% c(5:6, 11:12, 17:18)) {
    # NOTE(review): label vector is hard-coded to 7 ticks; axis() errors if the
    # data range of hist_Ho_* changes -- confirm.
    axis(1, at = seq(0, xr_Ho, 0.1), labels = c("0.0", "", "0.2", "", "0.4", "", ""))
  }
  if (i %in% c(1, 3, 5)) {
    axis(2, at = seq(0, yclip, 5), labels = FALSE)
  }
  if (i %in% c(2, 4, 6)) {
    axis(2, at = seq(0, yclip, 5))
  }
  if (i %in% c(13:18)) {
    axis(4, at = seq(0, yclip, 5), labels = FALSE)
  }
}
if (main_5) {
  plot.new()
  text(0.5, 0.7, paste0(lab_Ho, ": Distribution of ", measure3a), adj = c(0.5, 0.5), cex = 3)
}
for (i in seq_along(lab)) {
  plot.new()
  if (i %in% c(1:2)) {
    text(0.5, 1, lab[i], adj = c(0.5, 1), cex = lab_cex[i])
  }
  if (i %in% c(3:5)) {
    text(0, 0.5, lab[i], adj = c(0, 0.5), cex = lab_cex[i])
  }
}
## Legend panel: an invisible scatter establishes a coordinate system so the
## legend can be placed with data-like coordinates.
par(mar = c(0, 6, 16, 0.5), pty = "s")
i <- ncol(Ar_modelled)
Ar_Mod_A <- Ar_modelled[, i][rownames(Ar_modelled) %in% modsite_GfosA]
Ar_Mod_B <- Ar_modelled[, i][rownames(Ar_modelled) %in% modsite_GfosB]
plot(Ar_Mod_A ~ meanAr_A_red_updist, type = "n",
     xlim = c(min(Ar_modelled[, -1], na.rm = TRUE), max(Ar_modelled[, -1], na.rm = TRUE)),
     ylim = c(min(Ar_modelled[, -1], na.rm = TRUE), max(Ar_modelled[, -1], na.rm = TRUE)),
     xaxt = "n",
     yaxt = "n",
     xlab = "",
     ylab = "",
     bty = "n",
     asp = 1)
par(xpd = TRUE)  # allow the legend to spill outside the plot region
legend(-5, yclip, c(label_A, label_B), pch = 16, col = c(col_Gfos_A, col_Gfos_B), bty = "n", cex = 2)
par(xpd = FALSE)
dev.off()
##** FIG S13 ####
##** Orthogonal distance to 1:1 line
## One panel per parameter combination: modelled vs empirical mean observed
## heterozygosity, with perpendicular offsets to the 1:1 line for every site.
mp_dim <- multipanel.dimensions(main.col = pla$a, main.row = pla$b, pla$x, pla$y, sub1 = pla$sub1, sub2 = pla$sub2, main = pla$main)
if (pdf) {
  pdf(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS13.pdf"), width = fig.width, height = fig.width * (mp_dim[2] / mp_dim[1]))
} else {
  png(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS13.png"), width = fig.width, height = fig.width * (mp_dim[2] / mp_dim[1]), units = "in", res = 600)
}
nf <- multipanel.layout(main.col = pla$a, main.row = pla$b, pla$x, pla$y, sub1 = pla$sub1, sub2 = pla$sub2, main = pla$main)
## Shared axis extents (hoisted out of the loop; identical every iteration).
ax_min_Ho <- min(Ho_modelled[, -1], na.rm = TRUE)
ax_max_Ho <- max(Ho_modelled[, -1], na.rm = TRUE)
for (i in 2:ncol(Ho_modelled)) {  # column 1 is skipped (not a model run)
  Ho_Mod_A <- Ho_modelled[, i][rownames(Ho_modelled) %in% modsite_GfosA]
  Ho_Mod_B <- Ho_modelled[, i][rownames(Ho_modelled) %in% modsite_GfosB]
  par(mar = c(0, 0, 0, 0))
  plot(Ho_Mod_A ~ meanHo_A_red_updist, type = "n",
       xlim = c(ax_min_Ho, ax_max_Ho),
       ylim = c(ax_min_Ho, ax_max_Ho),
       xaxt = ifelse(i %in% c(6, 7, 12, 13, 18, 19), "s", "n"),
       yaxt = ifelse(i %in% c(3, 5, 7), "s", "n"),
       asp = 1) # create empty plot
  if (i %in% c(14, 16, 18)) {
    axis(4, labels = FALSE)
  }
  if (i %in% c(2, 4, 6)) {
    axis(2, labels = FALSE)
  }
  if (i %in% c(2, 3)) {
    mtext(lab[6], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(8, 9)) {
    mtext(lab[7], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(14, 15)) {
    mtext(lab[8], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(4)) {
    mtext(expression(bold("Model data:")*" Mean observed heterozygosity"), side = 2, line = 1, cex = 1)
  }
  if (i %in% c(12, 13)) {
    mtext(expression(bold("Empirical data:")*" Mean observed heterozygosity"), side = 1, line = 3, cex = 1)
  }
  abline(0, 1, lwd = 1, lty = 2) # add 1:1 line
  points(Ho_Mod_A ~ meanHo_A_red_updist, col = col_Gfos_A, pch = 16)
  points(Ho_Mod_B ~ meanHo_B_updist, col = col_Gfos_B, pch = 16)
  # Perpendicular offset segments from each site to the 1:1 line.
  for (j in seq_along(meanHo_A_red_updist)) {
    point <- cbind(meanHo_A_red_updist, Ho_Mod_A)[j, ]
    seg <- unlist(perp.segment.coord(point[1], point[2]))
    segments(seg[1], seg[2], seg[3], seg[4], col = col_Gfos_A, lty = 2, lwd = 0.5)
  }
  for (k in seq_along(meanHo_B_updist)) {
    point <- cbind(meanHo_B_updist, Ho_Mod_B)[k, ]
    seg <- unlist(perp.segment.coord(point[1], point[2]))
    segments(seg[1], seg[2], seg[3], seg[4], col = col_Gfos_B, lty = 2, lwd = 0.5)
  }
  # Per-panel summed orthogonal distance for each lineage (bottom right).
  textbox(ax_max_Ho, ax_min_Ho + 0.18, paste0(measure1_short, " = ", formatC(round(sum_orthdist_Ho_A[[i - 1]], sum_digits), digits = sum_digits, format = "f")), txt.cex = sum_cex, txt.adj = 1, txt.col = col_Gfos_A, frm.col = white_transparent, frm.brd = NA, frm.siz = 0.2)
  textbox(ax_max_Ho, ax_min_Ho + 0.05, paste0(measure1_short, " = ", formatC(round(sum_orthdist_Ho_B[[i - 1]], sum_digits), digits = sum_digits, format = "f")), txt.cex = sum_cex, txt.adj = 1, txt.col = col_Gfos_B, frm.col = white_transparent, frm.brd = NA, frm.siz = 0.2)
}
## Margin label and legend panels.
for (i in seq_along(lab)) {
  plot.new()
  if (i %in% c(1:2)) {
    text(0.5, 1, lab[i], adj = c(0.5, 1), cex = lab_cex[i])
  }
  if (i %in% c(3:5)) {
    text(0, 0.5, lab[i], adj = c(0, 0.5), cex = lab_cex[i])
  }
}
plot.new()
legend("topleft", c(label_A, label_B), pch = 16, col = c(col_Gfos_A, col_Gfos_B), bty = "n", cex = 2)
text(0, 0, measure1, adj = 0)
dev.off()
##** TABLE S14 ####
## Rank all parameter combinations by summed perpendicular offset (SPO) per
## lineage (smaller = better model fit) and export the ranking.
## Column sums and orderings computed once (original recomputed them 8 times).
spo_Ho_A <- apply(hist_Ho_A, 2, sum)
spo_Ho_B <- apply(hist_Ho_B, 2, sum)
ord_Ho_A <- order(spo_Ho_A)
ord_Ho_B <- order(spo_Ho_B)
SPOrank_Ho_A <- labs_comb[ord_Ho_A]
SPOrank_Ho_B <- labs_comb[ord_Ho_B]
SPOrank_spo_Ho_A <- spo_Ho_A[ord_Ho_A]
SPOrank_d_Ho_A <- labs_short[, 3][ord_Ho_A]
SPOrank_W_Ho_A <- labs_short[, 2][ord_Ho_A]
SPOrank_K_Ho_A <- labs_short[, 1][ord_Ho_A]
SPOrank_spo_Ho_B <- spo_Ho_B[ord_Ho_B]
SPOrank_d_Ho_B <- labs_short[, 3][ord_Ho_B]
SPOrank_W_Ho_B <- labs_short[, 2][ord_Ho_B]
SPOrank_K_Ho_B <- labs_short[, 1][ord_Ho_B]
SPOrank_Ho <- data.frame("Ho_A_SPO" = SPOrank_spo_Ho_A, "Ho_A_d" = SPOrank_d_Ho_A, "Ho_A_W" = SPOrank_W_Ho_A, "Ho_A_K" = SPOrank_K_Ho_A,
                         "Ho_B_SPO" = SPOrank_spo_Ho_B, "Ho_B_d" = SPOrank_d_Ho_B, "Ho_B_W" = SPOrank_W_Ho_B, "Ho_B_K" = SPOrank_K_Ho_B)
write.csv2(SPOrank_Ho, paste0(WD, "/Analysis_", output, "/SuppFigs/Table_S14.csv"))
##** FIG S15 ####
##** Histogram of orthogonal distance to 1:1 line
## One panel per parameter combination: distribution of per-site perpendicular
## offsets (expected heterozygosity), lineages A and B overlaid.
main_s15 <- FALSE  # no overall figure title
mp_dim <- multipanel.dimensions(main.col = pla$a, main.row = pla$b, pla$x, pla$y, sub1 = pla$sub1, sub2 = pla$sub2, main = main_s15, h.main = 0.5)
if (pdf) {
  pdf(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS15.pdf"), width = fig.width, height = fig.width * (mp_dim[2] / mp_dim[1]))
} else {
  png(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS15.png"), width = fig.width, height = fig.width * (mp_dim[2] / mp_dim[1]), units = "in", res = 600)
}
nf <- multipanel.layout(main.col = pla$a, main.row = pla$b, pla$x, pla$y, sub1 = pla$sub1, sub2 = pla$sub2, main = main_s15, h.main = 0.5)
## Panel constants and shared extents (hoisted: identical every iteration).
xmax_Hs <- ceiling_dec(max(hist_Hs_A, hist_Hs_B, na.rm = TRUE), 1)  # break-grid / clip upper end
yclip <- 28  # top of drawn y ticks and of the clip region for median lines
ylim <- 35   # panel y-limit (leaves headroom for the median textboxes)
for (i in seq_len(ncol(hist_Hs_A))) {
  par(mar = c(0, 0, 0, 0))
  hist(hist_Hs_A[, i],
       breaks = seq(0, xmax_Hs, 0.05),
       xlim = c(0, xmax_Hs),
       ylim = c(0, ylim),
       xaxt = "n",
       yaxt = "n",
       col = col_Gfos_A,
       main = "")
  # Restrict drawing so the median lines stop below the textbox area.
  clip(0, xmax_Hs, 0, yclip)
  abline(v = median(hist_Hs_A[, i], na.rm = TRUE), col = col_Gfos_A)
  abline(v = median(hist_Hs_B[, i], na.rm = TRUE), col = col_Gfos_B)
  clip(0, xmax_Hs, 0, ylim)
  textbox(xmax_Hs, 30, paste0(measure2_short, " = ", formatC(round(median(hist_Hs_A[, i], na.rm = TRUE), median_digits), digits = median_digits, format = "f")), txt.cex = median_cex, txt.col = col_Gfos_A, txt.adj = 1, frm.col = "white", frm.brd = "white", frm.siz = 0.2)
  textbox(xmax_Hs, 25, paste0(measure2_short, " = ", formatC(round(median(hist_Hs_B[, i], na.rm = TRUE), median_digits), digits = median_digits, format = "f")), txt.cex = median_cex, txt.col = col_Gfos_B, txt.adj = 1, frm.col = "white", frm.brd = "white", frm.siz = 0.2)
  # Redraw both histograms on top of the median lines and textbox frames.
  hist(hist_Hs_A[, i],
       breaks = seq(0, xmax_Hs, 0.05),
       xlim = c(0, xmax_Hs),
       ylim = c(0, ylim),
       xaxt = "n",
       yaxt = "n",
       col = col_Gfos_A,
       main = "", add = TRUE)
  hist(hist_Hs_B[, i],
       breaks = seq(0, xmax_Hs, 0.05),
       xlim = c(0, xmax_Hs),
       ylim = c(0, ylim),
       xaxt = "n",
       yaxt = "n",
       col = col_Gfos_B,
       main = "",
       add = TRUE)
  if (i %in% c(1, 2)) {
    mtext(lab[6], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(7, 8)) {
    mtext(lab[7], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(13, 14)) {
    mtext(lab[8], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(3)) {
    mtext("Frequencies [counts]", side = 2, line = 1, cex = 1)
  }
  if (i %in% c(11, 12)) {
    mtext(measure3, side = 1, line = 3, cex = 1)
  }
  if (i %in% c(5:6, 11:12, 17:18)) {
    # Label vector is truncated to the actual number of ticks.
    axis(1, at = seq(0, xmax_Hs, 0.1), labels = c("0.0", "", "0.2", "", "0.4", "", "")[seq_along(seq(0, xmax_Hs, 0.1))])
  }
  if (i %in% c(1, 3, 5)) {
    axis(2, at = seq(0, yclip, 5), labels = FALSE)
  }
  if (i %in% c(2, 4, 6)) {
    axis(2, at = seq(0, yclip, 5))
  }
  if (i %in% c(13:18)) {
    axis(4, at = seq(0, yclip, 5), labels = FALSE)
  }
}
if (main_s15) {
  plot.new()
  text(0.5, 0.7, paste0(lab_Hs, ": Distribution of ", measure3a), adj = c(0.5, 0.5), cex = 3)
}
for (i in seq_along(lab)) {
  plot.new()
  if (i %in% c(1:2)) {
    text(0.5, 1, lab[i], adj = c(0.5, 1), cex = lab_cex[i])
  }
  if (i %in% c(3:5)) {
    text(0, 0.5, lab[i], adj = c(0, 0.5), cex = lab_cex[i])
  }
}
## Legend panel: an invisible Ar scatter establishes a coordinate system so
## the legend can be placed with data-like coordinates.
par(mar = c(0, 6, 16, 0.5), pty = "s")
i <- ncol(Ar_modelled)
Ar_Mod_A <- Ar_modelled[, i][rownames(Ar_modelled) %in% modsite_GfosA]
Ar_Mod_B <- Ar_modelled[, i][rownames(Ar_modelled) %in% modsite_GfosB]
plot(Ar_Mod_A ~ meanAr_A_red_updist, type = "n",
     xlim = c(min(Ar_modelled[, -1], na.rm = TRUE), max(Ar_modelled[, -1], na.rm = TRUE)),
     ylim = c(min(Ar_modelled[, -1], na.rm = TRUE), max(Ar_modelled[, -1], na.rm = TRUE)),
     xaxt = "n",
     yaxt = "n",
     xlab = "",
     ylab = "",
     bty = "n",
     asp = 1)
par(xpd = TRUE)  # allow the legend to spill outside the plot region
legend(-5, yclip, c(label_A, label_B), pch = 16, col = c(col_Gfos_A, col_Gfos_B), bty = "n", cex = 2)
par(xpd = FALSE)
dev.off()
##** FIG S16 ####
##** Orthogonal distance to 1:1 line
## One panel per parameter combination: modelled vs empirical expected
## heterozygosity, with perpendicular offsets to the 1:1 line for every site.
mp_dim <- multipanel.dimensions(main.col = pla$a, main.row = pla$b, pla$x, pla$y, sub1 = pla$sub1, sub2 = pla$sub2, main = pla$main)
if (pdf) {
  pdf(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS16.pdf"), width = fig.width, height = fig.width * (mp_dim[2] / mp_dim[1]))
} else {
  png(paste0(WD, "/Analysis_", output, "/SuppFigs/FigS16.png"), width = fig.width, height = fig.width * (mp_dim[2] / mp_dim[1]), units = "in", res = 600)
}
nf <- multipanel.layout(main.col = pla$a, main.row = pla$b, pla$x, pla$y, sub1 = pla$sub1, sub2 = pla$sub2, main = pla$main)
## Shared axis extents across modelled and empirical values (hoisted out of
## the loop; identical every iteration).
ax_min_Hs <- min(Hs_modelled[, -1], meanHs_A_red_updist, meanHs_B_updist, na.rm = TRUE)
ax_max_Hs <- max(Hs_modelled[, -1], meanHs_A_red_updist, meanHs_B_updist, na.rm = TRUE)
for (i in 2:ncol(Hs_modelled)) {  # column 1 is skipped (not a model run)
  Hs_Mod_A <- Hs_modelled[, i][rownames(Hs_modelled) %in% modsite_GfosA]
  Hs_Mod_B <- Hs_modelled[, i][rownames(Hs_modelled) %in% modsite_GfosB]
  par(mar = c(0, 0, 0, 0))
  plot(Hs_Mod_A ~ meanHs_A_red_updist, type = "n",
       xlim = c(ax_min_Hs, ax_max_Hs),
       ylim = c(ax_min_Hs, ax_max_Hs),
       xaxt = ifelse(i %in% c(6, 7, 12, 13, 18, 19), "s", "n"),
       yaxt = ifelse(i %in% c(3, 5, 7), "s", "n"),
       asp = 1) # create empty plot
  if (i %in% c(14, 16, 18)) {
    axis(4, labels = FALSE)
  }
  if (i %in% c(2, 4, 6)) {
    axis(2, labels = FALSE)
  }
  if (i %in% c(2, 3)) {
    mtext(lab[6], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(8, 9)) {
    mtext(lab[7], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(14, 15)) {
    mtext(lab[8], side = 3, line = 0.8, cex = 1.3)
  }
  if (i %in% c(4)) {
    mtext(expression(bold("Model data:")*" Expected heterozygosity"), side = 2, line = 1, cex = 1)
  }
  if (i %in% c(12, 13)) {
    mtext(expression(bold("Empirical data:")*" Expected heterozygosity"), side = 1, line = 3, cex = 1)
  }
  abline(0, 1, lwd = 1, lty = 2) # add 1:1 line
  points(Hs_Mod_A ~ meanHs_A_red_updist, col = col_Gfos_A, pch = 16)
  points(Hs_Mod_B ~ meanHs_B_updist, col = col_Gfos_B, pch = 16)
  # Perpendicular offset segments from each site to the 1:1 line.
  for (j in seq_along(meanHs_A_red_updist)) {
    point <- cbind(meanHs_A_red_updist, Hs_Mod_A)[j, ]
    seg <- unlist(perp.segment.coord(point[1], point[2]))
    segments(seg[1], seg[2], seg[3], seg[4], col = col_Gfos_A, lty = 2, lwd = 0.5)
  }
  for (k in seq_along(meanHs_B_updist)) {
    point <- cbind(meanHs_B_updist, Hs_Mod_B)[k, ]
    seg <- unlist(perp.segment.coord(point[1], point[2]))
    segments(seg[1], seg[2], seg[3], seg[4], col = col_Gfos_B, lty = 2, lwd = 0.5)
  }
  # Per-panel summed orthogonal distance for each lineage (bottom right).
  textbox(ax_max_Hs, ax_min_Hs + 0.18, paste0(measure1_short, " = ", formatC(round(sum_orthdist_Hs_A[[i - 1]], sum_digits), digits = sum_digits, format = "f")), txt.cex = sum_cex, txt.adj = 1, txt.col = col_Gfos_A, frm.col = white_transparent, frm.brd = NA, frm.siz = 0.2)
  textbox(ax_max_Hs, ax_min_Hs + 0.05, paste0(measure1_short, " = ", formatC(round(sum_orthdist_Hs_B[[i - 1]], sum_digits), digits = sum_digits, format = "f")), txt.cex = sum_cex, txt.adj = 1, txt.col = col_Gfos_B, frm.col = white_transparent, frm.brd = NA, frm.siz = 0.2)
}
## Margin label and legend panels.
for (i in seq_along(lab)) {
  plot.new()
  if (i %in% c(1:2)) {
    text(0.5, 1, lab[i], adj = c(0.5, 1), cex = lab_cex[i])
  }
  if (i %in% c(3:5)) {
    text(0, 0.5, lab[i], adj = c(0, 0.5), cex = lab_cex[i])
  }
}
plot.new()
legend("topleft", c(label_A, label_B), pch = 16, col = c(col_Gfos_A, col_Gfos_B), bty = "n", cex = 2)
text(0, 0, measure1, adj = 0)
dev.off()
##** TABLE S17 ####
##** Rank model parameterizations by summed perpendicular offset (SPO) of He.
# Column sums of the per-population He offset matrices give one total (summed)
# perpendicular offset per model run; the ordering permutation then ranks the
# runs from best (smallest offset) to worst. The sums and the order() results
# are computed once and reused, instead of re-evaluating apply()/order() for
# every derived vector as before (same values, less repeated work).
spo_Hs_A <- apply(hist_Hs_A,2,sum)
spo_Hs_B <- apply(hist_Hs_B,2,sum)
ord_spo_Hs_A <- order(spo_Hs_A)
ord_spo_Hs_B <- order(spo_Hs_B)
# Combined parameter labels in rank order (kept for downstream use)
SPOrank_Hs_A <- labs_comb[ord_spo_Hs_A]
SPOrank_Hs_B <- labs_comb[ord_spo_Hs_B]
# Ranked offset values and the corresponding d / W / K parameter labels
SPOrank_spo_Hs_A <- spo_Hs_A[ord_spo_Hs_A]
SPOrank_d_Hs_A <- labs_short[,3][ord_spo_Hs_A]
SPOrank_W_Hs_A <- labs_short[,2][ord_spo_Hs_A]
SPOrank_K_Hs_A <- labs_short[,1][ord_spo_Hs_A]
SPOrank_spo_Hs_B <- spo_Hs_B[ord_spo_Hs_B]
SPOrank_d_Hs_B <- labs_short[,3][ord_spo_Hs_B]
SPOrank_W_Hs_B <- labs_short[,2][ord_spo_Hs_B]
SPOrank_K_Hs_B <- labs_short[,1][ord_spo_Hs_B]
# Assemble and export the ranking table (semicolon-separated CSV)
SPOrank_Hs <- data.frame("He_A_SPO"=SPOrank_spo_Hs_A,"He_A_d"=SPOrank_d_Hs_A,"He_A_W"=SPOrank_W_Hs_A,"He_A_K"=SPOrank_K_Hs_A,
"He_B_SPO"=SPOrank_spo_Hs_B,"He_B_d"=SPOrank_d_Hs_B,"He_B_W"=SPOrank_W_Hs_B,"He_B_K"=SPOrank_K_Hs_B)
write.csv2(SPOrank_Hs, paste0(WD,"/Analysis_",output,"/SuppFigs/Table_S17.csv"))
##** TABLE S18 ####
##** Rank model parameterizations by median perpendicular offset (MPO) of Ar.
# Column medians of the per-population allelic-richness offset matrices give
# one median perpendicular offset per model run. Medians and ordering
# permutations are computed once and reused, instead of re-running
# apply()/order() for every derived vector (same values, less repeated work).
mpo_Ar_A <- apply(hist_Ar_A,2,median)
mpo_Ar_B <- apply(hist_Ar_B,2,median)
ord_mpo_Ar_A <- order(mpo_Ar_A)
ord_mpo_Ar_B <- order(mpo_Ar_B)
# Combined parameter labels in rank order (kept for downstream use)
MPOrank_Ar_A <- labs_comb[ord_mpo_Ar_A]
MPOrank_Ar_B <- labs_comb[ord_mpo_Ar_B]
# Ranked offset values and the corresponding d / W / K parameter labels
MPOrank_mpo_Ar_A <- mpo_Ar_A[ord_mpo_Ar_A]
MPOrank_d_Ar_A <- labs_short[,3][ord_mpo_Ar_A]
MPOrank_W_Ar_A <- labs_short[,2][ord_mpo_Ar_A]
MPOrank_K_Ar_A <- labs_short[,1][ord_mpo_Ar_A]
MPOrank_mpo_Ar_B <- mpo_Ar_B[ord_mpo_Ar_B]
MPOrank_d_Ar_B <- labs_short[,3][ord_mpo_Ar_B]
MPOrank_W_Ar_B <- labs_short[,2][ord_mpo_Ar_B]
MPOrank_K_Ar_B <- labs_short[,1][ord_mpo_Ar_B]
# Assemble and export the ranking table (semicolon-separated CSV)
MPOrank_Ar <- data.frame("Ar_A_MPO"=MPOrank_mpo_Ar_A,"Ar_A_d"=MPOrank_d_Ar_A,"Ar_A_W"=MPOrank_W_Ar_A,"Ar_A_K"=MPOrank_K_Ar_A,
"Ar_B_MPO"=MPOrank_mpo_Ar_B,"Ar_B_d"=MPOrank_d_Ar_B,"Ar_B_W"=MPOrank_W_Ar_B,"Ar_B_K"=MPOrank_K_Ar_B)
write.csv2(MPOrank_Ar, paste0(WD,"/Analysis_",output,"/SuppFigs/Table_S18.csv"))
##** TABLE S19 ####
##** Rank model parameterizations by median perpendicular offset (MPO) of Ho.
# Column medians of the observed-heterozygosity offset matrices give one
# median perpendicular offset per model run. Medians and ordering
# permutations are computed once and reused, instead of re-running
# apply()/order() for every derived vector (same values, less repeated work).
mpo_Ho_A <- apply(hist_Ho_A,2,median)
mpo_Ho_B <- apply(hist_Ho_B,2,median)
ord_mpo_Ho_A <- order(mpo_Ho_A)
ord_mpo_Ho_B <- order(mpo_Ho_B)
# Combined parameter labels in rank order (kept for downstream use)
MPOrank_Ho_A <- labs_comb[ord_mpo_Ho_A]
MPOrank_Ho_B <- labs_comb[ord_mpo_Ho_B]
# Ranked offset values and the corresponding d / W / K parameter labels
MPOrank_mpo_Ho_A <- mpo_Ho_A[ord_mpo_Ho_A]
MPOrank_d_Ho_A <- labs_short[,3][ord_mpo_Ho_A]
MPOrank_W_Ho_A <- labs_short[,2][ord_mpo_Ho_A]
MPOrank_K_Ho_A <- labs_short[,1][ord_mpo_Ho_A]
MPOrank_mpo_Ho_B <- mpo_Ho_B[ord_mpo_Ho_B]
MPOrank_d_Ho_B <- labs_short[,3][ord_mpo_Ho_B]
MPOrank_W_Ho_B <- labs_short[,2][ord_mpo_Ho_B]
MPOrank_K_Ho_B <- labs_short[,1][ord_mpo_Ho_B]
# Assemble and export the ranking table (semicolon-separated CSV)
MPOrank_Ho <- data.frame("Ho_A_MPO"=MPOrank_mpo_Ho_A,"Ho_A_d"=MPOrank_d_Ho_A,"Ho_A_W"=MPOrank_W_Ho_A,"Ho_A_K"=MPOrank_K_Ho_A,
"Ho_B_MPO"=MPOrank_mpo_Ho_B,"Ho_B_d"=MPOrank_d_Ho_B,"Ho_B_W"=MPOrank_W_Ho_B,"Ho_B_K"=MPOrank_K_Ho_B)
write.csv2(MPOrank_Ho, paste0(WD,"/Analysis_",output,"/SuppFigs/Table_S19.csv"))
##** TABLE S20 ####
##** Rank model parameterizations by median perpendicular offset (MPO) of He.
# Column medians of the expected-heterozygosity offset matrices give one
# median perpendicular offset per model run. Medians and ordering
# permutations are computed once and reused, instead of re-running
# apply()/order() for every derived vector (same values, less repeated work).
mpo_Hs_A <- apply(hist_Hs_A,2,median)
mpo_Hs_B <- apply(hist_Hs_B,2,median)
ord_mpo_Hs_A <- order(mpo_Hs_A)
ord_mpo_Hs_B <- order(mpo_Hs_B)
# Combined parameter labels in rank order (kept for downstream use)
MPOrank_Hs_A <- labs_comb[ord_mpo_Hs_A]
MPOrank_Hs_B <- labs_comb[ord_mpo_Hs_B]
# Ranked offset values and the corresponding d / W / K parameter labels
MPOrank_mpo_Hs_A <- mpo_Hs_A[ord_mpo_Hs_A]
MPOrank_d_Hs_A <- labs_short[,3][ord_mpo_Hs_A]
MPOrank_W_Hs_A <- labs_short[,2][ord_mpo_Hs_A]
MPOrank_K_Hs_A <- labs_short[,1][ord_mpo_Hs_A]
MPOrank_mpo_Hs_B <- mpo_Hs_B[ord_mpo_Hs_B]
MPOrank_d_Hs_B <- labs_short[,3][ord_mpo_Hs_B]
MPOrank_W_Hs_B <- labs_short[,2][ord_mpo_Hs_B]
MPOrank_K_Hs_B <- labs_short[,1][ord_mpo_Hs_B]
# Assemble and export the ranking table (semicolon-separated CSV);
# column names use "He" for expected heterozygosity (internally "Hs")
MPOrank_Hs <- data.frame("He_A_MPO"=MPOrank_mpo_Hs_A,"He_A_d"=MPOrank_d_Hs_A,"He_A_W"=MPOrank_W_Hs_A,"He_A_K"=MPOrank_K_Hs_A,
"He_B_MPO"=MPOrank_mpo_Hs_B,"He_B_d"=MPOrank_d_Hs_B,"He_B_W"=MPOrank_W_Hs_B,"He_B_K"=MPOrank_K_Hs_B)
write.csv2(MPOrank_Hs, paste0(WD,"/Analysis_",output,"/SuppFigs/Table_S20.csv"))
##** FIG S21 ####
##** Histogram of directed perpendicular offset to 1:1 line
# Per-parameterization histograms of the DIRECTED perpendicular offset of
# allelic richness (Ar) to the 1:1 model-vs-empirical line (sign encodes
# above/below the line). Group A and group B histograms are overlaid;
# vertical lines mark zero and each group's median offset.
main_s21=F
mp_dim <- multipanel.dimensions(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=main_s21, h.main=0.5)
if(pdf){
pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS21.pdf"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]))
}else{
png(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS21.png"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]), units="in", res=600)
}
nf <- multipanel.layout(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=main_s21, h.main=0.5)
for (i in 1:ncol(hist_Ar_A)){
par(mar=c(0,0,0,0))
# yclip restricts the clipping region for the reference lines; ylim is the
# full panel height so the textbox annotations above yclip stay visible.
yclip <- 28
ylim <- 35
# First draw establishes the panel's coordinate system
hist(hist_Ar_A_directed[,i],
breaks=seq(floor(min(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)),ceiling(max(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T))-0.5,0.5),
xlim=c(floor(min(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)),ceiling(max(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T))),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_A,
main="")
# Reference lines are clipped to yclip so they do not run behind the text
clip(floor(min(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)),ceiling(max(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)), 0, yclip)
abline(v=0, lwd=1.5, lty=2)
abline(v=median(hist_Ar_A_directed[,i], na.rm=T),col=col_Gfos_A)
abline(v=median(hist_Ar_B_directed[,i], na.rm=T),col=col_Gfos_B)
# Restore full-height clipping for the annotations
clip(floor(min(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)),ceiling(max(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)), 0, ylim)
# Median directed offsets per group, printed top-right
textbox(ceiling(max(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)),30,paste0(measure4," = ",formatC(round(median(hist_Ar_A_directed[,i], na.rm=T),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_A, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
textbox(ceiling(max(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)),25,paste0(measure4," = ",formatC(round(median(hist_Ar_B_directed[,i], na.rm=T),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_B, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
# Redraw group A on top so the bars overlay the reference lines, then add B
hist(hist_Ar_A_directed[,i],
breaks=seq(floor(min(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)),ceiling(max(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T))-0.5,0.5),
xlim=c(floor(min(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)),ceiling(max(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T))),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_A,
main="", add=T)
hist(hist_Ar_B_directed[,i],
breaks=seq(floor(min(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)),ceiling(max(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T))-0.5,0.5),
xlim=c(floor(min(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T)),ceiling(max(hist_Ar_A_directed,hist_Ar_B_directed, na.rm=T))),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_B,
main="",
add=T)
# Column headers, shared axis titles and per-edge tick marks, selected by
# the panel's position in the layout
if (i %in% c(1,2)){
mtext(lab[6], side=3, line=0.8, cex=1.3)
}
if (i %in% c(7,8)){
mtext(lab[7], side=3, line=0.8, cex=1.3)
}
if (i %in% c(13,14)){
mtext(lab[8], side=3, line=0.8, cex=1.3)
}
if (i %in% c(3)){
mtext("Frequencies [counts]", side=2, line=1, cex=1)
}
if (i %in% c(11,12)){
mtext(measure3, side=1, line=3, cex=1)
}
if (i %in% c(5:6,11:12,17:18)){
axis(1)
}
if (i %in% c(1,3,5)){
axis(2, at=seq(0,yclip,5), labels=F)
}
if (i %in% c(2,4,6)){
axis(2, at=seq(0,yclip,5))
}
if (i %in% c(13:18)){
axis(4, at=seq(0,yclip,5), labels=F)
}
}
# Optional overall figure title (off by default via main_s21)
if(main_s21){
plot.new()
text(0.5,0.7,paste0(lab_Ar,": Distribution of directed ",measure3a),adj=c(0.5,0.5),cex=3)
}
# Outer label panels of the layout
for (i in 1:length(lab)){
plot.new()
if (i %in% c(1:2)){
text(0.5,1,lab[i],adj=c(0.5,1), cex=lab_cex[i])
}
if (i %in% c(3:5)){
text(0,0.5,lab[i],adj=c(0,0.5), cex=lab_cex[i])
}
}
# Invisible dummy panel used only to position the legend; note that the
# legend y-coordinate reuses 'yclip' leaking from the panel loop above --
# NOTE(review): intentional but fragile, confirm if the loop is ever changed.
par(mar=c(0,6,16,0.5), pty="s")
i <- ncol(Ar_modelled)
Ar_Mod_A <- Ar_modelled[,i][rownames(Ar_modelled)%in%modsite_GfosA]
Ar_Mod_B <- Ar_modelled[,i][rownames(Ar_modelled)%in%modsite_GfosB]
plot(Ar_Mod_A~meanAr_A_red_updist, type="n",
xlim=c(min(Ar_modelled[,-1], na.rm=T),max(Ar_modelled[,-1], na.rm=T)),
ylim=c(min(Ar_modelled[,-1], na.rm=T),max(Ar_modelled[,-1], na.rm=T)),
xaxt="n",
yaxt="n",
xlab="",
ylab="",
bty="n",
asp=1)
par(xpd=T)
legend(-5,yclip,c(label_A,label_B),pch = 16, col = c(col_Gfos_A,col_Gfos_B), bty="n", cex=2)
par(xpd=F)
dev.off()
##** FIG S22 ####
##** Histogram of directed perpendicular offset to 1:1 line
# Same layout as Fig. S21 but for observed heterozygosity (Ho): per-
# parameterization histograms of the directed perpendicular offset to the
# 1:1 line, groups A and B overlaid, with zero and median reference lines.
main_s22=F
mp_dim <- multipanel.dimensions(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=main_s22, h.main=0.5)
if(pdf){
pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS22.pdf"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]))
}else{
png(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS22.png"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]), units="in", res=600)
}
nf <- multipanel.layout(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=main_s22, h.main=0.5)
for (i in 1:ncol(hist_Ho_A_directed)){
par(mar=c(0,0,0,0))
# yclip restricts the reference-line clipping region; ylim is the full
# panel height so annotations above yclip remain visible
yclip <- 30
ylim <- 35
# xlim <- round(max(abs(min(hist_Ho_A_directed,hist_Ho_B_directed)),max(hist_Ho_A_directed,hist_Ho_B_directed)),1)
# Symmetric x-range rounded up to the next 0.1. NOTE(review): derived from
# the maximum only -- assumes |min| <= max; if the minimum offset were more
# extreme than -xlim, hist() would error on out-of-range breaks. Confirm.
xlim <- ceiling(max(hist_Ho_A_directed,hist_Ho_B_directed, na.rm=T)/0.1)*0.1
# First draw establishes the panel's coordinate system
hist(hist_Ho_A_directed[,i],
breaks=seq(-xlim,xlim,0.05),
xlim=c(-xlim,xlim),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_A,
main="")
# Reference lines clipped to yclip so they stop below the annotations
clip(-xlim,xlim, 0, yclip)
abline(v=0, lwd=1.5, lty=2)
abline(v=median(hist_Ho_A_directed[,i], na.rm=T),col=col_Gfos_A)
abline(v=median(hist_Ho_B_directed[,i], na.rm=T),col=col_Gfos_B)
# Restore full-height clipping for the annotations
clip(-xlim,xlim, 0, ylim)
# Median directed offsets per group, printed top-right
textbox(xlim,30,paste0(measure4," = ",formatC(round(median(hist_Ho_A_directed[,i], na.rm=T),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_A, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
textbox(xlim,25,paste0(measure4," = ",formatC(round(median(hist_Ho_B_directed[,i], na.rm=T),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_B, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
# Redraw group A on top of the reference lines, then overlay group B
hist(hist_Ho_A_directed[,i],
breaks=seq(-xlim,xlim,0.05),
xlim=c(-xlim,xlim),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_A,
main="", add=T)
hist(hist_Ho_B_directed[,i],
breaks=seq(-xlim,xlim,0.05),
xlim=c(-xlim,xlim),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_B,
main="",
add=T)
# Column headers, shared axis titles, and edge tick marks by panel position
if (i %in% c(1,2)){
mtext(lab[6], side=3, line=0.8, cex=1.3)
}
if (i %in% c(7,8)){
mtext(lab[7], side=3, line=0.8, cex=1.3)
}
if (i %in% c(13,14)){
mtext(lab[8], side=3, line=0.8, cex=1.3)
}
if (i %in% c(3)){
mtext("Frequencies [counts]", side=2, line=1, cex=1)
}
if (i %in% c(11,12)){
mtext(measure3, side=1, line=3, cex=1)
}
if (i %in% c(5:6,11:12,17:18)){
axis(1, at=as.numeric(formatC(seq(-xlim+0.1,xlim-0.1,0.1),digits=1, format="f")), labels=T)
}
if (i %in% c(1,3,5)){
axis(2, at=seq(0,yclip,5), labels=F)
}
if (i %in% c(2,4,6)){
axis(2, at=seq(0,yclip,5))
}
if (i %in% c(13:18)){
axis(4, at=seq(0,yclip,5), labels=F)
}
}
# Optional overall figure title (off by default via main_s22)
if(main_s22){
plot.new()
text(0.5,0.7,paste0(lab_Ho,": Distribution of directed ",measure3a),adj=c(0.5,0.5),cex=3)
}
# Outer label panels of the layout
for (i in 1:length(lab)){
plot.new()
if (i %in% c(1:2)){
text(0.5,1,lab[i],adj=c(0.5,1), cex=lab_cex[i])
}
if (i %in% c(3:5)){
text(0,0.5,lab[i],adj=c(0,0.5), cex=lab_cex[i])
}
}
# Invisible dummy panel used only to position the legend; legend reuses the
# loop-scoped 'yclip' -- NOTE(review): intentional but fragile.
par(mar=c(0,6,16,0.5), pty="s")
i <- ncol(Ar_modelled)
Ar_Mod_A <- Ar_modelled[,i][rownames(Ar_modelled)%in%modsite_GfosA]
Ar_Mod_B <- Ar_modelled[,i][rownames(Ar_modelled)%in%modsite_GfosB]
plot(Ar_Mod_A~meanAr_A_red_updist, type="n",
xlim=c(min(Ar_modelled[,-1], na.rm=T),max(Ar_modelled[,-1], na.rm=T)),
ylim=c(min(Ar_modelled[,-1], na.rm=T),max(Ar_modelled[,-1], na.rm=T)),
xaxt="n",
yaxt="n",
xlab="",
ylab="",
bty="n",
asp=1)
par(xpd=T)
legend(-5,yclip,c(label_A,label_B),pch = 16, col = c(col_Gfos_A,col_Gfos_B), bty="n", cex=2)
par(xpd=F)
dev.off()
##** FIG S23 ####
##** Histogram of directed perpendicular offset to 1:1 line
# Same layout as Figs. S21/S22 but for expected heterozygosity (Hs/He):
# per-parameterization histograms of the directed perpendicular offset to
# the 1:1 line, groups A and B overlaid, with zero and median reference lines.
main_s23=F
mp_dim <- multipanel.dimensions(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=main_s23, h.main=0.5)
if(pdf){
pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS23.pdf"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]))
}else{
png(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS23.png"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]), units="in", res=600)
}
nf <- multipanel.layout(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=main_s23, h.main=0.5)
for (i in 1:ncol(hist_Hs_A_directed)){
par(mar=c(0,0,0,0))
# yclip restricts the reference-line clipping region; ylim is the full
# panel height so annotations above yclip remain visible
yclip <- 30
ylim <- 35
# xlim <- round(max(abs(min(hist_Hs_A_directed,hist_Hs_B_directed)),max(hist_Hs_A_directed,hist_Hs_B_directed)),1)
# Symmetric x-range rounded up to the next 0.1. NOTE(review): derived from
# the maximum only -- assumes |min| <= max; confirm (see Fig. S22).
xlim <- ceiling(max(hist_Hs_A_directed,hist_Hs_B_directed, na.rm=T)/0.1)*0.1
# First draw establishes the panel's coordinate system
hist(hist_Hs_A_directed[,i],
breaks=seq(-xlim,xlim,0.05),
xlim=c(-xlim,xlim),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_A,
main="")
# Reference lines clipped to yclip so they stop below the annotations
clip(-xlim,xlim, 0, yclip)
abline(v=0, lwd=1.5, lty=2)
abline(v=median(hist_Hs_A_directed[,i], na.rm=T),col=col_Gfos_A)
abline(v=median(hist_Hs_B_directed[,i], na.rm=T),col=col_Gfos_B)
# Restore full-height clipping for the annotations
clip(-xlim,xlim, 0, ylim)
# Median directed offsets per group, printed top-right
textbox(xlim,30,paste0(measure4," = ",formatC(round(median(hist_Hs_A_directed[,i], na.rm=T),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_A, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
textbox(xlim,25,paste0(measure4," = ",formatC(round(median(hist_Hs_B_directed[,i], na.rm=T),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_B, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
# Redraw group A on top of the reference lines, then overlay group B
hist(hist_Hs_A_directed[,i],
breaks=seq(-xlim,xlim,0.05),
xlim=c(-xlim,xlim),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_A,
main="", add=T)
hist(hist_Hs_B_directed[,i],
breaks=seq(-xlim,xlim,0.05),
xlim=c(-xlim,xlim),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_B,
main="",
add=T)
# Column headers, shared axis titles, and edge tick marks by panel position
if (i %in% c(1,2)){
mtext(lab[6], side=3, line=0.8, cex=1.3)
}
if (i %in% c(7,8)){
mtext(lab[7], side=3, line=0.8, cex=1.3)
}
if (i %in% c(13,14)){
mtext(lab[8], side=3, line=0.8, cex=1.3)
}
if (i %in% c(3)){
mtext("Frequencies [counts]", side=2, line=1, cex=1)
}
if (i %in% c(11,12)){
mtext(measure3, side=1, line=3, cex=1)
}
if (i %in% c(5:6,11:12,17:18)){
axis(1, at=as.numeric(formatC(seq(-xlim+0.1,xlim-0.1,0.1),digits=1, format="f")), labels=T)
}
if (i %in% c(1,3,5)){
axis(2, at=seq(0,yclip,5), labels=F)
}
if (i %in% c(2,4,6)){
axis(2, at=seq(0,yclip,5))
}
if (i %in% c(13:18)){
axis(4, at=seq(0,yclip,5), labels=F)
}
}
# Optional overall figure title (off by default via main_s23)
if(main_s23){
plot.new()
text(0.5,0.7,paste0(lab_Hs,": Distribution of directed ",measure3a),adj=c(0.5,0.5),cex=3)
}
# Outer label panels of the layout
for (i in 1:length(lab)){
plot.new()
if (i %in% c(1:2)){
text(0.5,1,lab[i],adj=c(0.5,1), cex=lab_cex[i])
}
if (i %in% c(3:5)){
text(0,0.5,lab[i],adj=c(0,0.5), cex=lab_cex[i])
}
}
# Invisible dummy panel used only to position the legend; legend reuses the
# loop-scoped 'yclip' -- NOTE(review): intentional but fragile.
par(mar=c(0,6,16,0.5), pty="s")
i <- ncol(Ar_modelled)
Ar_Mod_A <- Ar_modelled[,i][rownames(Ar_modelled)%in%modsite_GfosA]
Ar_Mod_B <- Ar_modelled[,i][rownames(Ar_modelled)%in%modsite_GfosB]
plot(Ar_Mod_A~meanAr_A_red_updist, type="n",
xlim=c(min(Ar_modelled[,-1], na.rm=T),max(Ar_modelled[,-1], na.rm=T)),
ylim=c(min(Ar_modelled[,-1], na.rm=T),max(Ar_modelled[,-1], na.rm=T)),
xaxt="n",
yaxt="n",
xlab="",
ylab="",
bty="n",
asp=1)
par(xpd=T)
legend(-5,yclip,c(label_A,label_B),pch = 16, col = c(col_Gfos_A,col_Gfos_B), bty="n", cex=2)
par(xpd=F)
dev.off()
# Everything below runs only when the Fst analyses are enabled
if(fst){
##** FIG S24 ####
# Fst perpendicular offset
##*** Preparing matrices####
# Accumulators for the perpendicular offsets of pairwise Fst values to the
# 1:1 (modelled vs. empirical) line, absolute and signed (directed), for
# every model run r = 1..18 (d x W x K combinations).
orthdist_Fst_A <- vector()
orthdist_Fst_A_directed <- vector()
sum_orthdist_Fst_A <- vector()
hist_Fst_A <- matrix(nrow=length(fst_A_red), ncol=ncol(Ar_modelled)-1)
hist_Fst_A_directed <- matrix(nrow=length(fst_A_red), ncol=ncol(Ar_modelled)-1)
orthdist_Fst_B <- vector()
orthdist_Fst_B_directed <- vector()
sum_orthdist_Fst_B <- vector()
hist_Fst_B <- matrix(nrow=length(fst_B), ncol=ncol(Ar_modelled)-1)
hist_Fst_B_directed <- matrix(nrow=length(fst_B), ncol=ncol(Ar_modelled)-1)
# dir.create(paste0(WD,"/Analysis_",output,"/SuppFigs/Fst"), showWarnings=F)
r <- 0
for (d in 1:length(D)){ # looping over dispersal rates
for (w in 1:length(W)){ # looping over dispersal directionalities
for (k in 1:length(K)){ # looping over carrying capacities
r <- r+1
# Load the pre-computed (or freshly prepared) model Fst matrix for this run;
# each .Rdata file provides meanFst_Mod (and possibly more objects)
if(existing_data){
load(paste0(DF,"/02_Data_prep/Fst_data/FstData_",D[d],"_",W[w],"_",K[k],".Rdata"))
}else{
load(paste0(DF,"/02_Data_prep/",prep_folder,"/IndPopGenData_",D[d],"_",W[w],"_",K[k],".Rdata"))
}
# Distance matrix to vector
# Subset the modelled Fst matrix to the empirical sites of each group and
# take the upper triangle (unique pairwise values).
MEANFST_Mod <- meanFst_Mod
rownames(MEANFST_Mod) <- modsite
colnames(MEANFST_Mod) <- modsite
fst_match_B <- match(modsite[modsite%in%microsite_B],microsite_B)
MEANFST_Mod_B <- MEANFST_Mod[fst_match_B,fst_match_B]
meanFst_Mod_B <- MEANFST_Mod_B[upper.tri(MEANFST_Mod_B)]
# NOTE(review): sites are filtered by microsite_A but matched against
# microsite_A_red -- a site in A but not in A_red would yield NA indices;
# confirm A_red is a subset-consistent reduction of A.
fst_match_A_red <- match(modsite[modsite%in%microsite_A],microsite_A_red)
MEANFST_Mod_A_red <- MEANFST_Mod[fst_match_A_red,fst_match_A_red]
meanFst_Mod_A_red <- MEANFST_Mod_A_red[upper.tri(MEANFST_Mod_A_red)]
# Perpendicular offset of each (empirical, modelled) Fst pair to the 1:1
# line, for group A; sign = modelled above/below the line
seg <- matrix(nrow=length(fst_A_red),ncol=4)
point <- cbind(fst_A_red,meanFst_Mod_A_red)
for (j in 1:nrow(point)){
seg[j,] <- unlist(perp.segment.coord(point[j,1],point[j,2]))
orthdist_Fst_A[j] <- euc.dist(c(seg[j,1],seg[j,2]),c(seg[j,3],seg[j,4]))
orthdist_Fst_A_directed[j] <- sign(seg[j,2]-seg[j,4])*orthdist_Fst_A[j]
}
# Same for group B
seg <- matrix(nrow=length(fst_B),ncol=4)
point <- cbind(fst_B,meanFst_Mod_B)
for (j in 1:nrow(point)){
seg[j,] <- unlist(perp.segment.coord(point[j,1],point[j,2]))
orthdist_Fst_B[j] <- euc.dist(c(seg[j,1],seg[j,2]),c(seg[j,3],seg[j,4]))
orthdist_Fst_B_directed[j] <- sign(seg[j,2]-seg[j,4])*orthdist_Fst_B[j]
}
# Store the per-run summaries and offset vectors (column r = this run)
sum_orthdist_Fst_A[[r]] <- sum(orthdist_Fst_A)
sum_orthdist_Fst_B[[r]] <- sum(orthdist_Fst_B)
hist_Fst_A[,r] <- orthdist_Fst_A
hist_Fst_B[,r] <- orthdist_Fst_B
hist_Fst_A_directed[,r] <- orthdist_Fst_A_directed
hist_Fst_B_directed[,r] <- orthdist_Fst_B_directed
label_Mod_short <- paste0("D",D_label[d],"_W",W_label[w],"_K",K_label[k])
# Quick-look diagnostic plot per run (file output disabled; drawn on the
# currently active device)
# if(pdf){
# pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/Fst/Fig_FstMod_",label_Mod_short,".pdf"), width=6, height=6)
# }else{
# png(paste0(WD,"/Analysis_",output,"/SuppFigs/Fst/Fig_FstMod_",label_Mod_short,".png"), width=6, height=6, units="in", res=300)
# }
plot(fst_A_red,meanFst_Mod_A_red,
xlim=c(min(fst_A_red,meanFst_Mod_A_red,fst_B,meanFst_Mod_B, na.rm=T),
max(fst_A_red,meanFst_Mod_A_red,fst_B,meanFst_Mod_B, na.rm=T)),
ylim=c(min(fst_A_red,meanFst_Mod_A_red,fst_B,meanFst_Mod_B, na.rm=T),
max(fst_A_red,meanFst_Mod_A_red,fst_B,meanFst_Mod_B, na.rm=T)),
col=col_Gfos_A, asp=1)
points(fst_B,meanFst_Mod_B, col=col_Gfos_B)
abline(0,1,col="red")
mtext(label_Mod_short)
# dev.off()
} # end looping over carrying capacities
} # end looping over dispersal directionalities
} # end looping over dispersal rates
# Name the per-run sums by the model-run column labels
names(sum_orthdist_Fst_A) <- colnames(Ar_modelled)[-1]
names(sum_orthdist_Fst_B) <- colnames(Ar_modelled)[-1]
#### Spatial distance between populations in simulations
# Instream (network) distances between the modelled sites, via igraph
mod_vertices <- match(modsite,V(net)$name)
DIST_Mod <- distances(net, v=V(net)[mod_vertices], to=V(net)[mod_vertices], weights=E(net))
#### Distance matrix to vector
dist_Mod <- DIST_Mod[upper.tri(DIST_Mod)]
# FIG S24 figure: isolation-by-distance (pairwise Fst vs. instream distance)
# per model run, with the empirical A/B curves and the modelled curve fitted
# by a power-term linear model selected via AIC.
mp_dim <- multipanel.dimensions(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=pla$main)
if(pdf){
pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS24.pdf"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]))
}else{
png(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS24.png"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]), units="in", res=600)
}
nf <- multipanel.layout(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=pla$main)
r <- 0
for (d in 1:length(D)){ # looping over dispersal rates
for (w in 1:length(W)){ # looping over dispersal directionalities
for (k in 1:length(K)){ # looping over carrying capacities
r <- r+1
# Reload this run's modelled Fst matrix (same files as in the preparation
# loop above)
if(existing_data){
load(paste0(DF,"/02_Data_prep/Fst_data/FstData_",D[d],"_",W[w],"_",K[k],".Rdata"))
}else{
load(paste0(DF,"/02_Data_prep/",prep_folder,"/IndPopGenData_",D[d],"_",W[w],"_",K[k],".Rdata"))
}
# Distance matrix to vector
MEANFST_Mod <- meanFst_Mod
meanFst_Mod <- meanFst_Mod[upper.tri(meanFst_Mod)]
# DISTDATA table construction
DISTDATA_Mod <- cbind(meanFst_Mod,dist_Mod)
colnames(DISTDATA_Mod) <- c("fst","dist")
DISTDATA_Mod <- data.frame(DISTDATA_Mod)
# Order DISTDATA according to dist
DISTDATA_Mod <- DISTDATA_Mod[ order(DISTDATA_Mod$dist), ]
# Remove NAs
DISTDATA_Mod <- DISTDATA_Mod[which(!is.na(DISTDATA_Mod$fst)),]
# Prepare DISTDATA with non-zero Fst values
DISTDATA_Mod$nonneg_fst <- DISTDATA_Mod$fst
DISTDATA_Mod$nonneg_fst[which(DISTDATA_Mod$fst<0)] <- 0
# Prepare DISTDATA with log-transformed dist values
DISTDATA_Mod$log_dist <- log(DISTDATA_Mod$dist)
#### Combined GLM of genetic differentiation by instream distance * species (power term)
# Scan exponents 0..1 in steps of 0.01 and keep the power term with the
# lowest AIC for the modelled Fst ~ dist^power relationship
power <- seq(0,1,0.01)
AICpower <- c()
for (i in 1:length(power)){
pow.mod.Fst <- lm(nonneg_fst ~ I(dist^power[i]), DISTDATA_Mod, na.action = "na.fail")
AICpower[i] <- AIC(pow.mod.Fst)
}
# lm.bind (project helper) refits the best model and binds fit/upr/lwr
# prediction columns onto the data
model <- lm.bind(nonneg_fst ~ I(dist^power[which.min(AICpower)]), DISTDATA_Mod, "fst_power", critval, step=T)
DISTDATA_Mod <- model[[1]]
lm_fst_power <- model[[2]]
slm_fst_power <- model[[3]]
# Stack the modelled data under the empirical A/B rows (spec column
# distinguishes them) so one plotting routine handles all three curves
DISTDATA_Mod$spec <- "Mod"
DISTDATA_Mod <- DISTDATA_Mod[,-which(colnames(DISTDATA_Mod)=="log_dist")]
DISTDATA_temp <- DISTDATA[,match(colnames(DISTDATA_Mod),colnames(DISTDATA))]
DISTDATA_temp <- rbind(DISTDATA_Mod,DISTDATA_temp)
label_Mod <- paste0("D=",D_label[d],", W_up=",W_label[w],", K=",K_label[k])
label_Mod_short <- paste0("D",D_label[d],"_W",W_label[w],"_K",K_label[k])
par(mar=c(0,0,0,0))
# Inlined plotting "arguments": these plain assignments mimic the parameter
# list of a plotting function unrolled into the script. NOTE(review): 'x'
# holds the response (fst) and 'y' the predictor (dist) -- naming is
# swapped relative to plotting convention but used consistently below.
x <- "fst"
y <- "dist"
dat=DISTDATA_temp
model="slm_fst_power"
CI_border = F
xlabel="Instream distance [km]"
ylabel=expression('Genetic diff. [Pairwise Nei F'[ST]*']')
xax="n"
yax="n"
bty="o"
yrange=c(0,1)
pointtrans = T
trans=0.2
trans_mod=0.1
xrev=F
axislog=""
pt.cex=0.5
lwd=1
cex.lab=2
cex.axis=1.5
legend=F
cex.legend=1.5
main=F
col1=col_Gfos_A
col2=col_Gfos_B
col3=colMod
# Semi-transparent variants of the three series colours
col1trans <- rgb(col2rgb(col1)[1,]/255,col2rgb(col1)[2,]/255,col2rgb(col1)[3,]/255,trans)
col2trans <- rgb(col2rgb(col2)[1,]/255,col2rgb(col2)[2,]/255,col2rgb(col2)[3,]/255,trans)
col3trans <- rgb(col2rgb(col3)[1,]/255,col2rgb(col3)[2,]/255,col2rgb(col3)[3,]/255,trans_mod)
# Formulas for the raw points and the model fit / CI bounds columns
form <- reformulate(y, response = x)
formfit <- reformulate(y, response = paste0(model,"_fit"))
formupr <- reformulate(y, response = paste0(model,"_upr"))
formlwr <- reformulate(y, response = paste0(model,"_lwr"))
xcol <- which(colnames(dat)==x)
ycol <- which(colnames(dat)==y)
lwrcol <- which(colnames(dat)==paste0(model,"_lwr"))
uprcol <- which(colnames(dat)==paste0(model,"_upr"))
DATAordered <- dat[order(dat[,ycol]),]
left <- min(DATAordered[,ycol], na.rm=T)
right <- max(DATAordered[,ycol], na.rm=T)
if (xrev==T){
xrange <- c(right,left)
}else{
xrange <- c(left,right)
}
if (pointtrans==T){
col1point <- col1trans
col2point <- col2trans
col3point <- col3trans
}else{
col1point <- col1
col2point <- col2
col3point <- col3
}
# Empty panel, then CI polygons, points and fit lines per series (A, B, Mod)
plot(form, dat, type = "n", las = 1, bty = bty,
xlab=xlabel,
ylab=ylabel,
xlim=xrange,
ylim=yrange,
log=axislog,
xaxt=xax, yaxt=yax, cex.lab=cex.lab, cex.axis=cex.axis)
polygon(c(rev(DATAordered[,ycol][DATAordered$spec=="A"]), DATAordered[,ycol][DATAordered$spec=="A"]),
c(rev(DATAordered[,lwrcol][DATAordered$spec=="A"]), DATAordered[,uprcol][DATAordered$spec=="A"]),
col = col1trans, border = NA)
polygon(c(rev(DATAordered[,ycol][DATAordered$spec=="B"]), DATAordered[,ycol][DATAordered$spec=="B"]),
c(rev(DATAordered[,lwrcol][DATAordered$spec=="B"]), DATAordered[,uprcol][DATAordered$spec=="B"]),
col = col2trans, border = NA)
polygon(c(rev(DATAordered[,ycol][DATAordered$spec=="Mod"]), DATAordered[,ycol][DATAordered$spec=="Mod"]),
c(rev(DATAordered[,lwrcol][DATAordered$spec=="Mod"]), DATAordered[,uprcol][DATAordered$spec=="Mod"]),
col = col3trans, border = NA)
points(form, data = subset(DATAordered, spec == "A"), pch = 16, col = col1point, cex=pt.cex)
points(form, data = subset(DATAordered, spec == "B"), pch = 16, col = col2point, cex=pt.cex)
points(form, data = subset(DATAordered, spec == "Mod"), pch = 16, col = col3point, cex=pt.cex)
lines(formfit, data = subset(DATAordered, spec == "A"), lwd = lwd, col=col1)
if(CI_border){lines(formupr, data = subset(DATAordered, spec == "A"), lwd = 2, lty=2, col=col1)}
if(CI_border){lines(formlwr, data = subset(DATAordered, spec == "A"), lwd = 2, lty=2, col=col1)}
lines(formfit, data = subset(DATAordered, spec == "B"), lwd = lwd, col=col2)
if(CI_border){lines(formupr, data = subset(DATAordered, spec == "B"), lwd = 2, lty=2, col=col2)}
if(CI_border){lines(formlwr, data = subset(DATAordered, spec == "B"), lwd = 2, lty=2, col=col2)}
lines(formfit, data = subset(DATAordered, spec == "Mod"), lwd = lwd, col=col3)
if(CI_border){lines(formupr, data = subset(DATAordered, spec == "Mod"), lwd = 2, lty=2, col=col3)}
if(CI_border){lines(formlwr, data = subset(DATAordered, spec == "Mod"), lwd = 2, lty=2, col=col3)}
# Panel-position-dependent axes and headers; the positional vector in the
# bottom axis supplies the labels (m -> km) while 'at' gives tick positions
if (r%in%c(2,4,6)){
axis(2, cex.axis=1.5)
}
if (r%in%c(5,6,11,12,17,18)){
axis(1, c(0,50,100,150,200,250), at=c(0,50000,100000,150000,200000,250000), cex.axis=1.5)
}
if (r %in% c(13,15,17)){
axis(4, labels=F)
}
if (r %in% c(1,3,5)){
axis(2, labels=F)
}
if (r %in% c(1,2)){
mtext(lab[6], side=3, line=0.8, cex=1.4)
}
if (r %in% c(7,8)){
mtext(lab[7], side=3, line=0.8, cex=1.4)
}
if (r %in% c(13,14)){
mtext(lab[8], side=3, line=0.8, cex=1.4)
}
if (r %in% c(3)){
mtext(ylabel, side=2, line=1, cex=1)
}
if (r %in% c(11,12)){
mtext(xlabel, side=1, line=3, cex=1)
}
} # end looping over carrying capacities
} # end looping over dispersal directionalities
} # end looping over dispersal rates
# Outer label panels and legend panel
for (i in 1:length(lab)){
plot.new()
if (i %in% c(1:2)){
text(0.5,1,lab[i],adj=c(0.5,1), cex=lab_cex[i])
}
if (i %in% c(3:5)){
text(0,0.5,lab[i],adj=c(0,0.5), cex=lab_cex[i])
}
}
plot.new()
legend("topleft",c(label_A,label_B, label_mod),pch = 16, col = c(col_Gfos_A,col_Gfos_B, colMod), bty="n", cex=2)
dev.off()
##** FIG S25 ####
##** Histogram of directed perpendicular offset to 1:1 line
# Same layout as Figs. S21-S23 but for pairwise Fst: per-parameterization
# histograms of the directed perpendicular offset to the 1:1 line, groups A
# and B overlaid, with zero and median reference lines.
# Fixes relative to the original: the four median() calls now use na.rm=T,
# consistent with the sibling figures S21-S23 (without it a single NA offset
# silently suppresses the median line and prints "NA" in the annotation);
# and the %notin% operator is defined once instead of on every iteration.
main_s25=F
median_cex <- 0.7
mp_dim <- multipanel.dimensions(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=main_s25, h.main=0.5)
if(pdf){
pdf(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS25.pdf"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]))
}else{
png(paste0(WD,"/Analysis_",output,"/SuppFigs/FigS25.png"), width=fig.width, height=fig.width*(mp_dim[2]/mp_dim[1]), units="in", res=600)
}
nf <- multipanel.layout(main.col=pla$a,main.row=pla$b,pla$x,pla$y,sub1=pla$sub1,sub2=pla$sub2,main=main_s25, h.main=0.5)
# min(abs(apply(hist_Fst_A_directed, 2, median)))
# Negated %in%, used below for the "all other panels" axis case; hoisted out
# of the loop (it was redefined identically on every iteration).
'%notin%' <- Negate('%in%')
for (i in 1:ncol(hist_Fst_A)){
par(mar=c(0,0,0,0))
# yclip restricts the reference-line clipping region; ylim is the full
# panel height so the annotations above yclip remain visible
yclip <- 450
ylim <- 500
# First draw establishes the panel's coordinate system
hist(hist_Fst_A_directed[,i],
breaks=seq(floor(min(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),0.05),
xlim=c(floor(min(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T))),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_A,
main="")
# Reference lines clipped to yclip so they stop below the annotations
clip(floor(min(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)), 0, yclip)
abline(v=0, lwd=1.5, lty=2)
abline(v=median(hist_Fst_A_directed[,i], na.rm=T),col=col_Gfos_A)
abline(v=median(hist_Fst_B_directed[,i], na.rm=T),col=col_Gfos_B)
# Restore full-height clipping for the annotations
clip(floor(min(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)), 0, ylim)
# textbox(ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),150,paste0(measure4," = ",formatC(round(median(hist_Fst_A_directed[,i]),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_A, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
# textbox(ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),50,paste0(measure4," = ",formatC(round(median(hist_Fst_B_directed[,i]),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_B, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
# Redraw group A on top of the reference lines, then overlay group B
hist(hist_Fst_A_directed[,i],
breaks=seq(floor(min(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),0.05),
xlim=c(floor(min(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T))),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_A,
main="", add=T)
hist(hist_Fst_B_directed[,i],
breaks=seq(floor(min(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),0.05),
xlim=c(floor(min(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T))),
ylim=c(0,ylim),
xaxt="n",
yaxt="n",
col=col_Gfos_B,
main="",
add=T)
# Median directed offsets per group, printed top-right (drawn after the
# overlays so the text sits on top of the bars)
textbox(ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),yclip,paste0(measure4," = ",formatC(round(median(hist_Fst_A_directed[,i], na.rm=T),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_A, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
textbox(ceiling(max(hist_Fst_A_directed,hist_Fst_B_directed, na.rm=T)),yclip-50,paste0(measure4," = ",formatC(round(median(hist_Fst_B_directed[,i], na.rm=T),median_digits),digits=median_digits, format="f")), txt.cex=median_cex, txt.col=col_Gfos_B, txt.adj=1, frm.col="white", frm.brd = "white", frm.siz = 0.2)
# Column headers, shared axis titles, and edge tick marks by panel position
if (i %in% c(1,2)){
mtext(lab[6], side=3, line=0.8, cex=1.3)
}
if (i %in% c(7,8)){
mtext(lab[7], side=3, line=0.8, cex=1.3)
}
if (i %in% c(13,14)){
mtext(lab[8], side=3, line=0.8, cex=1.3)
}
if (i %in% c(3)){
mtext("Frequencies [counts]", side=2, line=1, cex=1)
}
if (i %in% c(11,12)){
mtext(measure3, side=1, line=3, cex=1)
}
if (i %in% c(5:6,11:12,17:18)){
axis(1)
}
if (i %notin% c(5:6,11:12,17:18)){
axis(1, labels = F)
}
if (i %in% c(1,3,5)){
axis(2, at=seq(0,yclip,200), labels=F)
}
if (i %in% c(2,4,6)){
axis(2, at=seq(0,yclip,200))
}
if (i %in% c(13:18)){
axis(4, at=seq(0,yclip,200), labels=F)
}
}
# Optional overall figure title (off by default via main_s25)
if(main_s25){
plot.new()
text(0.5,0.7,paste0(lab_Ar,": Distribution of directed ",measure3a),adj=c(0.5,0.5),cex=3)
}
# Outer label panels of the layout
for (i in 1:length(lab)){
plot.new()
if (i %in% c(1:2)){
text(0.5,1,lab[i],adj=c(0.5,1), cex=lab_cex[i])
}
if (i %in% c(3:5)){
text(0,0.5,lab[i],adj=c(0,0.5), cex=lab_cex[i])
}
}
# Legend panel
plot.new()
legend("topleft",c(label_A,label_B),pch = 16, col = c(col_Gfos_A,col_Gfos_B), bty="n", cex=2)
dev.off()
##** TABLE S26 ####
##** Rank model parameterizations by summed perpendicular offset (SPO) of Fst.
# Column sums of the pairwise-Fst offset matrices give one total (summed)
# perpendicular offset per model run. Sums and ordering permutations are
# computed once and reused, instead of re-running apply()/order() for every
# derived vector (same values, less repeated work).
spo_Fst_A <- apply(hist_Fst_A,2,sum)
spo_Fst_B <- apply(hist_Fst_B,2,sum)
ord_spo_Fst_A <- order(spo_Fst_A)
ord_spo_Fst_B <- order(spo_Fst_B)
# Combined parameter labels in rank order (kept for downstream use)
SPOrank_Fst_A <- labs_comb[ord_spo_Fst_A]
SPOrank_Fst_B <- labs_comb[ord_spo_Fst_B]
# Ranked offset values and the corresponding d / W / K parameter labels
SPOrank_spo_Fst_A <- spo_Fst_A[ord_spo_Fst_A]
SPOrank_d_Fst_A <- labs_short[,3][ord_spo_Fst_A]
SPOrank_W_Fst_A <- labs_short[,2][ord_spo_Fst_A]
SPOrank_K_Fst_A <- labs_short[,1][ord_spo_Fst_A]
SPOrank_spo_Fst_B <- spo_Fst_B[ord_spo_Fst_B]
SPOrank_d_Fst_B <- labs_short[,3][ord_spo_Fst_B]
SPOrank_W_Fst_B <- labs_short[,2][ord_spo_Fst_B]
SPOrank_K_Fst_B <- labs_short[,1][ord_spo_Fst_B]
# Assemble and export the ranking table (semicolon-separated CSV)
SPOrank_Fst <- data.frame("Fst_A_SPO"=SPOrank_spo_Fst_A,"Fst_A_d"=SPOrank_d_Fst_A,"Fst_A_W"=SPOrank_W_Fst_A,"Fst_A_K"=SPOrank_K_Fst_A,
"Fst_B_SPO"=SPOrank_spo_Fst_B,"Fst_B_d"=SPOrank_d_Fst_B,"Fst_B_W"=SPOrank_W_Fst_B,"Fst_B_K"=SPOrank_K_Fst_B)
write.csv2(SPOrank_Fst, paste0(WD,"/Analysis_",output,"/SuppFigs/Table_S26.csv"))
##** TABLE S27 ####
MPOrank_Fst_A <- labs_comb[order(apply(hist_Fst_A,2,median))]
MPOrank_Fst_B <- labs_comb[order(apply(hist_Fst_B,2,median))]
MPOrank_mpo_Fst_A <- apply(hist_Fst_A,2,median)[order(apply(hist_Fst_A,2,median))]
MPOrank_d_Fst_A <- labs_short[,3][order(apply(hist_Fst_A,2,median))]
MPOrank_W_Fst_A <- labs_short[,2][order(apply(hist_Fst_A,2,median))]
MPOrank_K_Fst_A <- labs_short[,1][order(apply(hist_Fst_A,2,median))]
MPOrank_mpo_Fst_B <- apply(hist_Fst_B,2,median)[order(apply(hist_Fst_B,2,median))]
MPOrank_d_Fst_B <- labs_short[,3][order(apply(hist_Fst_B,2,median))]
MPOrank_W_Fst_B <- labs_short[,2][order(apply(hist_Fst_B,2,median))]
MPOrank_K_Fst_B <- labs_short[,1][order(apply(hist_Fst_B,2,median))]
MPOrank_Fst <- data.frame("Fst_A_MPO"=MPOrank_mpo_Fst_A,"Fst_A_d"=MPOrank_d_Fst_A,"Fst_A_W"=MPOrank_W_Fst_A,"Fst_A_K"=MPOrank_K_Fst_A,
"Fst_B_MPO"=MPOrank_mpo_Fst_B,"Fst_B_d"=MPOrank_d_Fst_B,"Fst_B_W"=MPOrank_W_Fst_B,"Fst_B_K"=MPOrank_K_Fst_B)
write.csv2(MPOrank_Fst, paste0(WD,"/Analysis_",output,"/SuppFigs/Table_S27.csv"))
##** d: Model performance ####
d0001_Fst_A <- hist_Fst_A[,(1:6)]
d001_Fst_A <- hist_Fst_A[,c(7:12)]
d01_Fst_A <- hist_Fst_A[,c(13:18)]
d0001_Fst_B <- hist_Fst_B[,(1:6)]
d001_Fst_B <- hist_Fst_B[,c(7:12)]
d01_Fst_B <- hist_Fst_B[,c(13:18)]
# Here we calculate the difference to d=0.01 (instead of d=0.001)
diffSPO_d001both_Fst_A <- c(apply(d001_Fst_A,2,sum)-apply(d0001_Fst_A,2,sum),apply(d001_Fst_A,2,sum)-apply(d01_Fst_A,2,sum))
diffSPO_d001both_Fst_B <- c(apply(d001_Fst_B,2,sum)-apply(d0001_Fst_B,2,sum),apply(d001_Fst_B,2,sum)-apply(d01_Fst_B,2,sum))
diffMPO_d001both_Fst_A <- c(apply(d001_Fst_A,2,median)-apply(d0001_Fst_A,2,median),apply(d001_Fst_A,2,median)-apply(d01_Fst_A,2,median))
diffMPO_d001both_Fst_B <- c(apply(d001_Fst_B,2,median)-apply(d0001_Fst_B,2,median),apply(d001_Fst_B,2,median)-apply(d01_Fst_B,2,median))
list_d001both <- c(diffMPO_d001both_Fst_A,diffMPO_d001both_Fst_B,
diffSPO_d001both_Fst_A,diffSPO_d001both_Fst_B)
total_comparison_d001both <- length(na.omit(list_d001both))
improved_fit_d001both <- sum(na.omit(list_d001both<0))
# Model performance d
improved_fit_d001both/total_comparison_d001both
##** W: Model performance ####
w00_Fst_A <- hist_Fst_A[,c(1,2,7,8,13,14)]
w05_Fst_A <- hist_Fst_A[,c(3,4,9,10,15,16)]
w10_Fst_A <- hist_Fst_A[,c(5,6,11,12,17,18)]
w00_Fst_B <- hist_Fst_B[,c(1,2,7,8,13,14)]
w05_Fst_B <- hist_Fst_B[,c(3,4,9,10,15,16)]
w10_Fst_B <- hist_Fst_B[,c(5,6,11,12,17,18)]
diffSPO_w00both_Fst_A <- c(apply(w00_Fst_A,2,sum)-apply(w05_Fst_A,2,sum),apply(w00_Fst_A,2,sum)-apply(w10_Fst_A,2,sum))
diffSPO_w00both_Fst_B <- c(apply(w00_Fst_B,2,sum)-apply(w05_Fst_B,2,sum),apply(w00_Fst_B,2,sum)-apply(w10_Fst_B,2,sum))
diffMPO_w00both_Fst_A <- c(apply(w00_Fst_A,2,median)-apply(w05_Fst_A,2,median),apply(w00_Fst_A,2,median)-apply(w10_Fst_A,2,median))
diffMPO_w00both_Fst_B <- c(apply(w00_Fst_B,2,median)-apply(w05_Fst_B,2,median),apply(w00_Fst_B,2,median)-apply(w10_Fst_B,2,median))
list_w00both <- c(diffMPO_w00both_Fst_A,diffMPO_w00both_Fst_B,
diffSPO_w00both_Fst_A,diffSPO_w00both_Fst_B)
total_comparison_w00both <- length(na.omit(list_w00both))
improved_fit_w00both <- sum(na.omit(list_w00both<0))
# Model performance W
improved_fit_w00both/total_comparison_w00both
##** K: Model performance ####
k0_Fst_A <- hist_Fst_A[,c(1,3,5,7,9,11,13,15,17)]
k1_Fst_A <- hist_Fst_A[,c(2,4,6,8,10,12,14,16,18)]
k0_Fst_B <- hist_Fst_B[,c(1,3,5,7,9,11,13,15,17)]
k1_Fst_B <- hist_Fst_B[,c(2,4,6,8,10,12,14,16,18)]
diffSPO_k0k1_Fst_A <- apply(k0_Fst_A,2,sum)-apply(k1_Fst_A,2,sum)
diffSPO_k0k1_Fst_B <- apply(k0_Fst_B,2,sum)-apply(k1_Fst_B,2,sum)
diffMPO_k0k1_Fst_A <- apply(k0_Fst_A,2,median)-apply(k1_Fst_A,2,median)
diffMPO_k0k1_Fst_B <- apply(k0_Fst_B,2,median)-apply(k1_Fst_B,2,median)
list_k0k1 <- c(diffMPO_k0k1_Fst_A,diffMPO_k0k1_Fst_B,
diffSPO_k0k1_Fst_A,diffSPO_k0k1_Fst_B)
total_comparison_k0k1 <- length(na.omit(list_k0k1))
improved_fit_k0k1 <- sum(na.omit(list_k0k1)<0)
# Model performance K
improved_fit_k0k1/total_comparison_k0k1
} |
6a3b49759e6a4a346a1a7e72529059671c2b40c3 | c504cf54212655142112685e917904d41dfe0bde | /do_not_include/new.R | 9f463f66ed45d4d4afc82214a07edbb429f27579 | [] | no_license | BorisHouenou/WeightIt | 3471ac96b4f534e00a2d50a3ad4b3b57893f242d | 593c68582c2de4fff7c5dedcdb9dfb2acc541dfb | refs/heads/master | 2021-02-05T17:26:09.958529 | 2020-02-19T06:23:01 | 2020-02-19T06:23:01 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 14,087 | r | new.R | #New methods and functions
#------Preliminary template----
# Placeholder dispatcher for a weighting method that is not yet implemented.
# Kept so the method name resolves during development; always errors on call.
# NOTE(review): the parameter list reads `covs, treat...` (no comma), which R
# parses as a single argument literally named `treat...` -- presumably a typo
# for `treat, ...`; confirm before wiring this template up.
weightit2XXX <- function(covs, treat...) {
  stop("method = \"XXX\" isn't ready to use yet.", call. = FALSE)
}
#------Template----
# Skeleton for adding a new weighting method backed by an external package.
# The `PACKAGE::FUNCTION` tokens are placeholders to be replaced with the real
# estimation function; as written this template does not run.
#
# Flow: subset/factor the treatment, expand covariates with polynomial and
# interaction terms, add missingness indicators, then forward the processed
# arguments (collected in `A`) to the external fitting function.
weightit2XXX <- function(covs, treat, s.weights, subset, estimand, focal, moments, int, ...) {
  A <- list(...)  # user-supplied extra arguments destined for the backend
  covs <- covs[subset, , drop = FALSE]
  treat <- factor(treat[subset])
  # Append polynomial terms up to `moments` and (optionally) pairwise interactions.
  covs <- cbind(covs, int.poly.f(covs, poly = moments, int = int))
  covs <- apply(covs, 2, make.closer.to.1)
  # Impute NAs to 0 and add a 0/1 indicator column per covariate that had
  # missing values, so missingness itself gets balanced.
  if (any(vars.w.missing <- apply(covs, 2, function(x) anyNA(x)))) {
    missing.ind <- apply(covs[, vars.w.missing, drop = FALSE], 2, function(x) as.numeric(is.na(x)))
    covs[is.na(covs)] <- 0
    covs <- cbind(covs, missing.ind)
  }
  new.data <- data.frame(treat, covs)
  new.formula <- formula(new.data)
  # Fill in defaults for any backend arguments the user did not supply.
  for (f in names(formals(PACKAGE::FUNCTION))) {
    if (is_null(A[[f]])) A[[f]] <- formals(PACKAGE::FUNCTION)[[f]]
  }
  # NOTE(review): this drops arguments matching weightit2optweight's formals --
  # presumably copy-pasted from the optweight method; in a new method this
  # should reference the current function's formals instead. Confirm.
  A[names(A) %in% names(formals(weightit2optweight))] <- NULL
  A[["formula"]] <- new.formula
  A[["data"]] <- new.data
  A[["estimand"]] <- estimand
  A[["s.weights"]] <- s.weights[subset]
  A[["focal"]] <- focal
  A[["verbose"]] <- TRUE
  # NOTE(review): "optweight" here is also a leftover from the copied method;
  # replace with the backend package name when instantiating the template.
  if (check.package("optweight")) {
    out <- do.call(PACKAGE::FUNCTION, A, quote = TRUE)
    obj <- list(w = out[["weights"]], fit.obj = out)
    return(obj)
  }
}
#------Under construction----
#Subgroup Balancing PS
# weightit2sbps(): subgroup balancing propensity score (SBPS) weights.
# For each subgroup (supplied through the `subgroup` argument in `...`) the
# routine decides whether to use weights estimated on the full sample
# ("overall") or weights estimated within the subgroup ("subgroup"), choosing
# the assignment that minimizes an imbalance loss F_ (a sum of squared
# standardized mean differences). Binary treatments only.
#
# NOTE(review): this function is explicitly under construction; it still
# contains debugging print() calls, a global assignment (`w.list <<-`), and
# several bare stop() calls with no error message. Specific issues are
# flagged inline below.
weightit2sbps <- function(covs, treat, s.weights, subset, estimand, focal, stabilize, ...) {
  A <- list(...)
  fit.obj <- NULL
  covs <- covs[subset, , drop = FALSE]
  t <- factor(treat[subset])
  if (!is_binary(t)) stop("Subgroup balancing propensity score weighting is not yet compatible with non-binary treatments.", call. = FALSE)
  # Impute NAs to 0 and append one 0/1 indicator column per covariate that had
  # any missing values, so missingness itself is balanced.
  if (any(vars.w.missing <- apply(covs, 2, function(x) anyNA(x)))) {
    missing.ind <- apply(covs[, vars.w.missing, drop = FALSE], 2, function(x) as.numeric(is.na(x)))
    covs[is.na(covs)] <- 0
    covs <- cbind(covs, missing.ind)
  }
  covs <- apply(covs, 2, make.closer.to.1)
  # Absolute (optionally standardized) weighted mean difference of x between
  # the two treatment levels; the standardizing SD depends on the estimand.
  # NOTE(review): `t==1`/`t==0` compares a factor against numbers, which
  # assumes the treatment levels are literally "0"/"1" -- confirm upstream.
  smd <- function(x, t, w, estimand, std = TRUE) {
    m <- vapply(levels(t), function(t.lev) w.m(x[t==t.lev], w = w[t==t.lev]), numeric(1L))
    mdiff <- abs(diff(m))
    if (check_if_zero(mdiff)) return(0)
    else {
      if (!std) sd <- 1
      else if (estimand == "ATT") sd <- sd(x[t==1])
      else if (estimand == "ATC") sd <- sd(x[t==0])
      else sd <- sqrt(.5 * (var(x[t==1]) + var(x[t==0])))
      return(mdiff/sd)
    }
  }
  loss <- A[["loss"]]
  loss <- match_arg(loss, c("weighting", "matching"))
  if (loss == "matching") {
    # Matching loss: overall covariate balance + within-subgroup balance.
    F_ <- function(covs, sub, t, w) {
      #Overall Balance of covs
      Mk <- apply(covs, 2, function(x) smd(x, t, w, estimand))
      #Subgroup Balance
      Mkr <- unlist(lapply(levels(sub), function(s) {apply(covs, 2,
                                                           function(x) smd(x[sub==s], t[sub==s], w[sub==s], estimand))}))
      return(sum(c(Mk, Mkr) ^ 2))
    }
  }
  else if (loss == "weighting") {
    # Weighting loss: additionally balances subgroup membership itself (Mr).
    F_ <- function(covs, sub, t, w) {
      #Overall Balance of covs
      Mk <- apply(covs, 2, function(x) smd(x, t, w, estimand))
      #Overall balance of subgroups
      Mr <- vapply(levels(sub), function(s) {smd(as.numeric(sub == s), t, w, std = FALSE)}, numeric(1L))
      #Subgroup Balance
      Mkr <- unlist(lapply(levels(sub), function(s) {apply(covs, 2,
                                                           function(x) smd(x[sub==s], t[sub==s], w[sub==s], estimand))}))
      return(sum(c(Mk, Mr, Mkr) ^ 2))
    }
  }
  else stop()  # NOTE(review): unreachable after match_arg(), but messageless if hit
  #Process subgroup
  subgroup <- process.by(by = A[["subgroup"]], data = covs, treat = t, by.arg = "subgroup")$by.factor
  overall.weights <- subgroup.weights <- NULL
  # Users may supply pre-computed propensity scores (matrix keyed by treatment
  # level, or a numeric vector of Pr(t == level 2)) for either component.
  if (is_not_null(A[["overall.ps"]])) {
    # NOTE(review): `all(colnames(A[["overall.ps"]] %in% levels(t)))` takes
    # colnames() of a logical vector (always NULL, so all() is TRUE); the
    # parentheses were presumably meant as
    # `all(colnames(A[["overall.ps"]]) %in% levels(t))` -- confirm and fix.
    if ((is.matrix(A[["overall.ps"]]) || is.data.frame(A[["overall.ps"]])) &&
        ncol(A[["overall.ps"]]) == nlevels(t) && all(colnames(A[["overall.ps"]] %in% levels(t)))) {
      ps.mat <- A[["overall.ps"]]
    }
    else if (is.numeric(A[["overall.ps"]])) {
      ps.mat <- matrix(NA_real_, nrow = length(t), ncol = nlevels(t), dimnames = list(NULL, levels(t)))
      ps.mat[, 2] <- A[["overall.ps"]]
      ps.mat[, 1] <- 1 - A[["overall.ps"]]
    }
    else {
      stop()  # NOTE(review): messageless error on malformed input
    }
    overall.weights <- get_w_from_ps(ps.mat, t, estimand, focal)
  }
  if (is_not_null(A[["subgroup.ps"]])) {
    # Same structure (and same colnames() parenthesis issue) as the
    # overall.ps branch in this function.
    if ((is.matrix(A[["subgroup.ps"]]) || is.data.frame(A[["subgroup.ps"]])) &&
        ncol(A[["subgroup.ps"]]) == nlevels(t) && all(colnames(A[["subgroup.ps"]] %in% levels(t)))) {
      ps.mat <- A[["subgroup.ps"]]
    }
    else if (is.numeric(A[["subgroup.ps"]])) {
      ps.mat <- matrix(NA_real_, nrow = length(t), ncol = nlevels(t), dimnames = list(NULL, levels(t)))
      ps.mat[, 2] <- A[["subgroup.ps"]]
      ps.mat[, 1] <- 1 - A[["subgroup.ps"]]
    }
    else {
      stop()
    }
    subgroup.weights <- get_w_from_ps(ps.mat, t, estimand, focal)
  }
  # Directly supplied weights override any weights derived from ps above.
  if (is_not_null(A[["overall.weights"]])) {
    if (!is.numeric(A[["overall.weights"]])) {
      stop()
    }
    overall.weights <- A[["overall.weights"]]
  }
  if (is_not_null(A[["subgroup.weights"]])) {
    if (!is.numeric(A[["subgroup.weights"]])) {
      stop()
    }
    subgroup.weights <- A[["subgroup.weights"]]
  }
  # If either component is still missing, estimate both from scratch.
  if (is_null(overall.weights) || is_null(subgroup.weights)) {
    #Process w.method
    w.method <- A[["w.method"]]
    check.acceptable.method(w.method, msm = FALSE, force = FALSE)
    if (is.character(w.method)) {
      w.method <- method.to.proper.method(w.method)
      attr(w.method, "name") <- w.method
    }
    else if (is.function(w.method)) {
      w.method.name <- paste(deparse(substitute(w.method)))
      check.user.method(w.method)
      attr(w.method, "name") <- w.method.name
    }
    if (loss == "matching") {
      t.bin <- binarize(t)
      # Overall PS model on the full sample, then 1:1 caliper matching
      # (exact on subgroup membership) to obtain matching weights.
      overall.fit <- weightit.fit(covs = covs, treat = t, method = "ps",
                                  treat.type = "binary", s.weights = s.weights,
                                  by.factor = factor(rep(1, length(t))), estimand = estimand,
                                  focal = focal, stabilize = stabilize,
                                  ps = NULL, moments = 1, int = FALSE)
      overall.ps <- overall.fit$ps
      overall.match <- Matching::Match(Tr = t.bin, X = matrix(c(overall.ps, as.numeric(subgroup)), ncol = 2),
                                       estimand = estimand, caliper = .25,
                                       M = 1, replace = FALSE, exact = c(FALSE, TRUE), ties = TRUE)
      overall.weights <- cobalt::get.w(overall.match)
      # Same procedure with the PS model fit separately within each subgroup.
      subgroup.fit <- weightit.fit(covs = covs, treat = t, method = "ps",
                                   treat.type = "binary", s.weights = s.weights,
                                   by.factor = subgroup, estimand = estimand,
                                   focal = focal, stabilize = stabilize,
                                   ps = NULL, moments = 1, int = FALSE)
      subgroup.ps <- subgroup.fit$ps
      subgroup.match <- Matching::Match(Tr = t.bin, X = matrix(c(subgroup.ps, as.numeric(subgroup)), ncol = 2),
                                       estimand = estimand, caliper = .25,
                                       M = 1, replace = FALSE, exact = c(FALSE, TRUE), ties = TRUE)
      subgroup.weights <- cobalt::get.w(subgroup.match)
    }
    if (loss == "weighting") {
      #Estimate overall weights
      overall.fit <- weightit.fit(covs = covs, treat = t, method = w.method,
                                  treat.type = "binary", s.weights = s.weights,
                                  by.factor = factor(rep(1, length(t))), estimand = estimand,
                                  focal = focal, stabilize = stabilize,
                                  ps = NULL, moments = 1, int = FALSE)
      overall.weights <- overall.fit$w
      #Estimate subgroup weights
      subgroup.fit <- weightit.fit(covs = covs, treat = t, method = w.method,
                                   treat.type = "binary", s.weights = s.weights,
                                   by.factor = subgroup, estimand = estimand,
                                   focal = focal, stabilize = stabilize,
                                   ps = NULL, moments = 1, int = FALSE)
      subgroup.weights <- subgroup.fit$w
    }
  }
  #Find combinations that minimize loss
  n.subgroups <- nunique(subgroup)
  if (n.subgroups > 8) {
    # Too many subgroups for an exhaustive 2^R search: use a stochastic
    # coordinate-descent search over overall/subgroup assignments.
    #Stochastic search
    L1 <- 10  # maximum number of sweeps
    L2 <- 5   # stop after this many sweeps with no loss change
    S_ <- setNames(rep("overall", nlevels(subgroup)),
                   levels(subgroup))
    rep <- 0
    no.change.streak <- 0
    current.loss <- Inf
    while (rep <= L1 && no.change.streak <= L2) {
      rep <- rep + 1
      if (is_null(get0("last.loss"))) last.loss <- Inf
      else last.loss <- current.loss
      rand.subs <- sample(levels(subgroup))
      S__ <- setNames(sample(c("overall", "subgroup"), length(S_), replace = TRUE), rand.subs)
      for (i in 1:length(S__)) {
        # Evaluate the loss with subgroup i set to "overall"...
        S__[i] <- "overall"
        to.overall <- subgroup %in% rand.subs[S__[rand.subs] == "overall"]
        w_ <- subgroup.weights
        w_[to.overall] <- overall.weights[to.overall]
        loss.o <- F_(covs, subgroup, t, w_)
        # ...and with it set to "subgroup", keeping whichever scores lower.
        S__[i] <- "subgroup"
        to.overall <- subgroup %in% rand.subs[S__[rand.subs] == "overall"]
        w_ <- subgroup.weights
        w_[to.overall] <- overall.weights[to.overall]
        loss.s <- F_(covs, subgroup, t, w_)
        if (loss.o < loss.s) {
          S__[i] <- "overall"
          if (loss.o < current.loss) {
            current.loss <- loss.o
            attr(current.loss, "S") <- S__
          }
        }
        else {
          S__[i] <- "subgroup"
          if (loss.s < current.loss) {
            current.loss <- loss.s
            attr(current.loss, "S") <- S__
          }
        }
      }
      to.overall <- subgroup %in% rand.subs[S__[rand.subs] == "overall"]
      w_ <- subgroup.weights
      w_[to.overall] <- overall.weights[to.overall]
      current.loss <- F_(covs, subgroup, t, w_)
      if (check_if_zero(current.loss - last.loss)) no.change.streak <- no.change.streak + 1
      print(current.loss)  # NOTE(review): debugging output left in
      print(S__)           # NOTE(review): debugging output left in
    }
    best.S <- attr(current.loss, "S")
    to.overall <- subgroup %in% rand.subs[best.S[rand.subs] == "overall"]
    w <- subgroup.weights
    w[to.overall] <- overall.weights[to.overall]
    # NOTE(review): this branch never defines `p.score`, so the list() at the
    # end of the function will fail with "object 'p.score' not found"
    # whenever n.subgroups > 8.
  }
  else {
    # Exhaustive search over all 2^R overall/subgroup assignments.
    S <- setNames(do.call("expand.grid", lapply(integer(n.subgroups), function(x) (c("overall", "subgroup")))),
                  levels(subgroup))
    print(S)  # NOTE(review): debugging output left in
    # NOTE(review): `<<-` writes w.list into the enclosing/global environment;
    # presumably a debugging leftover -- should be a local `<-`.
    w.list <<- lapply(seq_len(nrow(S)), function(i) {
      to.overall <- subgroup %in% levels(subgroup)[S[i, levels(subgroup)] == "overall"]
      w_ <- subgroup.weights
      w_[to.overall] <- overall.weights[to.overall]
      return(w_)
    })
    loss.val <- vapply(w.list, function(w_) F_(covs, subgroup, t, w_), numeric(1L))
    best.loss <- which.min(loss.val)
    w <- w.list[[best.loss]]
    # NOTE(review): `overall.fit`/`subgroup.fit` only exist when weights were
    # estimated in this call; if weights/ps were supplied by the user this
    # access errors. Confirm intended behavior.
    if (is_not_null(overall.fit$ps)) {
      to.overall <- subgroup %in% levels(subgroup)[S[best.loss, levels(subgroup)] == "overall"]
      p.score <- subgroup.fit$ps
      p.score[to.overall] <- overall.fit$ps[to.overall]
    }
    else p.score <- NULL
  }
  obj <- list(w = w
              , ps = p.score
              #, fit.obj = fit.obj
  )
  return(obj)
}
#------Ready for use, but not ready for CRAN----
#KBAL
# weightit2kbal(): kernel balancing weights via KBAL::kbal().
#
# For the ATT, units in each non-focal treatment level are reweighted to
# resemble the (sampling-weighted) focal group; for the ATE, each treatment
# level is reweighted to resemble the full sample. All extra arguments in
# `...` are filtered down to those accepted by KBAL::kbal() (with
# `kbal.method` accepted as an alias for kbal()'s `method`).
#
# Returns a list with `w` (the estimated weights on the subset) and
# `fit.obj` (the per-level kbal fit objects), mirroring the other
# weightit2* methods in this file.
weightit2kbal <- function(covs, treat, s.weights, subset, estimand, focal, ...) {
  A <- list(...)
  covs <- covs[subset, , drop = FALSE]
  treat <- factor(treat)[subset]
  covs <- apply(covs, 2, make.closer.to.1)
  # Impute NAs to 0 and add a 0/1 indicator column per covariate that had
  # missing values, so missingness itself gets balanced.
  if (any(vars.w.missing <- apply(covs, 2, function(x) anyNA(x)))) {
    missing.ind <- apply(covs[, vars.w.missing, drop = FALSE], 2, function(x) as.numeric(is.na(x)))
    covs[is.na(covs)] <- 0
    covs <- cbind(covs, missing.ind)
  }
  # Accept the user-facing name `kbal.method` as an alias for kbal()'s `method`.
  if ("kbal.method" %in% names(A)) {
    names(A)[names(A) == "kbal.method"] <- "method"
  }
  # Fill unspecified kbal() arguments with their defaults, then drop anything
  # kbal() does not accept (X and D are supplied explicitly below).
  for (f in names(formals(KBAL::kbal))) {
    if (is_null(A[[f]])) A[[f]] <- formals(KBAL::kbal)[[f]]
  }
  A[names(A) %nin% setdiff(names(formals(KBAL::kbal)), c("X", "D"))] <- NULL
  if (check.package("KBAL")) {
    if (hasName(A, "method")) {
      # The entropy-balancing ("el") backend of kbal needs these extra packages.
      if (A[["method"]] == "el") check.package(c("glmc", "emplik"))
    }
    if (estimand == "ATT") {
      w <- rep(1, length(treat))
      control.levels <- levels(treat)[levels(treat) != focal]
      fit.list <- setNames(vector("list", length(control.levels)), control.levels)
      # Scale the focal group's covariates by its sampling weights so each
      # control level is balanced toward the s.weighted focal distribution.
      covs[treat == focal,] <- covs[treat == focal, , drop = FALSE] * s.weights[subset][treat == focal] * sum(treat == focal)/sum(s.weights[subset][treat == focal])
      for (i in control.levels) {
        treat.in.i.focal <- treat %in% c(focal, i)
        treat_ <- ifelse(treat[treat.in.i.focal] == i, 0L, 1L)
        covs_ <- covs[treat.in.i.focal, , drop = FALSE]
        # Drop columns that are collinear within the control group; kbal
        # requires a full-rank design there.
        colinear.covs.to.remove <- colnames(covs_)[colnames(covs_) %nin% colnames(make_full_rank(covs_[treat_ == 0, , drop = FALSE]))]
        covs_ <- covs_[, colnames(covs_) %nin% colinear.covs.to.remove, drop = FALSE]
        # BUG FIX: extra arguments were previously passed as the undefined
        # object `args`; the processed dots list is `A`.
        kbal.out <- do.call(KBAL::kbal, c(list(X = covs_, D = treat_), A))
        w[treat == i] <- (kbal.out$w / s.weights[subset])[treat_ == 0L]
        fit.list[[i]] <- kbal.out
      }
    }
    else if (estimand == "ATE") {
      w <- rep(1, length(treat))
      fit.list <- setNames(vector("list", nlevels(treat)), levels(treat))
      for (i in levels(treat)) {
        # Stack the full sample (D = 1) on top of level i's units (D = 0) so
        # kbal reweights level i toward the whole sample.
        covs_i <- rbind(covs, covs[treat==i, , drop = FALSE])
        treat_i <- c(rep(1, nrow(covs)), rep(0, sum(treat==i)))
        colinear.covs.to.remove <- colnames(covs_i)[colnames(covs_i) %nin% colnames(make_full_rank(covs_i[treat_i == 0, , drop = FALSE]))]
        covs_i <- covs_i[, colnames(covs_i) %nin% colinear.covs.to.remove, drop = FALSE]
        # Scale the pooled-sample rows by the sampling weights.
        covs_i[treat_i == 1,] <- covs_i[treat_i == 1,] * s.weights[subset] * sum(treat_i == 1) / sum(s.weights[subset])
        # BUG FIX: same `args` -> `A` correction as in the ATT branch.
        kbal.out_i <- do.call(KBAL::kbal, c(list(X = covs_i, D = treat_i), A))
        w[treat == i] <- kbal.out_i$w[treat_i == 0] / s.weights[subset][treat == i]
        fit.list[[i]] <- kbal.out_i
      }
    }
  }
  # Also return the fit objects, for consistency with the other methods here.
  obj <- list(w = w, fit.obj = fit.list)
  return(obj)
}
|
8b6038c1eb0e95bba11102f7842bc9a3f37e3528 | ffdea92d4315e4363dd4ae673a1a6adf82a761b5 | /data/genthat_extracted_code/DiagrammeR/examples/trav_both_edge.Rd.R | df9aea57ce9b84ec06148ea819c972ed1b859911 | [] | no_license | surayaaramli/typeRrh | d257ac8905c49123f4ccd4e377ee3dfc84d1636c | 66e6996f31961bc8b9aafe1a6a6098327b66bf71 | refs/heads/master | 2023-05-05T04:05:31.617869 | 2019-04-25T22:10:06 | 2019-04-25T22:10:06 | null | 0 | 0 | null | null | null | null | UTF-8 | R | false | false | 3,716 | r | trav_both_edge.Rd.R | library(DiagrammeR)
### Name: trav_both_edge
### Title: Traverse from one or more selected nodes onto adjacent edges
### Aliases: trav_both_edge

### ** Examples

# Auto-extracted example code for DiagrammeR::trav_both_edge() (node -> edge
# traversal). Requires the DiagrammeR package to be attached; each chunk
# below demonstrates one variant of the `conditions` filtering argument.

# Set a seed
set.seed(23)

# Create a simple graph
graph <-
  create_graph() %>%
  add_n_nodes(
    n = 2,
    type = "a",
    label = c("asd", "iekd")) %>%
  add_n_nodes(
    n = 3,
    type = "b",
    label = c("idj", "edl", "ohd")) %>%
  add_edges_w_string(
    edges = "1->2 1->3 2->4 2->5 3->5",
    rel = c(NA, "A", "B", "C", "D"))

# Create a data frame with node ID values
# representing the graph edges (with `from`
# and `to` columns), and, a set of numeric values
df <-
  data.frame(
    from = c(1, 1, 2, 2, 3),
    to = c(2, 3, 4, 5, 5),
    values = round(rnorm(5, 5), 2))

# Join the data frame to the graph's internal
# edge data frame (edf)
graph <-
  graph %>%
  join_edge_attrs(df = df)

# Show the graph's internal edge data frame
graph %>%
  get_edge_df()

# Perform a simple traversal from nodes to
# adjacent edges with no conditions on the
# nodes traversed to
graph %>%
  select_nodes_by_id(nodes = 3) %>%
  trav_both_edge() %>%
  get_selection()

# Traverse from node `2` to any adjacent
# edges, filtering to those edges that have
# NA values for the `rel` edge attribute
graph %>%
  select_nodes_by_id(nodes = 2) %>%
  trav_both_edge(
    conditions = is.na(rel)) %>%
  get_selection()

# Traverse from node `2` to any adjacent
# edges, filtering to those edges that have
# numeric values greater than `6.5` for
# the `rel` edge attribute
graph %>%
  select_nodes_by_id(nodes = 2) %>%
  trav_both_edge(
    conditions = values > 6.5) %>%
  get_selection()

# Traverse from node `5` to any adjacent
# edges, filtering to those edges that
# have values equal to `C` for the `rel`
# edge attribute
graph %>%
  select_nodes_by_id(nodes = 5) %>%
  trav_both_edge(
    conditions = rel == "C") %>%
  get_selection()

# Traverse from node `2` to any adjacent
# edges, filtering to those edges that
# have values in the set `B` and `C` for
# the `rel` edge attribute
graph %>%
  select_nodes_by_id(nodes = 2) %>%
  trav_both_edge(
    conditions = rel %in% c("B", "C")) %>%
  get_selection()

# Traverse from node `2` to any adjacent
# edges, and use multiple conditions for the
# traversal
graph %>%
  select_nodes_by_id(nodes = 2) %>%
  trav_both_edge(
    conditions =
      rel %in% c("B", "C") &
      values > 4.0) %>%
  get_selection()

# Traverse from node `2` to any adjacent
# edges, and use multiple conditions with
# a single-length vector
graph %>%
  select_nodes_by_id(nodes = 2) %>%
  trav_both_edge(
    conditions =
      rel %in% c("B", "C") |
      values > 4.0) %>%
  get_selection()

# Traverse from node `2` to any adjacent
# edges, and use a regular expression as
# a filtering condition
graph %>%
  select_nodes_by_id(nodes = 2) %>%
  trav_both_edge(
    conditions = grepl("B|C", rel)) %>%
  get_selection()

# Create another simple graph to demonstrate
# copying of node attribute values to traversed
# edges
graph <-
  create_graph() %>%
  add_path(n = 4) %>%
  select_nodes_by_id(nodes = 2:3) %>%
  set_node_attrs_ws(
    node_attr = value,
    value = 5)

# Show the graph's internal edge data frame
graph %>%
  get_edge_df()

# Show the graph's internal node data frame
graph %>%
  get_node_df()

# Perform a traversal from the nodes to
# the adjacent edges while also applying
# the node attribute `value` to the edges (in
# this case summing the `value` of 5 from
# all contributing nodes adding as an edge
# attribute)
graph <-
  graph %>%
  trav_both_edge(
    copy_attrs_from = value,
    agg = "sum")

# Show the graph's internal edge data frame
# after this change
graph %>%
  get_edge_df()
3c821f4119fcef35e12ce910df26531bfe5c7a67 | 33464d95ff20ff7feda3ad658fb3f2de5b075308 | /code/raw/good_turing.R | bba6200e8fd54a76e20cdadab86e5752ee3abf6b | [] | no_license | YevgenyY/DS_Capstone | 514cd599fa61450f7eec84e89e293de12b229376 | 883fbd590dc61135e5c9d2417460a8eef10d3b39 | refs/heads/master | 2021-01-10T14:41:52.539675 | 2016-04-22T10:54:11 | 2016-04-22T10:54:11 | 53,430,751 | 1 | 0 | null | null | null | null | UTF-8 | R | false | false | 890 | r | good_turing.R | library(quanteda)
library(parallel)
library(foreach)

setwd("~/Coursera/DS_Capstone/")
source("code/raw/stats_helper.R")

#load(file="data/freq.Rda")
load(file = "data/f12345.Rda")  # loads the n-gram frequency tables (f1, ...)

# use parallel computation
no_cores <- detectCores() - 1
cl <- makeCluster(no_cores)

########### Good-Turing smoothing ##########
# Find Nc for 1-grams: for each 1-gram with observed count c, count how many
# 1-grams share that same count (the frequency-of-frequencies N_c).
clusterExport(cl, "f1")
f1Nc <- parSapply(cl, f1, function(x) {
  t <- f1 == x
  return(sum(t))
})

# find discount for 1-grams: for each 1-gram with count c, compute
# N_{c+1} / N_c, scanning f1 for the first entry with count >= c + 1.
clusterExport(cl, "f1Nc")
f1Dc <- parSapply(cl, names(f1), function(x) {
  Nc <- f1Nc[x] # "said" has c=76778, but Nc=1
  cp1 <- f1[x] + 1 # c + 1
  #i <- match(cp1, f1)
  for (i in seq_along(f1)) {
    if (cp1 <= f1[i])
      break
  }
  # No 1-gram has a count of c + 1 or more: N_{c+1} is zero.
  if (i == length(f1))
    Ncp1 <- 0
  else {
    j <- names(f1[i])
    Ncp1 <- f1Nc[j]
  }
  return(Ncp1 / Nc)
})

# Good-Turing adjusted counts: c* = (c + 1) * N_{c+1} / N_c.
pf1 <- (f1 + 1) * f1Dc

save(pf1, file = "data/good_turing.pf1.Rda")

# FIX: release the worker processes; the original script leaked the cluster.
# (Also removed the unused `tmp <- names(f2)` assignment.)
stopCluster(cl)
03f7751647d1a3bb791a8576fa8a55c201787bfb | 839db0fc473db0c3b9cb169a091bbb6046213ff2 | /validation-scripts.previous/bench-merge-above5500/Rscripts/cmp_extension_assembly.R | 2a9fa77c9d4fdfceff39314b62ea07d35ad64e16 | [] | no_license | Transipedia/KaMRaT | 3a5754a14119cda0baf9fdd13f2aa6b5f735583f | 6da06e353d9dd861285ac057ba19e790463cf4ed | refs/heads/master | 2023-06-10T09:09:59.111236 | 2023-05-22T18:47:16 | 2023-05-22T18:47:16 | 241,427,846 | 6 | 1 | null | null | null | null | UTF-8 | R | false | false | 5,238 | r | cmp_extension_assembly.R | rm(list = ls())
library(Biostrings)
library(tidyr)
library(ggplot2)
library(patchwork)
# Root directory of the benchmark results; alternative locations kept commented.
# work.dir <- "/data/work/I2BC/haoliang.xue/kamrat-new-res/Results/bench-merge-above5500/"
work.dir <- "/store/plateformes/CALCUL/SSFA_KaMRaT/Results/bench-merge-above5500/"
# work.dir <- "../../../../Ariticles/KaMRaT/RevisedAnalysis/bench-merge/"
# Contigs must be strictly longer than this length to be counted in the
# alignment statistics -- presumably one base beyond the k-mer length;
# TODO(review) confirm the rationale for 61.
min_len <- 61
# Percentage of contigs longer than `min_len` whose BLAST hit is a perfect,
# full-length alignment: the query is covered end to end (qstart == 1,
# qend == qlen), every aligned base is identical (nident == qlen,
# pident == 100), and the query exceeds the length cutoff.
# `min_len` is read from the enclosing script environment.
evaluate_perfalign <- function(ctg.fa, align.res) {
  is.perfect <- with(align.res,
                     qlen == qend &
                       qstart == 1 &
                       qlen == length &
                       qlen == nident &
                       pident == 100 &
                       qlen > min_len)
  n.long.ctg <- sum(width(ctg.fa) > min_len)
  100 * sum(is.perfect) / n.long.ctg
}
# Percentage of contigs longer than `min_len` that align with 100% query
# coverage (complete alignment; mismatches are allowed, unlike the perfect-
# alignment metric). `min_len` is read from the enclosing script environment.
evaluate_compalign <- function(ctg.fa, align.res) {
  n.complete <- sum(align.res$qcovs == 100 & align.res$qlen > min_len)
  n.long.ctg <- sum(width(ctg.fa) > min_len)
  100 * n.complete / n.long.ctg
}
# For each sequencing depth, gather contig statistics (count, median length,
# %perfect and %complete alignment) for KaMRaT (with/without intervention
# modes) and for rnaSPAdes, then plot both alignment metrics versus depth.
abd <- 1  # abundance threshold suffix used in the KaMRaT result paths
stats.res <- NULL
# NOTE(review): growing stats.res with rbind() inside the loop is O(n^2);
# collecting rows in a list and binding once would be preferable.
for (dpt in c(0.05, 0.2, 0.4, 0.6, 0.8, 1)) {
  # KaMRaT none
  cat(dpt, "KaMRaT none", ":")
  ctg.path <- paste0(work.dir, "kamrat_res_err-free_1-", abd, "/depth_", dpt, "/ctg-seq.none.fa")
  align.path <- paste0(work.dir, "kamrat_res_err-free_1-", abd, "/depth_", dpt, "/ctg-aligned.none.tsv")
  ctg.fa <- readDNAStringSet(ctg.path)
  align.res <- read.table(align.path, header = TRUE, row.names = 1)
  cat("\t", length(ctg.fa), nrow(align.res), "\n")
  stats.res <- rbind(stats.res,
                     data.frame("depth" = dpt,
                                "mode" = "KaMRaT none",
                                "nb.ctg" = length(ctg.fa),
                                "ctg.median.len" = median(nchar(ctg.fa)),
                                "perf.align" = evaluate_perfalign(ctg.fa, align.res),
                                "comp.align" = evaluate_compalign(ctg.fa, align.res)))
  # KaMRaT with intervention (correlation/overlap thresholds fixed at 0.2)
  for (mode in c("pearson", "spearman", "mac")) {
    cat(dpt, "KaMRaT", paste(mode, 0.2, sep = ":"), ":")
    ctg.path <- paste0(work.dir, "kamrat_res_err-free_1-", abd, "/depth_", dpt, "/ctg-seq.", mode, "_0.2", ".fa")
    align.path <- paste0(work.dir, "kamrat_res_err-free_1-", abd, "/depth_", dpt, "/ctg-aligned.", mode, "_0.2", ".tsv")
    ctg.fa <- readDNAStringSet(ctg.path)
    align.res <- read.table(align.path, header = TRUE, row.names = 1)
    cat("\t", length(ctg.fa), nrow(align.res), "\n")
    stats.res <- rbind(stats.res,
                       data.frame("depth" = dpt,
                                  "mode" = paste0("KaMRaT ", mode, ":0.2"),
                                  "nb.ctg" = length(ctg.fa),
                                  "ctg.median.len" = median(nchar(ctg.fa)),
                                  "perf.align" = evaluate_perfalign(ctg.fa, align.res),
                                  "comp.align" = evaluate_compalign(ctg.fa, align.res)))
  }
  # rnaSPAdes, assembled either from all reads or from all k-mers
  for (mode in c("allreads", paste0("allkmers-1-", abd))) {
    cat(dpt, "SPAdes", mode, ":")
    ctg.path <- paste0(work.dir, "spades_res/err-free/depth_", dpt, "/", mode, "/transcripts.fasta")
    align.path <- paste0(work.dir, "spades_res/err-free/depth_", dpt, "/", mode, "/blastn_align.tsv")
    ctg.fa <- readDNAStringSet(ctg.path)
    align.res <- read.table(align.path, header = TRUE, row.names = 1)
    cat("\t", length(ctg.fa), nrow(align.res), "\n")
    stats.res <- rbind(stats.res,
                       data.frame("depth" = dpt,
                                  "mode" = paste0("rnaSPAdes ", strsplit(mode, split = "-")[[1]][1]),
                                  "nb.ctg" = length(ctg.fa),
                                  "ctg.median.len" = median(nchar(ctg.fa)),
                                  "perf.align" = evaluate_perfalign(ctg.fa, align.res),
                                  "comp.align" = evaluate_compalign(ctg.fa, align.res)))
  }
}
write.csv(stats.res, paste0(work.dir, "results/2_newcmp_with_without_intervention_1-", abd, ".csv"),
          quote = FALSE)
pdf(paste0(work.dir, "results/2_newcmp_with_without_intervention.pdf"),
    width=9, height=7)
# Top panel: %perfect alignment vs depth (y-axis clipped to 70-100).
plt1 <- ggplot(data = stats.res, aes(x = depth, y = perf.align, color = mode)) +
  geom_line(linewidth = 1) +
  geom_point() +
  scale_x_continuous(breaks = c(0.05, 0.2, 0.4, 0.6, 0.8, 1)) +
  scale_color_manual(values = c("KaMRaT none" = "#e66101",
                                "KaMRaT mac:0.2" = "#fdb863",
                                "KaMRaT pearson:0.2" = "#b2abd2",
                                "KaMRaT spearman:0.2" = "#5e3c99",
                                "rnaSPAdes allkmers" = "#808080",
                                "rnaSPAdes allreads" = "#000000")) +
  ylim(c(70, 100)) +
  ylab("%perfect alignment") +
  theme_light() +
  theme(text = element_text(size = 15, family = "sans"),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# Bottom panel: %complete alignment vs depth (free y-axis).
plt2 <- ggplot(data = stats.res, aes(x = depth, y = comp.align, color = mode)) +
  geom_line(linewidth = 1) +
  geom_point() +
  scale_x_continuous(breaks = c(0.05, 0.2, 0.4, 0.6, 0.8, 1)) +
  scale_color_manual(values = c("KaMRaT none" = "#e66101",
                                "KaMRaT mac:0.2" = "#fdb863",
                                "KaMRaT pearson:0.2" = "#b2abd2",
                                "KaMRaT spearman:0.2" = "#5e3c99",
                                "rnaSPAdes allkmers" = "#808080",
                                "rnaSPAdes allreads" = "#000000")) +
  ylab("%complete alignment") +
  theme_light() +
  theme(text = element_text(size = 15, family = "sans"),
        panel.grid.major = element_blank(), panel.grid.minor = element_blank())
# Stack the two panels vertically (patchwork `/` operator) into the open PDF.
plot(plt1/plt2)
dev.off()
e26717063d1fccd5c984a6c05d306f20a5c8a2f6 | 4343c3a44a0eb0effaeb45a4f695a1b42386860d | /man/searchData.Rd | dbb6328390d3da7625a1e52b81aea0e58adc3f23 | [
"Apache-2.0"
] | permissive | abossenbroek/GeneralTree | 1b19a86cc23bf713e3352311e69d3edbcf9fbf79 | 3efe2d9cc5a9082cba6995961b19159bfc9224a9 | refs/heads/master | 2020-12-25T16:57:33.837972 | 2016-09-09T12:27:45 | 2016-09-09T12:27:45 | 58,137,112 | 3 | 1 | null | 2016-09-09T16:05:57 | 2016-05-05T14:32:37 | R | UTF-8 | R | false | true | 499 | rd | searchData.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/GeneralTree.R
\name{searchData}
\alias{searchData}
\title{Search for an id in starting at a point in the tree and return the data
matching the id.}
\usage{
searchData(self, id)
}
\arguments{
\item{self}{the node where to start searching.}
\item{id}{the id to look for.}
}
\value{
The data associated with an id.
}
\description{
Search for an id in starting at a point in the tree and return the data
matching the id.
}
|
693418832238b7075b2d29f72e4aa8f07c1df438 | 727e274d8dfbfc6eda1166844e185052133a8c32 | /R/PublicationPlots/promoterCapSeq_parallelPlots.R | f142a3de144e42d3bc0ce7e06d44b813bd14ee69 | [] | no_license | JCSMR-Tremethick-Lab/MCF10APromoterAnalysis | 871c6162fbd42f114c75f386bf4b15d6d89fea88 | 218f8a241b5645796b2a4d8a6bb33caf38a146c1 | refs/heads/master | 2021-03-30T17:45:18.443603 | 2021-01-29T09:44:25 | 2021-01-29T09:44:25 | 56,828,470 | 0 | 0 | null | 2020-05-20T06:53:47 | 2016-04-22T05:26:30 | R | UTF-8 | R | false | false | 1,971 | r | promoterCapSeq_parallelPlots.R | library(data.table)
library(ggparallel)

# Figure 1 ----------------------------------------------------------------
# Read every per-sample TSV in the Figure 1 directory, keep the gene column
# plus all "group1" columns, shorten the column names to <cell-line>_<assay>,
# and draw parallel-sets plots of group membership across conditions.
setwd("/home/sebastian/Data/Collaborations/FSU/PromoterSeqCap/sortingTSVS for Tremethick paper /Figure 1/")
sourceFiles <- list.files(".", pattern = ".tsv")
dataList <- lapply(sourceFiles, function(x){
  tab <- data.table::fread(x)
  return(tab)
})
# Name each table by its file name without the extension.
names(dataList) <- unlist(lapply(strsplit(sourceFiles, "\\."), function(x) x[1]))

d1 <- do.call("data.table", dataList)
# Keep the first (gene) column and every "group1" column across samples.
d1 <- subset(d1, select = c(1, grep("group1", colnames(d1))))
colNames <- colnames(d1)[2:8]
# Reduce e.g. "A_Inp_group1" to "A_Inp" (first two underscore-separated parts).
colNames <- unlist(lapply(strsplit(colNames, "_"), function(x) paste(x[1:2], collapse = "_")))
colnames(d1) <- c("gene", colNames)

# Parallel-sets plots over different sample subsets; the last two use the
# "hammock" layout variant.
ggparallel(list("A_Inp", "A_H2AZ", "CA1a_Inp", "CA1a_H2AZ", "shH2AZ_Inp", "TGFb_Inp", "TGFb_H2AZ"), data = d1)
ggparallel(list("A_Inp", "CA1a_Inp", "shH2AZ_Inp", "TGFb_Inp"), data = d1)
ggparallel(list("A_H2AZ", "CA1a_H2AZ", "TGFb_H2AZ"), data = d1)
ggparallel(list("A_H2AZ", "CA1a_H2AZ", "TGFb_H2AZ"), data = d1, method = "hammock", ratio = 0.1)
ggparallel(list("A_Inp", "CA1a_Inp", "shH2AZ_Inp", "TGFb_Inp"), data = d1, method = "hammock", ratio = 0.1)

# attempt at using plot.ly for interactive vis
# NOTE(review): `m1` is not defined anywhere in this script -- presumably it
# was built interactively (a merge of the group tables, given the .x/.y
# column suffixes used below); this section errors as-is. Confirm.
library(plotly)
p <- m1 %>%
  plot_ly(width = 1920, height = 1080) %>%
  add_trace(type = 'parcoords',
            line = list(showscale = TRUE,
                        reversescale = TRUE,
                        color = ~order.x,
                        colorscale = 'Jet',
                        cmin = 0,
                        cmax = 20000),
            dimensions = list(
              list(tickvals = c(1:7),
                   label = "group1",
                   values = ~group1.x),
              list(tickvals = c(1:7),
                   label = "group2",
                   values = ~group1.y),
              list(tickvals = c(1:7),
                   label = "group3",
                   values = ~group1)
            )
  )
as_widget(p)
b5fb203b27fd53f74394bc02b8ac037eaa83b41a | 03715ef672cc49b9de1e46bab7ebe68eb69b90ef | /man/rawtocomposite.Rd | 616d71f7f9938472e0f1f8da34eba1f8fff09752 | [] | no_license | cran/waveformlidar | 501b867be68524862ec10fb12a4aefb9837b19ec | 7a0fee0455142f1ade951ccd191a2299f2d8ed1e | refs/heads/master | 2021-07-11T16:10:17.124894 | 2020-08-01T08:20:03 | 2020-08-01T08:20:03 | 184,310,247 | 2 | 0 | null | null | null | null | UTF-8 | R | false | true | 2,494 | rd | rawtocomposite.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/rawtocomposite.R
\name{rawtocomposite}
\alias{rawtocomposite}
\title{rawtocomposite}
\usage{
rawtocomposite(voxr, inten_index = 2)
}
\arguments{
\item{voxr}{the object from the waveformvoxel.}
\item{inten_index}{the value (1,2,3,4,...) to represnt the intensity of composite waveforms.It is a integer from 1 to 4 and default is 2.
1: the number of intensity of the voxel (generally is not useful); 2: the maximum intensity of the waveform voxel; 3: the mean intensity of the waveform voxel;
4: the total intensity of voxel(the last one is also not prefered in most cases)}
}
\value{
A dataframe with first three columns including geolocation xyz of the first Non-NA intensity (Highest position) and intensities along the height bins, other non-NA values are intensities for the rest columns.
\item{x}{The x position of the first Non-NA intensity or highest intensity position in one waveform}
\item{y}{The y position of the first Non-NA intensity or highest intensity position in one waveform}
\item{z}{The z position of the first Non-NA intensity or highest intensity position in one waveform}
\item{intensity 1}{The intensity of the first height bin}

\item{intensity 2}{The intensity of the second height bin}
\item{...}{Intensities along the height bin}
}
\description{
The function converts the point cloud produced by waveformvoxel, or raw waveforms, into composite waveforms (with a vertical distribution of intensity),
reducing the effect of the off-nadir angle of the emitted laser.
The conversion is based on the waveform voxelization product. Four kinds of values can be chosen to represent the intensity of the composite waveform:
the number of intensities (generally not useful), mean intensity, maximum intensity, and total intensity (the last is also not preferred in most cases).
}
\examples{
data(return) ###import raw return waveforms
data(geo) ###import corresponding reference geolocation
colnames(geo)[2:9]<-c("x","y","z","dx","dy","dz","or","fr")
### you should know which columns corresponding to above column names before
### run the hyperpointcloud when you used your own new datasets.
hpr<-hyperpointcloud(waveform=return,geo=geo)
##beofre run waveformvoxel, we need to create hyperpointcloud first
##this exampel we just used 100000 points to reduce processing time
voxr<-waveformvoxel(hpc=hpr,res=c(1,1,0.3))
rtc<-rawtocomposite(voxr)
}
|
9a7d433030fbf6dfb7d87b074cb8ee50dc48ad37 | 514e2fc6b95ccaa013c4000ed7001002e42c72b8 | /man/plot.landscape.Rd | ceb60b49d2739c6356dc629d3d2df8f8f52aced2 | [
"MIT"
] | permissive | fdschneider/caspr | 7493a32dbd8df8977135c4f57706e2b4bcc5bbec | e191c7cfaebe1499801e08d5fda1728d4697051f | refs/heads/master | 2021-01-15T14:50:33.742811 | 2016-01-13T16:13:20 | 2016-01-13T16:13:20 | 37,334,564 | 4 | 1 | null | 2015-06-26T16:13:08 | 2015-06-12T17:13:56 | R | UTF-8 | R | false | true | 1,217 | rd | plot.landscape.Rd | % Generated by roxygen2: do not edit by hand
% Please edit documentation in R/landscape.R
\name{plot.landscape}
\alias{plot.landscape}
\title{Plotting an objects of class "landscape"}
\usage{
\method{plot}{landscape}(x, cols = "auto", grid = FALSE, axis = FALSE,
add = FALSE, ani = FALSE, ...)
}
\arguments{
\item{x}{A landscape object.}
\item{cols}{A color vector. If \code{"auto"}, then a grayscale vector will be
used.}
\item{grid}{If TRUE, plot a grid over the cells. Defaults to FALSE.}
\item{axis}{If TRUE, plot x and y axis with coordinates. Defaults to FALSE.}
\item{add}{If TRUE, no primary plot will be called. The change in cells will
be plotted over an existing plot. Used for animated plotting in the screen
plotting device.}
\item{ani}{If TRUE, an adjustment constant is added when producing a pixel
accurate png or gif file. Required when the function is used to plot
animated figures.}
}
\value{
A landscape object of dimensions \code{width} x \code{height} with
random distribution of \code{states}, in the relative ratio given in
\code{cover}.
}
\description{
Plotting an objects of class "landscape"
}
\examples{
obj <- init_landscape(c("+","0","-"), c(0.5,0.25,0.25))
plot(obj)
}
|
17b2140542e8e7101d458a38a6bb27c1774da963 | 458e881217581e0b823ad6661ea4f1aa53a34cbf | /cachematrix.R | aadf36c32ea818fa504f2dc8f5f8d16f3479e495 | [] | no_license | course-student/ProgrammingAssignment2 | 4cb833e777a335ac757a2df0f2171fa6e8661007 | 03086008e8e81694f55b37ee374f1ca7936cf563 | refs/heads/master | 2020-12-25T21:34:07.403889 | 2015-07-20T20:26:41 | 2015-07-20T20:26:41 | 39,405,555 | 0 | 0 | null | 2015-07-20T19:59:19 | 2015-07-20T19:59:19 | null | UTF-8 | R | false | false | 1,040 | r | cachematrix.R | ## These functions calculates the inverse of a matrix and caches it
## makeCacheMatrix creates a special "matrix" that contains the inverse of the original matrix cached
makeCacheMatrix <- function(x = matrix()) {
inverse <- NULL
set <- function(y){
x <<- y
inverse <<- NULL
}
get <- function() x
setinverse <- function(s) inverse <<- s
getinverse <- function() inverse
list(set = set, get = get, setinverse = setinverse, getinverse = getinverse)
}
## cacheSolve: return the inverse of the special "matrix" created by
## makeCacheMatrix, reusing the cached value when one is available.
## Extra arguments in `...` are forwarded to solve().
## Assumes the matrix supplied is always invertible.
cacheSolve <- function(x, ...) {
  ## Return a matrix that is the inverse of 'x'
  cached <- x$getinverse()
  if (is.null(cached)) {
    # Cache miss: compute the inverse and store it for future calls.
    fresh <- solve(x$get(), ...)
    x$setinverse(fresh)
    fresh
  } else {
    message("getting cached data")
    cached
  }
}
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.