blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
6c3162b5bba55f238bfa6130559b49c9d5aca67f
|
afcd366c47419daf0687137c06e94c9b32117bdb
|
/man/peg_partido.Rd
|
8d7906cce955dec8118f53ce917b12f188ad6a29
|
[] |
no_license
|
luizmartins1980/apida
|
1963bcfff0be75805f057efb60d2af0b3be21a80
|
98677327fafaf5bc648f55b25bd179a3cae766fb
|
refs/heads/master
| 2021-04-25T06:40:43.017069
| 2017-07-22T03:52:37
| 2017-07-22T03:52:37
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 335
|
rd
|
peg_partido.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/peg.R
\name{peg_partido}
\alias{peg_partido}
\title{Pegar informações de um ou mais partidos}
\usage{
peg_partido(id_partido)
}
\arguments{
\item{id_partido}{Identificador(es) do(s) partido(s)}
}
\description{
Pegar informações de um ou mais partidos
}
|
90162bd42162cc36b8e827e84463b408056e2422
|
a92c154607a68a36905e86843bae56065bed5a6b
|
/addOccFromGTM.R
|
1522e96f1bfa0277f1525c0e9ce8073f3cc1b448
|
[] |
no_license
|
fabiolexcastro/centralAmericaCocoa
|
41c7af22ef3bbde8d55750ac06741fb613e59e33
|
de0c4bc0d4168fc27c8fcdfe732afe515f1855e2
|
refs/heads/master
| 2021-05-13T18:36:03.720454
| 2018-01-09T21:05:45
| 2018-01-09T21:05:45
| 116,870,527
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,222
|
r
|
addOccFromGTM.R
|
# Merge two shapefile point sets into one WGS84 coordinate table, then attach
# bioclimatic raster values to the combined occurrence set and write both out.
# NOTE(review): library() is preferred over require() for hard dependencies —
# require() only warns (returns FALSE) when a package is missing.
require(raster)
require(rgdal)
require(tidyverse)
require(spdplyr)
require(rgeos)
require(gtools)
options(scipen = 999)
# Root of the project data drive (Windows network share)
path <- 'W:/_cam'
# Two point shapefiles already in WGS84; only their coordinates are used
shp1 <- shapefile(paste0(path, '/_points/_new/_second/_shp/coordsWGS.shp'))
shp2 <- shapefile(paste0(path, '/_points/_new/_second/_shp/coordsGTM_WGS_clip.shp'))
df <- as_data_frame(rbind(coordinates(shp1), coordinates(shp2))) %>%
rename(Lon = coords.x1,
Lat = coords.x2)
write.csv(df, paste0(path, '/_points/_new/_second/coordsAllWGS.csv'), row.names = F)
# Add to global presences (Central America)
# Stack only the 'bio' climate layers, in natural sort order (bio_1, bio_2, ...)
lyrs <- paste0(path, '/_raster/_climate/_current/_asc') %>%
list.files(full.names = T, pattern = '.asc$') %>%
grep('bio', ., value = T) %>%
mixedsort() %>%
stack()
# Existing occurrence table; harmonize column names with df before row-binding
occ <- read_csv(paste0(path, '/_points/_csv/2_occ_swd.csv')) %>%
dplyr::select(Longitude, Latitude) %>%
rename(Lon = Longitude,
Lat = Latitude)
occ_all <- rbind(occ, df)
# Sample the raster stack at every occurrence and append the coordinates back
occ_all <- raster::extract(lyrs, occ_all[,1:2]) %>%
cbind(., occ_all[,1:2])
# NOTE(review): assumes the stack provides columns bio_1 through bio_33 — confirm
occ_all <- dplyr::select(occ_all, Lon, Lat, bio_1:bio_33)
write.csv(occ_all, paste0(path, '/_points/_csv/3_occ_swd.csv'), row.names = FALSE)
|
704ecd7764c758891843649e56c5db82cfceb3b7
|
a4ef53e8d087ce99848c7c4960af09927e0a56cf
|
/adaBoost/regresion adaboost con gbm.R
|
61aa60ca7632322c60176f18b9a0e7470f0a4afa
|
[] |
no_license
|
fhernanb/modelos-predictivos
|
0ba16c309a1f726e9e8ca36d37fe7add3a7d0cb5
|
909d65c40ba666fd473b98cb6610b698eff87aad
|
refs/heads/master
| 2023-02-19T05:57:33.799881
| 2021-01-22T12:46:22
| 2021-01-22T12:46:22
| 296,086,556
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,306
|
r
|
regresion adaboost con gbm.R
|
# -------------------------------------------------------------------------
# This example looks for a regression model explaining the response
# variable y in terms of covariates x1 to x11; the data come from
# exercise 9.5 of Montgomery, Peck and Vining (2003). The MPV package
# (Braun 2019) ships all of the datasets accompanying the book.
# -------------------------------------------------------------------------
# The data are available from a github repository
file <- "https://raw.githubusercontent.com/fhernanb/datos/master/table_b3_MPV.txt"
datos <- read.table(file, header=TRUE)
head(datos)
# Inspect rows 23 and 25, which contain NA values
datos[c(23, 25), ]
# Drop the rows containing NA
datos <- datos[-c(23, 25), ]
# Check the dimensions of the data
dim(datos)
# Fit the boosted regression model
# NOTE(review): only x1 and x2 are used here despite the comment above
# mentioning x1..x11 — confirm this restriction is intentional.
library(gbm)
mod <- gbm(y ~ x1 + x2, data=datos,
n.trees=180,
n.minobsinnode=3)
# Predictions on the training data themselves (in-sample fit)
y_hat <- predict(mod, datos)
# A few performance measures
cor(datos$y, y_hat)
MLmetrics::R2_Score(y_pred=y_hat, y_true=datos$y)
MLmetrics::MSE(y_pred=y_hat, y_true=datos$y)
# Observed vs. fitted scatter plot
plot(x=datos$y, y=y_hat, las=1)
|
f0f99629606fbbfdeb0cc9f67eb6f84ffd686da7
|
2f9663e05f7f2c48e3246626bcfcb3bb40f7876f
|
/data-raw/nyccollisions.R
|
6cd0fc782ede9c93cc36e1796e7c696dd0fa889d
|
[] |
no_license
|
sboysel/nyccollisions
|
6444abfb876aa878cc323f21dd54ac4804447b99
|
bd84dfb5b876b15bac099ecfd362aef6e029e6d6
|
refs/heads/master
| 2021-01-19T03:19:50.950492
| 2016-07-13T02:17:22
| 2016-07-13T02:17:22
| 53,287,792
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,503
|
r
|
nyccollisions.R
|
# Build the nyccollisions dataset: fetch one month of NYPD collision records
# from the NYC open-data API, clean/normalize the columns, convert to a
# SpatialPointsDataFrame, and save as package data.
library(dplyr)
library(jsonlite)
library(sp)
# Fetch NYPD Motor Vehicle Collisions data for July, 2015
# Source: https://data.cityofnewyork.us/Public-Safety/NYPD-Motor-Vehicle-Collisions/h9gi-nx95
collisions.src.url <- paste0("https://data.cityofnewyork.us/resource/qiz3-axqb.json?",
"$limit=50000&",
"$where=date between '2015-07-01T00:00:00' and '2015-08-01T00:00:00'")
# URL-encode the spaces in the SoQL query
collisions.src.url <- gsub(" ", "%20", collisions.src.url)
collisions.raw <- jsonlite::fromJSON(collisions.src.url, flatten = TRUE)
# Clean data
# NOTE(review): mutate_each()/funs() are long-deprecated dplyr APIs
# (superseded by across()); kept as-is since this code targets 2016 dplyr.
collisions <- collisions.raw %>%
dplyr::rename(vehicle_type_code_1 = vehicle_type_code1,
vehicle_type_code_2 = vehicle_type_code2) %>%
dplyr::filter(!is.na(longitude),
!is.na(latitude)) %>%
dplyr::mutate(longitude = as.numeric(longitude),
latitude = as.numeric(latitude),
date = as.Date(date),
datetime = as.character(strptime(paste(date, time), "%Y-%m-%d %H:%M"))) %>%
dplyr::mutate_each(funs(as.numeric(.)), starts_with("number")) %>%
dplyr::select(-location.coordinates,
-location.type)
# Reorder columns
collisions <- collisions[sort(names(collisions))]
# Convert to SpatialPointsDataFrame
sp::coordinates(collisions) <- ~ longitude + latitude
sp::proj4string(collisions) <- "+proj=longlat +datum=WGS84 +no_defs +ellps=WGS84 +towgs84=0,0,0"
# Save
save(collisions, file = "data/nyccollisions.RData", compress = "xz")
|
484e85b5fd3a82ae0ff860ee323038fbe4e26918
|
c0c7c24ac4d773c9295cc9dd38fb6ea5cfe1d586
|
/R/rocRanks.R
|
84b182d4866a74502d5891d02937e0032d86a906
|
[] |
no_license
|
rstojnic/PWMEnrich-supplementary
|
f3f3ed73db3aeb0c76ac5c94a95e4ff2f029bcf9
|
1c8fe278aee1c1a4f13466c83e2f172ebc0b3cb5
|
refs/heads/master
| 2020-12-24T13:18:10.626712
| 2015-09-18T16:52:08
| 2015-09-18T16:52:08
| 42,733,871
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,002
|
r
|
rocRanks.R
|
# roc curve for ranked data
#' Draw a ROC curve for ranks
#'
#' Computes false/true positive rates at every integer rank cutoff. The
#' result has an all-zero first row and an all-one last row so the curve
#' spans the full unit square.
#'
#' @param ranks a list of expected TFs and their ranks
#' @param num.motifs the total number of motifs
#' @return a matrix with columns "fpr" and "tpr"
rocRanks = function(ranks, num.motifs){
  all.ranks <- unlist(ranks)
  top.rank <- max(all.ranks)
  roc <- matrix(0, ncol = 2, nrow = top.rank + 2)
  colnames(roc) <- c("fpr", "tpr")
  for (cutoff in seq_len(top.rank)) {
    hits <- sum(all.ranks <= cutoff)
    misses <- sum(all.ranks > cutoff)
    false.hits <- sum(vapply(ranks, function(x) cutoff - sum(x <= cutoff), numeric(1)))
    true.rejects <- sum(vapply(ranks, function(x) (num.motifs - cutoff) - sum(x > cutoff), numeric(1)))
    # sanity check: the four confusion-matrix cells must account for every motif
    stopifnot(hits + misses + false.hits + true.rejects == num.motifs * length(ranks))
    roc[cutoff + 1, "tpr"] <- hits / (hits + misses)
    roc[cutoff + 1, "fpr"] <- false.hits / (false.hits + true.rejects)
  }
  # anchor the curve at (1, 1)
  roc[top.rank + 2, ] <- 1
  roc
}
#' Draw a PRC curve for ranks
#'
#' Computes precision and recall at every rank cutoff from 1 to num.motifs,
#' alongside the raw true/false positive counts.
#'
#' @param ranks a list of expected TFs and their ranks
#' @param num.motifs the total number of motifs
#' @return a matrix with columns "prec", "recall", "tp", "fp"
prcRanks = function(ranks, num.motifs){
  all.ranks <- unlist(ranks)
  prc <- matrix(0, ncol = 4, nrow = num.motifs)
  colnames(prc) <- c("prec", "recall", "tp", "fp")
  for (cutoff in seq_len(num.motifs)) {
    hits <- sum(all.ranks <= cutoff)
    misses <- sum(all.ranks > cutoff)
    false.hits <- sum(vapply(ranks, function(x) cutoff - sum(x <= cutoff), numeric(1)))
    true.rejects <- sum(vapply(ranks, function(x) (num.motifs - cutoff) - sum(x > cutoff), numeric(1)))
    # sanity check: all motifs across all rank lists are accounted for
    stopifnot(hits + misses + false.hits + true.rejects == num.motifs * length(ranks))
    prc[cutoff, "prec"] <- hits / (hits + false.hits)
    prc[cutoff, "recall"] <- hits / (hits + misses)
    prc[cutoff, "tp"] <- hits
    prc[cutoff, "fp"] <- false.hits
  }
  prc
}
#' Calculate the ROC curve from score
#'
#' Sweeps 100 evenly spaced thresholds across the observed score range and
#' accumulates a confusion matrix over all sequences at each threshold.
#'
#' @param scores a matrix of scores, where columns are motifs and rows sequences
#' @param expected.tfs a list of expected TF names for each of the sequences
#' @return a matrix with columns "fpr" and "tpr", rows ordered from low to
#'   high fpr (i.e. high threshold first)
rocScore = function(scores, expected.tfs){
  stopifnot(length(expected.tfs) == nrow(scores))
  score.range <- range(scores)
  thresholds <- seq(score.range[1], score.range[2], length.out = 100)
  roc <- matrix(0, ncol = 2, nrow = length(thresholds))
  colnames(roc) <- c("fpr", "tpr")
  motif.names <- colnames(scores)
  for (i in seq_along(thresholds)) {
    counts <- c(tp = 0, fp = 0, fn = 0, tn = 0)
    for (j in seq_len(nrow(scores))) {
      expected <- expected.tfs[[j]]
      # motifs scoring strictly above the threshold are "called"
      called <- motif.names[scores[j, ] > thresholds[i]]
      not.called <- motif.names[scores[j, ] <= thresholds[i]]
      n.true.called <- sum(called %in% expected)
      n.true.rejected <- sum(!(not.called %in% expected))
      counts <- counts + c(tp = n.true.called,
                           fp = length(called) - n.true.called,
                           fn = length(not.called) - n.true.rejected,
                           tn = n.true.rejected)
    }
    # every (sequence, motif) pair lands in exactly one confusion cell
    stopifnot(sum(counts) == nrow(scores) * ncol(scores))
    roc[i, "tpr"] <- counts["tp"] / (counts["tp"] + counts["fn"])
    roc[i, "fpr"] <- counts["fp"] / (counts["fp"] + counts["tn"])
  }
  # reverse so the curve runs from the strictest threshold to the loosest
  roc[nrow(roc):1, ]
}
#' Calculate AUC from ROC curve
#'
#' Integrates the curve with the trapezoid rule.
#'
#' @param roc two column matrix with fpr (col 1) and tpr (col 2) rates,
#'   rows ordered along the curve
#' @return a single named numeric value ("AUC")
calcAuc = function(roc){
  # Guard: the previous `for (i in 2:nrow(roc))` loop iterated (2, 1) when
  # given fewer than two rows, indexing out of bounds; fail loudly instead.
  stopifnot(nrow(roc) >= 2)
  # Vectorized trapezoid rule: mean of adjacent heights times step width.
  heights = (roc[-1, 2] + roc[-nrow(roc), 2]) / 2
  widths = diff(roc[, 1])
  auc = sum(heights * widths)
  names(auc) = "AUC"
  auc
}
#' Plot the ROC curve for ranked data
#'
#' Draws (or overlays) the ROC curve computed by rocRanks(), plus a dashed
#' diagonal marking random-ranking performance.
#'
#' @param ranks a list of expected TFs and their ranks
#' @param num.motifs the total number of motifs
#' @param add if to add to existing plot
#' @param xlim,ylim axis limits for a fresh plot
#' @param ... further graphical parameters for plot()/lines()
#' @return the AUC of the plotted curve
plotRocRanks = function(ranks, num.motifs, add=FALSE, xlim=c(0,1), ylim=c(0,1), ...){
  curve <- rocRanks(ranks, num.motifs)
  if (!add) {
    plot(curve, type = "l", xlim = xlim, ylim = ylim, ...)
  } else {
    lines(curve, ...)
  }
  # dashed diagonal = expected performance of a random ranking
  abline(0, 1, col = "black", lty = 2)
  calcAuc(curve)
}
#' Plot the PRC (precision-recall) curve for ranked data
#'
#' Interpolates the precision-recall curve on the true-positive axis so that
#' the plotted curve has one point per achievable TP count, not just one per
#' rank cutoff, then plots precision vs recall and returns an AUC-PR value.
#'
#' @param ranks a list of expected TFs and their ranks
#' @param num.motifs the total number of motifs
#' @param add if to add to existing plot
#' @param xlim,ylim axis limits
#' @param prc optionally a precomputed matrix from prcRanks(); computed if NULL
#' @param rank.pch,rank.cex point style for the raw (uninterpolated) cutoffs
#' @param ... further graphical parameters
#' @return AUC-PR, named "AUC-PR"
plotPrcRanks = function(ranks, num.motifs, add=FALSE, xlim=c(0,1), ylim=c(0,1), prc=NULL, rank.pch=19, rank.cex=0.6, ...){
if(is.null(prc))
prc = prcRanks(ranks, num.motifs)
# perform correct interpolation by interpolating on true positive rate
# generate tp vector
# a list of intervals in increment of one
seq.list = apply(cbind(prc[1:(nrow(prc)-1),"tp"], prc[2:nrow(prc),"tp"]), 1, function(x) seq(x[1], x[2]))
# remove the last element unless the list is of length one
tp = sapply(seq.list, function(x){
if(length(x) == 1)
x
else
x[-length(x)]
})
tp = c(unlist(tp), prc[nrow(prc), "tp"]) # we always skip the last one, so include
fp = rep(0, length(tp))
# do the interpolation calculation
for(i in 1:length(tp)){
# value already present in the table, just use it
if(tp[i] %in% prc[,"tp"]){
# there might be multiple values!
# NOTE(review): leftover interactive debugging hook — browser() will halt a
# non-interactive session here if the counts ever disagree; consider stop().
if(length(which(tp == tp[i])) != length(which(prc[,"tp"] %in% tp[i])))
browser()
fp[which(tp == tp[i])] = prc[ prc[,"tp"] %in% tp[i] ,"fp"]
} else {
# find interval
# linear interpolation of fp between the two enclosing tp values
start = max(which(tp[i] > prc[,"tp"]))
end = min(which(tp[i] < prc[,"tp"]))
stopifnot( (end-start) == 1)
skew = (prc[end, "fp"] - prc[start, "fp"]) / (prc[end, "tp"] - prc[start, "tp"])
fp[i] = prc[start, "fp"] + skew * (tp[i] - prc[start, "tp"])
}
}
prec = tp / (tp + fp)
recall = tp / max(tp)
# NOTE(review): axis labels say precision on x and recall on y, matching the
# plot(prec, recall) call below; many conventions put recall on x — confirm.
if(add){
lines(prec, recall, ...)
} else{
#colnames(prc) = c("precision", "recall")
plot(prec, recall, type="l", xlim=xlim, ylim=ylim, xlab="precision", ylab="recall", ...)
}
points(prc[,1:2], cex=rank.cex, pch=rank.pch, ...)
#abline(0, 1, col="black", lty=2)
# AUC over the precision-sorted curve, relabelled as AUC-PR
structure(calcAuc(cbind(sort(prec), recall[order(prec)])), names="AUC-PR")
}
|
387ddee8654556a2cf8aec35f389515fafc5e47b
|
802cd19e325687a8d5021af2966aeefcba4e104f
|
/covid_rent_burden/global.R
|
46dfd4b4b47428af81aea1a4a0a269c602a36fca
|
[] |
no_license
|
murray-cecile/renter-credit
|
a47ecf9bfda2af073610c7485c57c952e09588e6
|
7ea015d359a3e666680f310c7b065a299b992e34
|
refs/heads/master
| 2021-04-18T13:27:17.891577
| 2020-05-22T14:55:19
| 2020-05-22T14:55:19
| 249,549,513
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,256
|
r
|
global.R
|
#===============================================================================#
# DEFINE OBJECTS AVAILABLE TO BOTH UI AND SERVER
#
# Cecile Murray
#===============================================================================#
library(tidyverse)
library(tidycensus)
#===============================================================================#
# DATA LOADING
#===============================================================================#
# Each .Rdata file loads pre-built data frames into the global environment;
# object names are fixed inside the files (geo_vulnerable_shares is used below).
load("data/industry_burden_table.Rdata")
load("data/geo_vulnerable_shares.Rdata")
load("data/geo_rent_by_burden.Rdata")
load("data/geo_age_by_burden.Rdata")
load("data/geo_raceth_by_burden.Rdata")
# create list of places for dropdown menu
# NOTE(review): GEOID < 60 presumably keeps states/DC and drops territories
# (FIPS >= 60) — confirm against the data.
geo_list <- geo_vulnerable_shares %>%
distinct(GEOID, NAME) %>%
filter(as.numeric(GEOID) < 60)
#===============================================================================#
# DEFINE VULNERABLE SECTORS
#===============================================================================#
# Industry sectors treated as economically vulnerable in the dashboard
vulnerable_sectors <- c("Non-essential retail",
"Food service",
"Mining",
"Entertainment",
"Non-essential manufacturing",
"Non-essential travel/transportation",
"Other services")
#===============================================================================#
# AESTHETIC THEMING
#===============================================================================#
# Terner colors
terner_gray <- "#5B6770"
terner_blue <- "#4E748B"
terner_gold <- "#B7B09D"
terner_navy <- "#011E41"
terner_red <- "#E74C39"
# define theme
# ggplot2 theme wrapper: applies the house style, then any caller overrides
# passed through ... (later theme() calls win).
terner_theme <- function(...) {
theme(panel.background = element_blank(),
panel.grid.major = element_line(color = "gray75",
size = rel(0.75),
linetype = "dotted"),
text = element_text(family = "Lato", size = 11),
axis.text = element_text(size = 11),
legend.text = element_text(size = 10),
plot.title = element_text(size = 14),
plot.subtitle = element_text(size = 12),
axis.ticks.x = element_blank(),
legend.background = element_blank()) +
theme(...)
}
|
55b16f501179ddbfe977cd6c600cb5a1758a72bf
|
68c863b9d62695383d15e7ffdc8a13a38afafdc3
|
/Exploratory_Data_Analysis/W4_CourseProject_2/plot4.r
|
1d20c0bb8c7f6af22680bef1cea9b483abe455f3
|
[] |
no_license
|
tonoplast/datasciencecoursera
|
4349e99109df4189d4dc5b79c023919aa412622b
|
31373c3157307637e0e6fcaf53d23d454e18ab85
|
refs/heads/master
| 2020-08-05T21:26:30.389511
| 2020-04-15T08:27:48
| 2020-04-15T08:27:48
| 212,716,963
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,924
|
r
|
plot4.r
|
# Fine particulate matter (PM2.5) is an ambient air pollutant for which there is
# strong evidence that it is harmful to human health. In the United States,
# the Environmental Protection Agency (EPA) is tasked with setting national ambient
# air quality standards for fine PM and for tracking the emissions of this pollutant
# into the atmosphere. Approximatly every 3 years, the EPA releases its database on
# emissions of PM2.5. This database is known as the National Emissions Inventory (NEI).
# You can read more information about the NEI at the EPA National Emissions Inventory web site.
#
# For each year and for each type of PM source, the NEI records how many tons of PM2.5
# were emitted from that source over the course of the entire year. The data that you will
# use for this assignment are for 1999, 2002, 2005, and 2008.
## a placeholder for current directory (if re-running the code)
# On a re-run within the same session, restore the directory captured below
if (exists("pwd")) {
setwd(pwd)
}
# clear environment
# NOTE(review): rm(list=ls()) and setwd() in a script are discouraged — they
# clobber the caller's session; kept as-is for this coursework script.
rm(list=ls())
# packages used
library(dplyr)
library(ggplot2)
library(stringr)
## a placeholder for current directory
pwd <- getwd()
# set working directory
# NOTE(review): machine-specific absolute path; must be edited per machine.
setwd("C:/Users/schung/Documents/Coursera/ExploratoryDataAnalysis_W4_Course_Project_2")
# downloading zip file
this_file <- "pm25_data.zip"
this_Url <- "https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2FNEI_data.zip"
download.file(this_Url, this_file, method= 'curl')
# unzip file
unzip(this_file)
# reading data
NEI <- readRDS("summarySCC_PM25.RDS") %>%
mutate(year = as.factor(year)) # making year factor to not mess with graph later
SCC <- readRDS("Source_Classification_Code.RDS") %>%
mutate(SCC = as.character(SCC)) # making SCC character to avoid warning when merging later
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~#
# Question 4.
# Across the United States, how have emissions from coal combustion-related sources changed from 1999–2008?
# Subset SCC dataset by selecting columns containing 'comb' (in SCC.Level.One) and 'coal' (in SCC.Level.Four) and attach it to NEI
NEI_SCC_coal_combust <- SCC %>%
select(SCC, SCC.Level.One, SCC.Level.Four) %>%
filter(str_detect(SCC.Level.One, fixed('comb', ignore_case=TRUE)) &
str_detect(SCC.Level.Four, fixed('coal', ignore_case=TRUE))) %>%
inner_join(NEI, by = c("SCC")) # joining data with NEI
# Render the figure straight to a PNG device
png(filename = "plot4.png", width = 6, height = 6, units = "in", res = 300)
# ggplot
# Bars sum Emissions per year (stat="identity" with repeated x values stacks)
g <- ggplot(aes(x = year, y = Emissions / 10^5), data = NEI_SCC_coal_combust)
g+geom_bar(stat="identity") +
labs(x = "Year", y = expression("PM2.5 Emissions " ~ (10^{5} ~ " tons"))) +
labs(title = "PM2.5 Emissions from Coal Combustion-related Sources in the US") +
theme(plot.title = element_text(hjust = 0.5, size=11, face="bold"))
dev.off()
# Answer: The emissions from coal combustion-related sources have decreased from 1999 to 2008 in the US.
|
1a4dcc94b3f1bbb840bacda86b374b28533bef4d
|
6d0dad9419cea35d2c5f8472455efd6448c99e67
|
/tests/testthat/test-DGR.R
|
95b0a89e0ba7c2dfe3e40340681a8bebcb1e99f6
|
[
"MIT"
] |
permissive
|
ralmond/CPTtools
|
224f0d720adf4003bd3a3cf76ca5ba8d1f315305
|
8ecbacd3997dd8a566048ade135b6c836fcc54a2
|
refs/heads/master
| 2023-08-07T18:45:07.408649
| 2023-07-18T15:56:35
| 2023-07-18T15:56:35
| 239,853,879
| 2
| 1
|
MIT
| 2023-07-18T15:56:38
| 2020-02-11T20:07:01
|
R
|
UTF-8
|
R
| false
| false
| 302
|
r
|
test-DGR.R
|
# NOTE(review): placeholder test suite — every test_that() block below is an
# empty stub, so no expectations run (empty blocks pass vacuously in
# testthat). TODO: add expectations for each calcDPCTable/calcDPCFrame case.
test_that("calcDPCTable", {
})
test_that("calcDPCFrame", {
})
test_that("calcDPCTable GR", {
})
test_that("calcDPCTable PC", {
})
test_that("calcDPCTable PC local-Q", {
})
test_that("calcDPCTable normalLink no parents", {
})
test_that("calcDPCTable normalLink parents", {
})
|
6fefcde71a6f9c7c42b5b67227c3d3cab353a730
|
c52c9afa5dc0edf982b26fd2c6fba3d8ad5f3eeb
|
/R/alert20-package.r
|
739a1b33425dba04de8781b800e2ce0ec8885bda
|
[] |
no_license
|
kevin--zhao/alert20
|
3eaa615be9f0eab821f341bb024b9cf83d63cd7b
|
e5e76af8d02858145c8dfc54594d151874b85e9d
|
refs/heads/master
| 2016-09-05T22:30:21.695751
| 2015-01-05T10:04:25
| 2015-01-05T10:04:25
| 28,801,461
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56
|
r
|
alert20-package.r
|
#' alert20
#'
#' Package-level documentation stub for the alert20 package; this file only
#' provides the \name/\docType entry consumed by roxygen2.
#'
#' @name alert20
#' @docType package
NULL
|
73298fccebaa1c5f0754da260c6123b286936fdb
|
34f5587daf6b1a7b34b9b694d900128923ec7d05
|
/R/ANN_predictor.R
|
6de0d44c14b5795ff9e8e6bf6eab1e04e897c0cf
|
[] |
no_license
|
BJLIYANLIANG/ids-1
|
2c6bd2ae6fc66fc4bc774b987193bfbb310b67ee
|
7c57610651c7c5aa494988a7a1b031e442477b98
|
refs/heads/master
| 2023-03-22T17:39:49.515208
| 2015-12-13T00:30:04
| 2015-12-13T00:30:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,556
|
r
|
ANN_predictor.R
|
#!/usr/bin/env Rscript
#---- Dependencies ----
# Install (on first use) and attach every required package.
# FIX(review): in the original, library() sat inside the install branch, so
# an already-installed package was never attached; the for-loop was also
# left unclosed, which made the whole script fail to parse.
dependencies <- c("neuralnet")
for (d in dependencies) {
  if (!(d %in% rownames(installed.packages()))) {
    install.packages(d)
  }
  library(d, character.only = TRUE)
}
#---- Config ----
# Build the run configuration for a given capture day: path of the trained
# model file plus the column names/classes used to parse one CSV record.
# Side effect: overwrites the global `conf` via <<-.
configure <- function(day = 14) {
  cfg <- NULL
  cfg$day <- day
  # net_file_prefix <- "M:/UiB/ATAI/UNB/net"
  model_prefix <- "models/net_"
  model_suffix <- ".rds"
  cfg$net_file <- paste0(model_prefix, day, model_suffix)
  # Column names of one input record, in CSV order
  cfg$input_colnames <- c(
    "duration",
    "protocol_type",
    "service",
    "flag",
    "src_bytes",
    "dst_bytes",
    "land",
    "wrong_fragment",
    "urgent",
    "count",
    "srv_count",
    "serror_rate",
    "srv_serror_rate",
    "rerror_rate",
    "srv_rerror_rate",
    "same_srv_rate",
    "diff_srv_rate",
    "srv_diff_host_rate",
    "dst_host_count",
    "dst_host_srv_count",
    "dst_host_same_srv_rate",
    "dst_host_diff_srv_rate",
    "dst_host_same_src_port_rate",
    "dst_host_srv_diff_host_rate",
    "dst_host_serror_rate",
    "dst_host_srv_serror_rate",
    "dst_host_rerror_rate",
    "dst_host_srv_rerror_rate",
    "src_ip",
    "src_port",
    "dst_ip",
    "dst_port",
    "end_time"
  )
  # Column classes, aligned with the names above (ch = character, nu = numeric)
  ch <- "character"; nu <- "numeric"
  cfg$input_colclasses <-
    c(nu, ch, ch, ch, rep(nu, 24), ch, nu, ch, nu, ch)
  # Publish as the global configuration
  conf <<- cfg
}
configure()
#---- Load files ----
# Load a trained net and its preprocessing parameters from an RDS file.
# Defaults to the path in the global `conf`; stops if a mandatory component
# is absent from the stored object.
load_net <- function(fname) {
  if (missing(fname)) {
    fname <- conf$net_file
  }
  cat("Loading net & preprocessing params from '", fname, "'...", sep = "")
  # RDS is enough for one object
  net <- readRDS(fname)
  # Older model files may lack the day; backfill from the active config
  if (!("day" %in% names(net)))
    net$day <- conf$day
  # Check presence of mandatory components
  required <- c("cols_used", "target_col", "neuralnet")
  absent <- setdiff(required, names(net))
  if (length(absent) != 0) {
    stop(paste("Missing mandatory columns:", absent))
  }
  cat("Done.\n\n")
  return(net)
}
#---- Prediction ----
# One-hot encode a single categorical column against the level set the net
# was trained with, then drop the original column. A value outside the known
# levels simply yields all-zero indicator columns.
binarize_column <- function(data, col, levels) {
  dummies <- data.frame(Reduce(cbind,
    lapply(levels, function(lv) (data[[col]] == lv) + 0)))
  names(dummies) <- paste0(col, "_", levels)
  data <- cbind(data, dummies)
  data[[col]] <- NULL
  data
}
# Prepare a raw record for the net: keep only the columns used in training,
# normalize numeric columns with the stored training centers/scales, and
# one-hot encode the categorical columns.
# FIX(review): the repeated protocol_type/flag/service blocks are folded into
# binarize_column(); drop = FALSE keeps a data frame even when only one
# training column matches.
preprocess <- function(net, data) {
  # This is maybe just an overhead for one row
  data <- data[, names(data) %in% net$cols_used, drop = FALSE]
  # Normalize some columns using the parameters saved at training time
  norm_cols <- names(net$norm_mins)
  data[, norm_cols] <-
    as.data.frame(scale(
      data[, norm_cols], center = net$norm_mins, scale = net$norm_scales
    ))
  # One-hot encode each categorical column the net recorded level sets for
  for (col in c("protocol_type", "flag", "service")) {
    level_key <- paste0("levels_", col)
    if (level_key %in% names(net)) {
      data <- binarize_column(data, col, net[[level_key]])
    }
  }
  return(data)
}
# Prediction
# Parse one CSV record (a single input line) into a one-row data frame using
# the globally configured column names/classes, preprocess it for the given
# net, and run the neural network on it.
comp <- function(net, line) {
  record <- read.table(
    text = line, sep = ",",
    header = FALSE,
    colClasses = conf$input_colclasses,
    col.names = conf$input_colnames
  )
  record <- preprocess(net, record)
  compute(net$neuralnet, record)
}
#---- Test ----
# Smoke test: load the day-14 net and classify one hard-coded record.
test_it <- function() {
  configure(14)
  net <- load_net()
  line <-
    "0,tcp,http_443,OTH,55,0,0,0,0,0,11,0.00,0.00,0.00,0.00,0.00,0.00,1.00,0,11,0.00,0.00,0.00,1.00,0.00,0.00,0.00,0.00,192.168.0.2,33208,199.16.156.120,443,2015-11-28T17:38:59"
  # FIX(review): comp() expects the whole net list and dereferences
  # net$neuralnet itself; passing net$neuralnet here handed compute() a NULL.
  result <- comp(net, line)
  print(result)
}
#test_it()
#---- Main ----
# Entry point: load one net per capture day, then read CSV records from
# stdin one line at a time, score each line with every net, and print the
# predicted class plus the raw per-net outputs.
main <- function () {
isRStudio <- Sys.getenv("RSTUDIO") == "1"
if (isRStudio) {
stop("This should be run from command line, RStudio detected")
}
# Load nets
# attacks[i] labels the attack type detected by the net trained on days[i]
attacks=c("Local","DoS","DDoS","BFSSH")
days <- c(13,14,15,17)
nets <- NULL
for (i in days)
{
configure(i)
nets[[length(nets)+1]] <- load_net()
}
results <- NULL
f <- file("stdin")
open(f)
while (length(line <- readLines(f,n = 1)) > 0) {
results<-NULL
for (i in 1:length(days))
{
# re-point the global conf at this net's day before parsing the record
configure(nets[[i]]$day)
result <- comp(nets[[i]], line)
results[[length(results)+1]]=result$net.result
}
# Threshold
# NOTE(review): results is grown with [[<- ; if net.result is a matrix this
# yields a list, and `results>0.95` / `round(results, 8)` would error —
# confirm net.result's type. Also paste(..., sep=" ") on one vector does not
# join elements (collapse=" " would); verify intended output format.
res <- ifelse(sum(results>0.95)==0,"Normal",attacks[which.max(results)])
#cat(line,",", sep="")
#print(results)
a <- paste(round(results, 8), sep=" ")
cat(res, a, "\n", sep=" ")
}
close(f)
cat("Dovi dopi ci")
}
main()
|
34cd55b23af8140e03aad4203dc1cb6d383f4994
|
1bfe8a5f4c781641ed35b230d90d2da1f9503ee4
|
/Renxu/evaluate_reproducibility_09.03.2017.R
|
41fabf031825676a892880d47b2f2be020752864
|
[] |
no_license
|
chang-che/Work
|
7b62814d0f718778fb6883e8f186ccc966ce6428
|
e9c94e58eb91d2af994287c1c229a5a486b29883
|
refs/heads/master
| 2020-03-10T20:50:25.235845
| 2018-05-06T19:16:07
| 2018-05-06T19:16:07
| 128,847,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,449
|
r
|
evaluate_reproducibility_09.03.2017.R
|
## compare the estimated log2FoldChange from DESeq2 output
## Correlates log fold changes between train/test/full splits to assess
## reproducibility, on all genes, on the 448 glmnet-input genes, and on
## the 30 glmnet-selected genes.
library(glmnet)
load(file = '/export/home/xurren/WTCProject/Results/lfc.RData')
load(file='/export/home/pfkuan/WTCproject/Epigenetics/Results/Results_Feb2017_RNASeq/cv.fit_DemoCellAdj_17Feb2017.RData')
dataPathRNASeq <- '/export/home/pfkuan/WTCproject/Epigenetics/Data/RNASeq/ProcessedData_533Samples/'
load(file=paste(dataPathRNASeq,'ExonCounts533.RData',sep=''))
genecounts = exoncounts
genes448 = rownames(coef(cv.fit))[-1] ## get 448 genes used as input for glmnet
ind = which(coef(cv.fit)!=0)
genes30 = rownames(coef(cv.fit))[ind]
# NOTE(review): dropping the first element assumes the intercept is among the
# nonzero coefficients (it usually is) — confirm for this fit.
genes30 = genes30[-1] ## get 30 selected genes by glmnet
# Logical masks over the count-matrix rows for the two gene subsets
genes448_ind = ( rownames(genecounts) %in% genes448 )
genes30_ind = ( rownames(genecounts) %in% genes30 )
# we have the log fold change from:
# a) on subset of 195 training data from the 330 samples
# b) on subset of 135 test data
# c) on 330 samples
# d) on training data from 203 samples
# e) on test data from 203 samples
# f) on 203 samples
# (a) vs (b)
# 1) all the genes
cor(lfc195, lfc135, use = "pairwise.complete.obs")
# 2) 448 genes
cor(lfc195[genes448_ind], lfc135[genes448_ind], use = "pairwise.complete.obs")
# 3) 30 genes
cor(lfc195[genes30_ind], lfc135[genes30_ind], use = "pairwise.complete.obs")
# (d) vs (e)
# 1) all the genes
cor(lfc102, lfc101, use = "pairwise.complete.obs")
# 2) 448 genes
cor(lfc102[genes448_ind], lfc101[genes448_ind], use = "pairwise.complete.obs")
# 3) 30 genes
cor(lfc102[genes30_ind], lfc101[genes30_ind], use = "pairwise.complete.obs")
# (c) vs (f)
# 1) all the genes
cor(lfc330, lfc203, use = "pairwise.complete.obs")
# 2) 448 genes
cor(lfc330[genes448_ind], lfc203[genes448_ind], use = "pairwise.complete.obs")
# 3) 30 genes
cor(lfc330[genes30_ind], lfc203[genes30_ind], use = "pairwise.complete.obs")
# log2FoldChange for FKBP5
lfc195[which(rownames(genecounts)=="FKBP5")]
lfc135[which(rownames(genecounts)=="FKBP5")]
lfc330[which(rownames(genecounts)=="FKBP5")]
lfc102[which(rownames(genecounts)=="FKBP5")]
lfc101[which(rownames(genecounts)=="FKBP5")]
lfc203[which(rownames(genecounts)=="FKBP5")]
##########################################################################################################################
# sign of lfc
# Cross-tabulate fold-change direction agreement between splits
# (a) vs (b)
table(train = (lfc195 > 0), test = (lfc135 > 0))
# (d) vs (e)
table(train = (lfc102 > 0), test = (lfc101 > 0))
# (c) vs (f)
table(train = (lfc330 > 0), test = (lfc203 > 0))
|
88707f0e56873d5297109bce44168340a523eadb
|
a66ce47010e69b5279e5c3df0b619a1fa27b7f41
|
/set up.R
|
1b1786450a5bca6ffe79a3f1c1ff698e85dbd2c8
|
[] |
no_license
|
Moohan/mode_of_birth_dashboard
|
b9696b3d2d20317378e3c88f34edf426e9c5c1a6
|
7a310287c58224b56164264e310b3e787a6b43d5
|
refs/heads/master
| 2020-05-28T06:43:45.785966
| 2019-09-30T15:13:11
| 2019-09-30T15:13:11
| 188,911,929
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 779
|
r
|
set up.R
|
# Download the NHS Scotland mode-of-birth delivery data plus council-area
# name lookups, join them, and cache the result as data.rds for the dashboard.
library(tidyverse)
library(janitor)
library(tidylog)
# Delivery-method records, one row per area/quarter/method
data <-
read_csv(
"https://www.opendata.nhs.scot/dataset/df10dbd4-81b3-4bfa-83ac-b14a5ec62296/resource/8654b6d8-9765-4ced-8e9b-4611aa4596eb/download/12.1_delivery.csv"
)
# Council-area code -> council / health-board name lookup (one row per code)
ca_names <-
read_csv(
"https://www.opendata.nhs.scot/dataset/9f942fdb-e59e-44f5-b534-d6e17229cc7b/resource/967937c4-8d67-4f39-974f-fd58c4acfda5/download/geography_codes_and_labels_ca2011_01042019.csv"
) %>%
clean_names() %>%
group_by(ca2011) %>%
summarise(
council = first(ca2011name),
health_board = first(hb2014name)
) %>%
ungroup()
# Tidy the delivery data and attach the readable area names
# NOTE(review): RA2704 is excluded — presumably a non-council aggregate code;
# confirm against the open-data code list.
data <- data %>%
clean_names() %>%
select(-simd_quintile_qf, -simd_version) %>%
filter(ca2011 != "RA2704") %>%
left_join(ca_names, by = "ca2011")
write_rds(data, "data.rds")
|
cdc576174df3cde34f6eca15a803e37abb3545ed
|
7204e3bfeea08327b4bda576b082b9dd5e254046
|
/man/BootstrapEdgeDistn.Rd
|
6d86103de5d956f9441965df4342d5a0a601a007
|
[] |
no_license
|
EpidemiologyDVM/duke-rnet-quick
|
fd2717c3146b2b4a38af494820bd28b1033a3d80
|
b979af770522aff93d736c0d0c4a9f0f7b17c0a7
|
refs/heads/main
| 2023-05-01T16:26:25.930895
| 2021-05-26T16:35:46
| 2021-05-26T16:35:46
| 371,102,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,469
|
rd
|
BootstrapEdgeDistn.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/Bootstrap_Edge_distn.R
\name{BootstrapEdgeDistn}
\alias{BootstrapEdgeDistn}
\title{Penalized Partial Correlation Distribution Estimates}
\usage{
BootstrapEdgeDistn(
x,
L1,
vertices = NULL,
subset = NULL,
B = 500,
n_b = 0.5,
replace = TRUE,
seed = NULL,
...
)
}
\arguments{
\item{x}{dataset for estimating MRFs. Supplied as data.frame.}
\item{L1}{Regularization penalty for inducing sparsity in networks}
\item{vertices}{Vertices to include in MRF. Must be a subset of names(x)}
\item{subset}{An expression to select a subset of records/rows}
\item{B}{The number of subsamples to draw}
\item{n_b}{The size of the subsamples. May be provided as an integer less than the number of rows in x, or as a proportion.}
\item{replace}{Logical. Is subsampling done with (T) or without (F) replacement.}
\item{seed}{Random seed value for reproducibility.}
\item{...}{other arguments to be passed to Rnet().}
}
\value{
A numeric matrix containing the estimated penalized partial correlations corresponding to the MRF edges (column) in each subsample (row).
}
\description{
Estimates MRFs from bootstrapped subsamples of data to approximate penalized partial correlation distributions
}
\examples{
BootstrapEdgeDistn(
x = NARMS_EC_DATA,
L1 = 0.25,
vertices = c('AMP', 'AMC', 'AXO', 'FOX', 'TIO', 'TET', 'CHL', 'GEN', 'STR'),
subset = expression(Year == 2009)
)
}
|
cf43b12ca153d1980a1cf70a0bab35d75ee10ce4
|
94b0d2bfb62c711572df8198b2f1529e8ce7927c
|
/experimenting with dist mat.R
|
3094278cec6e39d1292de89f7cc161b58a354349
|
[] |
no_license
|
GeneralRaspberry/Breaking-down-the-epidemic-code-for-the-purpose-of-diagnosing-the-dispersal-kernel-Chapter-1-
|
1b2e03ffa82eebfa99cc77175c7a028fc1c44c5b
|
45ba019509539b76d6a4c235ae6e059e630e3ca6
|
refs/heads/main
| 2023-05-31T01:34:16.324493
| 2021-06-13T18:51:48
| 2021-06-13T18:51:48
| 374,421,096
| 0
| 0
| null | 2021-06-13T17:10:02
| 2021-06-06T17:21:20
|
R
|
UTF-8
|
R
| false
| false
| 5,310
|
r
|
experimenting with dist mat.R
|
# Attach or detach a package by name.
#   fry   : TRUE to attach the package, FALSE to detach (and unload) it
#   leela : package name as a character string
# NOTE(review): attaching uses require(), which only warns when the package
# is missing. The detach branch matches search() entries with grepl(), so a
# partial name could match more than one entry; and detaching by position
# while iterating shifts later positions — confirm only one match is expected.
packagedelivery<-function(fry,leela){
if(fry == TRUE){
require(leela,character.only = TRUE)
} else{
# scan the search path for entries whose name contains `leela`
x<-grepl(leela,search())
n<-0
for (j in x){
n<-n+1
if (j == TRUE){
detach(pos=n, unload=TRUE, character.only = TRUE)
}
}
}
}
packagedelivery(TRUE,"spatstat")
packagedelivery(TRUE, "dplyr")
packagedelivery(TRUE, "ggplot2")
packagedelivery(TRUE, "ggpubr")
packagedelivery(TRUE,"RColorBrewer")
packagedelivery(TRUE, "rdist")
#generate a marks object
# Simulate host locations from a Thomas-style cluster process: Poisson
# parents, each with a Poisson number of daughters scattered uniformly
# within radiusCluster of its parent.
radiusCluster<-100
lambdaParent<-.02
lambdaDaughter<-30
randmod<-1
hosts<-1000
# NOTE(review): `dim` is the landscape side length here and shadows base::dim().
dim<-2000
# Number of parent points. NOTE(review): the intensity is applied to `dim`,
# not dim^2 (the window area) — confirm this matches the intended density.
numbparents<-rpois(1,lambdaParent*dim)
# Parents are kept radiusCluster away from the edges so daughter clusters fit.
xxParent<-runif(numbparents,0+radiusCluster,dim-radiusCluster)
yyParent<-runif(numbparents,0+radiusCluster,dim-radiusCluster)
numbdaughter<-rpois(numbparents,(lambdaDaughter))
sumdaughter<-sum(numbdaughter)
#theta<-2*pi*runif(sumdaughter)
# Daughter offsets: uniform angle plus sqrt-uniform radius gives a uniform
# scatter over the disc of radius radiusCluster.
thetaLandscape<-2*pi*runif(sumdaughter)
rho<-radiusCluster*sqrt(runif(sumdaughter))
# xx0=rho*cos(theta)
# yy0=rho*sin(theta)
xx0=rho*cos(thetaLandscape)
yy0=rho*sin(thetaLandscape)
# Replicate each parent once per daughter, then add the offsets.
xx<-rep(xxParent,numbdaughter)
yy<-rep(yyParent,numbdaughter)
xx<-xx+xx0
yy<-yy+yy0
cds<-data.frame(xx,yy)
is_outlier <- function(x) {
  # TRUE for coordinates falling outside the landscape window [0, dim].
  # NOTE(review): `dim` is the global landscape size defined above, which
  # shadows base::dim().
  below_window <- x < 0
  above_window <- x > dim
  above_window | below_window
}
# Drop daughter points that fell outside the landscape window.
cds<-cds[!(is_outlier(cds$xx)|is_outlier(cds$yy)),]
# Top up with extra daughter points until at least `hosts` remain in-window.
# NOTE(review): the x and y parent coordinates below are sampled
# independently, which fabricates new parent locations instead of re-using
# existing (x, y) parent pairs — confirm this is intended.
while (nrow(cds)<hosts){
  dif<-hosts-nrow(cds)
  extraparentxx<-sample(xxParent,dif,replace = TRUE)
  extraparentyy<-sample(yyParent,dif,replace = TRUE)
  extrathetaLandscape<-2*pi*runif(dif)
  extrarho<-radiusCluster*sqrt(runif(dif))
  newextracoodsxx<-extrarho*cos(extrathetaLandscape)
  newextracoodsyy<-extrarho*sin(extrathetaLandscape)
  extraxx<-extraparentxx+newextracoodsxx
  extrayy<-extraparentyy+newextracoodsyy
  cdsextra<-data.frame(xx=extraxx,yy=extrayy)
  cds<-rbind(cds,cdsextra)
}
#cds<-rbind(cds,cdsextra)
# Keep exactly `hosts` points, chosen at random.
sampleselect<-sample(1:nrow(cds),hosts,replace=F)
cds<-cds%>%slice(sampleselect)
# Replace a fraction `randmod` of the clustered points with completely
# random locations (randmod = 1 means a fully random landscape).
randfunction<-function(x){
  x<-runif(length(x),0,dim)
}
randselect<-sample(1:nrow(cds),floor(hosts*randmod),replace=F)
cds[randselect,]<-apply(cds[randselect,],1,randfunction)
# Build the spatstat point pattern and mark exactly one random host TRUE.
landscape<-ppp(x=cds$xx,y=cds$yy,window=owin(xrange=c(0,dim),yrange=c(0,dim)))
marks(landscape) <- sample(c(TRUE, rep(FALSE, hosts-1)))
###################################changing the status of a mark#############################################
##################################using pairdist##############################################################
#dist.mat <- exp(-pairdist(ppp)^b / theta^b)
#diag(dist.mat) <- NA
# Manually toggle which hosts are focal: host 16 on, host 502 off.
landscape$marks[16]<-TRUE
landscape$marks[502]<-FALSE
# Full n x n matrix of pairwise distances between all hosts.
dist.mat<-pairdist(landscape)
dl<-data.frame(landscape)
which(landscape$marks)
# Rows of the distance matrix for the focal (TRUE-marked) hosts only.
dist.mat.refined<-dist.mat[landscape$marks,]
dl<-cbind(dl,dist.mat.refined)
#################################plotting with ggplot########################################################
################using colour brewer#########################################################################
myPalette <- colorRampPalette(brewer.pal(11, "Spectral"))
# Colour each host by its distance from the focal host.
ggplot(dl)+geom_point(aes(x,y,colour=dist.mat.refined))+coord_equal()+theme_minimal()+
scale_color_gradientn(colors = myPalette(1000))
#################################creating a dispersal kernel################################################
dispersalgraphgenerator <- function(x, theta, beta, normtype) {
  # Build a data frame of host coordinates plus, for each focal
  # (TRUE-marked) host, the distances to every host and the corresponding
  # exponential dispersal-kernel values, raw and normalised.
  #
  # x        ppp object of host locations with logical marks (focal hosts).
  # theta    kernel scale parameter (exponential decay length).
  # beta     kernel height multiplier.
  # normtype 1 = analytic 2-D normalisation; otherwise empirical
  #          normalisation over all off-diagonal pairs.
  #
  # Returns the augmented data frame.
  #
  # Bug fixes vs. the original: the `x` argument was ignored (the global
  # `landscape` was read instead), the normtype == 1 branch depended on a
  # global `normfactor`, and the result was only returned implicitly via
  # the last assignment. All three are now self-contained/explicit.
  dist.mat <- pairdist(x)
  dl <- data.frame(x)
  dist.mat.refined <- dist.mat[x$marks, ]
  dl <- cbind(dl, dist.mat.refined)
  dist.mat.kernel <- exp(-dist.mat / theta) * beta
  dist.mat.kernel.refined <- dist.mat.kernel[x$marks, ]
  dl <- cbind(dl, dist.mat.kernel.refined)
  if (normtype == 1) {
    # Analytic normalising constant of a 2-D exponential kernel with scale
    # theta: alpha^2 / (2*pi) with alpha = 1/theta (matches the global
    # `normfactor` computed in the script below).
    normfactor <- (1 / (theta * theta)) / (2 * pi)
    normkernel <- dist.mat.kernel.refined * normfactor
    dl <- cbind(dl, normkernel, normfactor)
  } else {
    # Empirical normalisation: sum of kernel values over all ordered pairs
    # i != j (equivalent to the original O(n^2) double loop, computed as
    # total sum minus the diagonal).
    denominator <- sum(dist.mat.kernel) - sum(diag(dist.mat.kernel))
    normFactor2 <- length(x$marks) / denominator
    normkernel <- dist.mat.kernel.refined * normFactor2
    dl <- cbind(dl, normkernel, normFactor2)
  }
  dl
}
# Kernel parameters; normtype 2 selects the empirical-normalisation branch.
normtype<-2
theta<-500
beta<-50
# Analytic normalising constant of a 2-D exponential kernel (used when
# normtype == 1): alpha^2 / (2*pi) with alpha = 1/theta.
alphasqr<-1/(theta*theta)
normfactor<-alphasqr*1/(2*pi)
dl<-dispersalgraphgenerator(landscape,theta,beta,normtype)
plot_data_column <- function(data, column) {
  # Scatter plot of host coordinates coloured by the values in `column`,
  # using the shared Spectral palette (`myPalette`, defined above).
  base_plot <- ggplot(data) +
    geom_point(aes(x, y, colour = column)) +
    coord_equal() +
    theme_minimal()
  base_plot + scale_color_gradientn(colors = myPalette(1000))
}
# One panel per derived column (distance, raw kernel, normalised kernel,
# normalisation factor), arranged in a 2x2 grid and saved to disk.
myplots<-lapply(dl[,4:7], plot_data_column, data=dl)
plot.theta500.beta50<-ggarrange(myplots[[1]],myplots[[2]],myplots[[3]],myplots[[4]],nrow = 2,ncol = 2)
ggsave("theta500beta50.png",plot.theta500.beta50,width=50,height = 50, units= "cm")
################################recognising that the dist.mat function is simply an index call#############
# With two focal hosts, subset rows = focal hosts, columns = non-focal hosts.
landscape$marks[16]<-TRUE
landscape$marks[725]<-TRUE
dist.mat<-pairdist(landscape)
dist.mat.refined<-data.frame(dist.mat[landscape$marks,!landscape$marks])
##############################checking pdist functionality################################################
# Sanity check of rdist's pdist() on a small hand-made data frame.
xtest<-c(3,4,56,6,4,46,4,4,6,4,5,64,4,5)
ytest<-c(3,4,56,6,4,46,4,4,6,4,5,64,4,5)
dftest<-data.frame(xtest,ytest)
distcheck<-pdist(dftest)
|
7c9cfd44edc929c07092d66797bd845954de9ac1
|
11e0c11a69bed57c00859cb80b61a41d437c06f3
|
/users/analysis/SUS/non_param_tests.R
|
18c56ab19ec04f7e460b2b4aa0d9bb2d96813cc6
|
[] |
no_license
|
janisso/BeatBopper
|
7771a12b9c791873d3f8f0b2dd0dcff8a971212f
|
5bea60bbb273b49caea71de70978bf4cd08b67cf
|
refs/heads/master
| 2022-03-04T12:29:26.167493
| 2019-11-20T10:24:45
| 2019-11-20T10:24:45
| 157,095,197
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 606
|
r
|
non_param_tests.R
|
#NON parametric tests
#taken from http://rcompanion.org/handbook/F_01.html
# NOTE(review): clearing the workspace and restarting R from inside a script
# is an anti-pattern; .rs.restartR() only exists inside RStudio and, when it
# runs, the lines after it may not execute in the restarted session.
rm(list=ls(all=TRUE))
.rs.restartR()
library(psych)
library(effsize)
library(coin)
library(dplyr)
library(BSDA)
# NOTE(review): `Naive` and `Phase` are not defined in this file — they are
# presumably vectors already present in the workspace (which the rm() above
# would have deleted); the commented-out literals show example data.
A = Naive#c(2,4,3,1,2,3,3,2,3,1)
B = Phase#c(3,5,4,2,4,3,5,5,3,2)
# Cliff's delta effect size and Wilcoxon rank-sum test between the groups.
cliff.delta(A, B)
wilcox.test(A,B)
# Rebuild the data in long format: group factor g, values v.
g = factor(c(rep("A", length(A)), rep("B", length(B))))
v = c(A, B)
#r = rank(v)
# Exact Wilcoxon test from the coin package.
wilcox_test(v ~ g, distribution="exact")
# Mean rank per group.
r = rank(v)
dat = data.frame(g, r)
lapply((split(dat, dat$g)), mean)
rA = dat$r[dat$g=="A"]
rB = dat$r[dat$g=="B"]
mean(rA)
mean(rB)
# NOTE(review): magic numbers — looks like a standard error (value/sqrt(n));
# confirm where 2.3369 and n = 20 come from.
2.3369/sqrt(20)
# One-sample sign test of Phase against a hypothesised median of 3.
SIGN.test(Phase,
md = 3)
|
3bbd82b3cae4dd6a218bf7693f0c00678bb646ee
|
4caeaa501d9497ddddbf033904e019974e0e1b7e
|
/assets/2.temporal_variation.R
|
8d1e59fc49005a81e8983c06a51cdb11c859633c
|
[] |
no_license
|
macroevolution/workshop-OSU
|
b545e8612cb8d0185ec78e711c5e23482be04b09
|
a43b40a6868a4a2730f86eac1ea9470763d1517a
|
refs/heads/master
| 2021-01-11T14:01:22.563366
| 2017-09-05T16:59:19
| 2017-09-05T16:59:19
| 94,931,335
| 3
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 980
|
r
|
2.temporal_variation.R
|
#----------------------------------------------
# Exercise 4: Gamma statistic
# NOTE(review): read.tree() and gammaStat() are not base R (they match the
# ape package's API); ape is presumably attached earlier in the workshop.
warbs <- read.tree("data/warblers/warbs.tre")
gammaStat(warbs)
skinks <- read.tree("data/skinks/skinks216.tre")
gammaStat(skinks)
#-----------------------------------------------
# Exercise 4b: Simulate trees with time-dependent
# speciation and extinction rates
#
library(TESS)
# Speciation rate that decays exponentially with time.
lambda_fx <- function(x) return(1 * exp(x * -0.2))
# look at the rates that this function would generate:
tvec <- seq(0, 10, length.out=100)
rates <- lambda_fx(tvec)
plot(rates ~ tvec)
# We can simulate trees conditioned on a particular age:
tree <- tess.sim.age(n=1, age = 10, lambda = lambda_fx, mu = 0)
plot(tree[[1]])
gammaStat(tree[[1]])
# We can simulate trees dependent on a certain number of tips:
# argument max is the maximum possible age of the tree we allow:
tree2 <- tess.sim.taxa(n=1, nTaxa = 100, lambda = lambda_fx, mu = 0, max=25)
gammaStat(tree2[[1]])
|
a91ff6be94c4ffd82a35b06a14caf7427b650446
|
90f1fe26d2c514e8e07e9afb668a5bc7e48a8dce
|
/Complex_Predictors_Demo.R
|
ef96da76822c7a12229b9586c74021e34692bd93
|
[] |
no_license
|
awahl1/Stats_Course_Supplements
|
b7889c29fd732620d9594ab073747dd1e8d5edeb
|
4dd9903597394c9d261e6e7c7f924851216eecc0
|
refs/heads/master
| 2021-06-19T21:47:05.264559
| 2017-07-26T14:07:23
| 2017-07-26T14:07:23
| 98,427,959
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,762
|
r
|
Complex_Predictors_Demo.R
|
library(ggplot2)
library(car)
library(pastecs)
library(effects)
library(gvlma)
###Interaction between two interval/ratio predictors (multiple regression)
#Outcome variable is how crazy someone is
#Predictor 1: how many beers per week that person drinks
#Predictor 2: how many years they've been working at the university
# Demo 1: multiple regression of craziness on beers_per_week,
# years_working_at_uni, and their interaction.
crazy_data <-read.table("crazy_data.csv",sep="\t",header=TRUE,comment.char="",quote="")
head(crazy_data)
#We have added an interaction between two predictors using the ":"
crazy_model <- lm(craziness ~ beers_per_week + years_working_at_uni + beers_per_week:years_working_at_uni, data=crazy_data)
summary(crazy_model)
#Relationship between coefficients and prediction; how crazy are you after 10 beers when you've worked for 15 years?
# y = b0 + b1*beers_per_week + b2*years_working_at_uni + b3*beers_per_week*years_working_at_uni
1997.56 + 30.14*10 + 17.42*15 + -3.03*10*15
#Must drop an interaction before the main effects that make it up
drop1(crazy_model, test="F")
###Check Assumptions!
durbinWatsonTest(crazy_model)
#homoscedasticity normality, and high influence points
# NOTE(review): mfrow is set to 2x2 and then immediately reset to 1x1, so
# plot(crazy_model) draws its four diagnostic plots one at a time.
par(mfrow=c(2,2))
par(mar=c(4,4,4,4))
par(mfrow=c(1,1))
plot(crazy_model)
#No severe multicollinearity
vif(crazy_model) #Don't worry about multicollinearity for interaction; it's going to be high since the interaction is a product of 2 predictors which were already included in the model as separate terms
###Graphing
plot(allEffects(crazy_model))
#Create vectors of beers_per_week and years_working_at_uni values
xgrid <- seq(min(crazy_data$beers_per_week), max(crazy_data$beers_per_week), 0.5)
ygrid <- seq(min(crazy_data$years_working_at_uni), max(crazy_data$years_working_at_uni), 0.5)
#create dataframe of all possible combinations of the values in these two vectors
all_combos <- expand.grid(beers_per_week=xgrid, years_working_at_uni=ygrid)
head(all_combos)
predicted_outcomes <- predict(crazy_model, newdata=all_combos)
#Put the predicted outcomes into the data frame as a new column
all_combos$craziness <- predicted_outcomes
head(all_combos)
#Now we plot!
ggplot(all_combos, aes(x=beers_per_week, y=years_working_at_uni, z=craziness)) + geom_tile(aes(fill = craziness)) + scale_fill_gradient(low="white", high="black") + labs(x="Beers per Week", y="Years Working at University")
#Note that main effects can't be interpreted independent of the interaction
#Reporting: The final model’s formula was craziness ~ beers_per_week + years_working_at_uni + beers_per_week:years_working_at_uni. All main effects and the two-way interaction were very significant: p<0.001. When years_working_at_uni was at its baseline level (i.e., 0), there was a positive relationship between beers_per_week and craziness. However, this relationship eventually reversed as years_working_at_uni increased. The model was highly significant overall (F(3,96)=36060, p<0.001) and achieved a high variance explanation (mult. R2=0.9991, adj. R2=0.9991). All regression coefficients, as well as their standard errors, t scores, and p-values, are provided in the appendix, and checking of model assumptions revealed no problems.
###Numerical + categorical predictor example
# Demo 2: reaction times predicted by word FREQUENCY (numeric) and
# FAMILIARITY (3-level factor) with planned (custom) contrasts.
RTs <- read.table("RTs.csv", header=T, sep="\t", row.names=1) # </_inputfiles/05-2_reactiontimes.csv>
head(RTs)
#check levels; hi is ref level since it is alphabetically first
levels(RTs$FAMILIARITY)
#change default level to lo
RTs$FAMILIARITY <- relevel(RTs$FAMILIARITY, "lo"); levels(RTs$FAMILIARITY)
# contrast1: lo vs the average of hi and med; contrast2: hi vs med.
contrast1 <- c(-2, 1, 1)
contrast2 <- c(0, -1, 1)
contrasts(RTs$FAMILIARITY) <- cbind(contrast1,contrast2)
contrasts(RTs$FAMILIARITY)
# contrast1 contrast2
#lo -2 0
#hi 1 1
#med 1 -1
rt_model_planned <- lm(RT ~ FREQUENCY + FAMILIARITY + FREQUENCY:FAMILIARITY, data=RTs)
summary(rt_model_planned)
#How do we use the coefficients to make predictions about the outcome variable?
#644.244 + -16.091*FREQUENCY + -26.859*contrast1 ... 8.320*FREQUENCY*contrast1
#What does the p-value of the coefficient (beta) of contrast 1 tell us?
#What does the p-value of the coefficient (beta) of FREQUENCY:FAMILIARITYcontrast1 tell us?
plot(allEffects(rt_model_planned))
#Model selection
drop1(rt_model_planned, test="F")
# Refit without the (non-significant) interaction.
rt_model_planned_2 <- lm(RT ~ FREQUENCY + FAMILIARITY, data=RTs)
summary(rt_model_planned_2)
plot(allEffects(rt_model_planned_2))
drop1(rt_model_planned_2, test="F")
#Remember that when we used dummy contrasts, we violated the multicollinearity assumption here, which could inflate our standard errors. Let's check it again.
vif(rt_model_planned_2)
#And we can get statistics for the whole FAMILIARITY predictor:
Anova(rt_model_planned_2, type="III")
#Reporting: The final model’s formula was RT ~ FREQUENCY + FAMILIARITY. The overall effect of FAMILIARITY was significant (p=0.015). However, the only planned contrast that was significant was between the low familiarity condition and the medium and high familiarity conditions (p=0.004). Specifically, reaction times in the medium and high familiarity conditions were lower than in the low condition. While the reaction times in the medium condition were higher than in the high condition, this difference was not significant (p=0.52). There was also a marginally significant effect of FREQUENCY (p=0.051); as Frequency increased, reaction times decreased. Finally, the model was significant overall (F(3,51)=6.959, p<0.001) but did not explain a large amount of variance (mult. R2=0.2904, adj. R2=0.2487). All regression coefficients, as well as their standard errors, t scores, and p-values, are provided in the appendix. In addition, the appendix includes the results of an F test to investigate the overall effect of the FAMILIARITY predictor. Checking of model assumptions revealed no problems.
###2 Binary predictors
# Demo 3: "beer goggles" data reduced to a 2x2 design (gender x none/4 pints).
Goggles <- read.table("goggles.csv", sep=",", header=T, comment.char="", quote="")
head(Goggles)
# Drop the 2 Pints rows, then collapse its (now empty) factor level into
# "4 pints" by renaming the three original levels.
Goggles <- subset(Goggles, alcohol!="2 Pints")
levels(Goggles$alcohol) <- c("4 pints","4 pints","none")
Goggles$alcohol <- relevel(Goggles$alcohol, "none")
goggles_model <- lm(attractiveness ~ gender + alcohol + gender:alcohol, data=Goggles)
summary(goggles_model)
#By now, we've talked about contrasts several times
#Contrasts are just numbers we assign to levels of our categorical predictors (female = 0, male = 1)
#They allow us to place our non-numerical categories in a numerical multi-dimensional space
#We want to do this so we can connect our categories' means with lines
#The slopes of these lines tell us about how different the means are
#
#Different kinds: dummy, custom
#R uses dummy contrasts by default--THIS IS FINE FOR FINAL ASSIGNMENT
#With dummy contrasts, REFERENCE CATEGORY is at position 0 and each non-reference category is at position 1 on its own axis
#Thus, every slope (coefficient) of a main effect represents the difference between the reference category mean and the mean of that category, when the other main effects are at their reference levels as well
#
#Intercept: all predictors at their reference level
#genderMale: slope (difference) from female to male WHEN ALCOHOL AT ITS REFERENCE LEVEL
#alcohol4 pints: slope (difference) from none to 4 pints when ALCOHOL AT ITS REFERENCE LEVEL
#Well what about difference from female to male when alcohol = 4 pints?
#Or, what about difference from none to 4 pints when gender = male?
#If there is not a significant interaction, then there is no difference in the slope
#If there is a significant interaction, then the slope must be adjusted by the value of the interaction coefficient
plot(allEffects(goggles_model))
#Note that our x-axis is gender and our grouping variable is alcohol
#In the left panel, the line represents the difference between males and females when alcohol=none (this corresponds to 6.250 in the table)
#In the right panel, the line represents the difference between females and males when alcohol=4 pints. We don't get a coefficient for this in our table. Rather, to get this line's slope, we subtract:
6.250-28.125
#-21.875
#This is a really easy plot to generate and gives you lots of information, such as interactions.
#However, it arbitrarily chooses an x-axis variable and a grouping variable.
#But the interaction coefficient is an adjustment on both main effects.
#Thus, the difference between no alcohol and 4 pints when gender=male would be:
-3.125-28.125
#-31.25
#It's easier to see this relationship if we put alcohol on the x-axis and use gender as the grouping variable
ggplot(Goggles, aes(alcohol, attractiveness)) + stat_summary(fun.y=mean, geom="point") + stat_summary(fun.y=mean, geom="line", aes(group=1)) + stat_summary(fun.data=mean_cl_boot, geom="errorbar", width=0.2, colour="Blue") + facet_wrap(~gender)
#means for each combination of factor levels
by(Goggles$attractiveness, list(Goggles$gender, Goggles$alcohol), stat.desc)
#female none: 60.625
#male none: 66.875
#female 4 pints: 57.5
#male 4 pints: 35.625
35.625-66.875
35.625-57.5
#Problem with dummy contrasts: Significance of coefficients can change depending on what categories you use as your reference categories.
drop1(goggles_model, test="F")
#Can't drop anything, but if we were going to, we would remove the interaction
goggles_model_2 <- lm(attractiveness ~ gender + alcohol, data=Goggles); summary(goggles_model_2)
drop1(goggles_model_2, test="F")
#The final model’s formula was attractiveness ~ gender + alcohol + gender:alcohol. The main effects were not sigificant (p>0.5), though the interaction between gender and alcohol was significant (p<0.001). Specifically, compared to females, males showed a significant decrease in the attractiveness of the people they talked to as they moved from no beers to 4 beers. Put another way, compared to no beers, attractiveness of the people talked to when 4 beers were consumed decreased from females to males. The model was highly significant overall (F(2,29)=11.26, p<0.001, mult. R2=0.4371, adj. R2=0.3983). All regression coefficients, as well as their standard errors, t scores, and p-values, are provided in the appendix. Checking of model assumptions revealed no problems.
###Nominal Predictor + Binary Predictor
# Demo 4: full 3-level alcohol factor (None / 2 Pints / 4 Pints) crossed
# with gender, using R's default dummy contrasts.
Goggles <- read.table("goggles.csv", sep=",", header=T, comment.char="", quote="")
head(Goggles)
levels(Goggles$alcohol) #2 pints is the baseline; switch to None
Goggles$alcohol <- relevel(Goggles$alcohol, "None"); levels(Goggles$alcohol)
levels(Goggles$gender) #Female is the reference level; this is fine
###Dummy contrasts
goggles_model <- lm(attractiveness ~ gender + alcohol + gender:alcohol, data=Goggles)
summary(goggles_model)
plot(allEffects(goggles_model))
#None versus 2 pints interaction plots
Goggles_2p <- subset(Goggles, alcohol!="4 Pints")
line_plot <- ggplot(Goggles_2p, aes(alcohol, attractiveness, colour=gender)) + stat_summary(fun.y=mean, geom="point") + stat_summary(fun.y=mean, geom="line", aes(group=gender)) + stat_summary(fun.data=mean_cl_boot, geom="errorbar", width=0.2, aes(group=gender)); line_plot
#Can also place gender on the x-axis and group by amount of alcohol
line_plot <- ggplot(Goggles_2p, aes(gender, attractiveness, colour=alcohol)) + stat_summary(fun.y=mean, geom="point") + stat_summary(fun.y=mean, geom="line", aes(group=alcohol)) + stat_summary(fun.data=mean_cl_boot, geom="errorbar", width=0.2, aes(group=alcohol)); line_plot
#None versus 4 pints interaction plots
Goggles_4p <- subset(Goggles, alcohol!="2 Pints")
line_plot <- ggplot(Goggles_4p, aes(alcohol, attractiveness, colour=gender)) + stat_summary(fun.y=mean, geom="point") + stat_summary(fun.y=mean, geom="line", aes(group=gender)) + stat_summary(fun.data=mean_cl_boot, geom="errorbar", width=0.2, aes(group=gender)); line_plot
###Homogeneity of variance assumption
leveneTest(Goggles$attractiveness, interaction(Goggles$alcohol, Goggles$gender), center=median)
###Model selection
drop1(goggles_model, test="F")
###Overall p-values for the predictors
Anova(goggles_model, type="III")
###Reporting for dummy contrasts
#The final model’s formula was attractiveness ~ gender + alcohol + gender:alcohol. The main effects were not sigificant (p>0.5), though the interaction between gender and alcohol was significant (p<0.001). Planned contrasts revealed no significant differences between consuming no beer and 2 pints, and between consuming no beer and 4 pints. There was also no significant interaction between gender and consuming no beer versus 2 beers. However, there was a significant interaction between gender and consuming no beers versus 4 beers; specifically, compared to females, males showed a significant decrease in the attractiveness of the people they talked to as they moved from no beers to 4 beers. The model was highly significant overall (F(5,42)=13.2, p<0.001) and achieved a high variance explanation (mult. R2=0.6111, adj. R2=0.5648). All regression coefficients, as well as their standard errors, t scores, and p-values, are provided in the appendix. In addition, the appendix includes the results of F tests to investigate the overall effects of the 2 predictors. Checking of model assumptions revealed no problems.
###Planned contrasts
# Custom contrasts: gender coded -1/1; alcohol coded as "none vs any
# alcohol" and "2 pints vs 4 pints".
F_vs_M <- c(-1, 1)
contrasts(Goggles$gender) <- cbind(F_vs_M); contrasts(Goggles$gender)
None_vs_two_and_four <- c(-2, 1, 1)
two_vs_four <- c(0, -1, 1)
contrasts(Goggles$alcohol) <- cbind(None_vs_two_and_four, two_vs_four); contrasts(Goggles$alcohol)
goggles_model_planned <- lm(attractiveness ~ gender + alcohol + gender:alcohol, data=Goggles)
summary(goggles_model_planned)
#Linear model equation
#58.33 + -1.875*F_vs_M + -2.708*None_vs_two_and_four + -9.062*two_vs_four + -2.5*F_vs_M*None_vs_two_and_four + -6.562*F_vs_M*two_vs_four
#How do we actually interpret the coefficients of interactions?
#-2.5*F_vs_M*None_vs_two_and_four
# Worked examples of the interaction term at each cell's contrast codes:
#Female None
-2.5*-1*-2
#Male None
-2.5*1*-2
#Female 2 pints
-2.5*-1*1
#Male 2 pints
-2.5*1*1
###In sum, interaction coefficients can be very hard to interpret just by looking at them! They are NOT slopes! Also helpful to look at plots and means!
###Plots
#Create a new factor corresponding to "None" vs "Alcohol"
head(Goggles)
Goggles$None_vs_alc <- rep("None",dim(Goggles)[1])
indexes <- which(Goggles$alcohol!="None")
Goggles$None_vs_alc[indexes] <- "Alcohol"
Goggles$None_vs_alc <- factor(Goggles$None_vs_alc, levels=c("None", "Alcohol"))
#F_vs_M:None_vs_two_and_four
# Overall trend, then the same plot split by gender to visualise the
# gender-by-(none vs alcohol) interaction.
line_plot <- ggplot(Goggles, aes(None_vs_alc, attractiveness)) + stat_summary(fun.y=mean, geom="point") + stat_summary(fun.y=mean, geom="line", aes(group=1)) + stat_summary(fun.data=mean_cl_boot, geom="errorbar", width=0.2); line_plot
line_plot + stat_summary(fun.y=mean, geom="point", aes(group=gender, colour=gender)) + stat_summary(fun.y=mean, geom="line", aes(group=gender, colour=gender)) + stat_summary(fun.data=mean_cl_boot, geom="errorbar", width=0.2, aes(group=gender, colour=gender))
###F_vs_M:two_vs_four
Goggles_alc <- subset(Goggles, alcohol!="None")
head(Goggles_alc)
line_plot_2 <- ggplot(Goggles_alc, aes(alcohol, attractiveness)) + stat_summary(fun.y=mean, geom="point") + stat_summary(fun.y=mean, geom="line", aes(group=1)) + stat_summary(fun.data=mean_cl_boot, geom="errorbar", width=0.2); line_plot_2
line_plot_2 + stat_summary(fun.y=mean, geom="point", aes(group=gender, colour=gender)) + stat_summary(fun.y=mean, geom="line", aes(group=gender, colour=gender)) + stat_summary(fun.data=mean_cl_boot, geom="errorbar", width=0.2, aes(group=gender, colour=gender))
# Cell and marginal means referenced by the contrasts above.
by(Goggles$attractiveness, Goggles$gender, stat.desc)
#Female mean: 60.2083
#Male mean: 56.4583
by(Goggles$attractiveness, Goggles$alcohol, stat.desc)
#None mean: 63.75
#2 pints mean: 64.6875
#4 pints mean: 46.5625
#Helpful to have the mean of 2 and 4 pint conditions since the None_vs_two_and_four contrast collapses these two levels
(64.6875 + 46.5625)/2
#55.625
by(Goggles$attractiveness, list(Goggles$gender, Goggles$alcohol), stat.desc)
#Female 2 pints: 62.5
#Female 4 pints: 57.5
#Female 2 and 4 pints:
(62.5+57.5)/2
#60
#Male 2 pints: 66.875
#Male 4 pints: 35.625
#Male 2 and 4 pints:
(66.875+35.625)/2
#51.25
#Female None: 60.625
#Male None: 66.875
#Get p-values for whole predictors; note t test results are same as f test results for a single degree of freedom
Anova(goggles_model_planned, type="III")
#Reporting
#The final model’s formula was attractiveness ~ gender + alcohol + gender:alcohol. The main effect of gender was not sigificant (p>0.05) while the main effect of alcohol was significant (p<0.001). Also, the interaction between gender and alcohol was significant (p<0.001). Planned contrasts revealed significant differences between consuming no beer versus consuming 2 or 4 pints, as well as between consuming 2 versus 4 pints. In both cases, drinking more beer led to a decrease in attractiveness of the person being talked to.
#However, these main effects must be interpreted in the context of significant interactions between both of these effects and the effect of gender. Specifically, for the contrast between no alcohol and 2 or 4 pints, the negative trend becomes more severe for men, but less severe for women. In fact, examining the interaction plots and confidence intervals suggests that, for females, the negative trend disappears altogether.
#For the contrast between 2 a 4 pints, again the negative trend becomes more severe for men and less severe for females. And again, the overlap between the confidence intervals for females in the 2 pint and 4 pint conditions suggests that the negative trend is not even reliable.
#Finally, the model overall was significant (F(5,42)=13.2, p<.001) and achieved a high level of variance explanation (multiple r2=0.6111, adjusted r2=0.5648). All regression coefficients, as well as their standard errors, t scores, and p-values, are provided in the appendix. In addition, the appendix includes the results of F tests to investigate the overall effects of the 2 predictors, as well as all the means under comparison. Checking of model assumptions revealed no problems.
###Random Intercepts
# Demo 5: mixed model with per-subject random intercepts for repeated
# blood-pressure measurements.
PressureData <- read.table("BloodPressure.csv", sep="\t", header=T, comment.char="", quote="")
head(PressureData)
#Subjects measured their blood pressure during the week leading up to an election.
ggplot(PressureData, aes(Hour, BloodPressure)) + geom_point() + geom_smooth(method="lm")
#What is the slope and intercept of the line of best fit?
summary(lm(BloodPressure~Hour, data=PressureData))
#Problem: violation of independence assumption! (each subject provides multiple data points)
#Therefore, variance is shared across datapoints; some people may just have higher/lower baseline blood pressure.
#To account for this, we can give each subject their own intercept (and thus their own regression line).
#Run a mixed model with random intercepts
library(lme4)
library(lmerTest)
m2 <- lmer(BloodPressure ~ Hour + (1|Subject), data=PressureData)
summary(m2)
#Calculate r2
library(MuMIn)
r.squaredGLMM(m2)
#you can get the random intercepts like this
coef(m2)
# Hand-drawn per-subject lines; intercepts below are the values taken from
# coef(m2) for this dataset, all sharing the fixed-effect slope.
ggplot(PressureData, aes(Hour, BloodPressure, color=Subject)) + geom_point() + geom_smooth(method="lm") + geom_abline(intercept=65.72, slope=.59, color="red") + geom_abline(intercept=89.86, slope=.59, color="red") + geom_abline(intercept=101.13, slope=.59, color="red") + geom_abline(intercept=156.29, slope=.59, color="red") + geom_abline(intercept=128.95, slope=.59, color="red") + geom_abline(intercept=108.39, slope=.59, color="green")
#Can you only use random intercepts for subjects? NO!
#items are often also treated as a random effect
#e.g., 20 pictures that participants must look at in an MRI experiment
|
93a7ff625fb0d4c814c8d3c2b1a99dfa94a658e3
|
b8712324e3195a76d138f8063ad3638ff7dc2e8a
|
/twitter_bot.R
|
b631b95315c151425129b34b7aa7266ccf4f7e22
|
[] |
no_license
|
albnd/lgbtrights_bot
|
c4d9678b119b86dfd374e3360c09c20ed2540efd
|
579fe8c20143b69f81ceb1b810f2f55b644ab849
|
refs/heads/master
| 2020-03-19T06:53:03.567250
| 2018-06-04T08:03:44
| 2018-06-04T08:03:44
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,094
|
r
|
twitter_bot.R
|
#Assuming that the table was nicely formatted, which it isn't
# Twitter bot: repeatedly tweets a random cell from one of Wikipedia's
# "LGBT rights" tables, attaching the country's flag image when the country
# name can be matched against the country-code JSON.
library(rtweet)
library(here)
library(jsonlite)
library(rvest)
setwd(here())
wait_in_r <- TRUE   # TRUE: sleep inside R between tweets; FALSE: tweet once and exit
wait_duration <- 211*60 #Number of seconds to wait if wait_in_r == true
flag <- FALSE       # whether a flag image was found for the current country
continents_vec <- c('https://en.wikipedia.org/wiki/Template:LGBT_rights_table_Africa',
                    'https://en.wikipedia.org/wiki/Template:LGBT_rights_table_Americas',
                    'https://en.wikipedia.org/wiki/Template:LGBT_rights_table_Asia',
                    'https://en.wikipedia.org/wiki/Template:LGBT_rights_table_Europe',
                    'https://en.wikipedia.org/wiki/Template:LGBT_rights_table_Oceania')
#Flags files and json sourced from https://github.com/hjnilsson/country-flags
codes <- fromJSON('countrycodes.json', simplifyVector = FALSE)
# Invert the code -> name mapping so a country code can be looked up by name.
countries <- as.list(names(unlist(codes)))
names(countries) <- unlist(codes)
##twitter token should be generated with the instructions here(http://rtweet.info/articles/auth.html), but I found it easier to just load the token rather than making it an environment variable
twitter_token <- readRDS('twitter_token.RDS')
A <- FALSE
while (!A) {
  # Random continent page -> keep only real (multi-row) tables -> random table.
  table_list <- html_table(read_html(continents_vec[sample(length(continents_vec),1)]), fill=TRUE)
  table_list <- table_list[which(unlist(lapply(table_list, nrow)>1))]
  df <- table_list[[sample(length(table_list),1)]]
  #Chose the values to tweet (column 1 is the country name, so skip it)
  column_chosen <- sample(seq(2,ncol(df)), 1)
  row_chosen <- sample(nrow(df), 1)
  string <- df[row_chosen, column_chosen]
  string <- gsub('\\[.+\\]', '', string) #Get rid of any citations
  string <- gsub('\\\n', ' ', string) #get rid of any newlines
  string <- gsub('\\/', '', string)
  string <- tolower(string)
  # Bug fix: restore the abbreviation "UN" after tolower(). The original
  # replacement 'UN' dropped the surrounding spaces, fusing the word into
  # its neighbours (e.g. "recognized by theUNsince").
  string <- gsub(' un ', ' UN ', string)
  if(is.na(string)){
    next #skip any truly empty fields
  }
  if(nchar(string)<2){
    next #Skip any fields which are blank
  }
  rights_name <- gsub('\\.', ' ', colnames(df)[column_chosen])
  # Strip parenthesised qualifiers and newlines from the country name.
  country <- df[row_chosen, 1]
  country <- gsub(' \\(.+$', '', country)
  country <- gsub('\\(.+$', '', country)
  country <- gsub('\\\n', ' ', country)
  outstring <- paste(country, '. ', rights_name, ': ', string, ' #LGBTQ #equality', sep = '')
  # NOTE(review): `tweetable` is computed but never used below — presumably
  # this length check was meant to gate post_tweet(); confirm intent.
  if(nchar(outstring)<=240){
    tweetable <- TRUE
  }else{
    tweetable <- FALSE
  }
  if(!is.null(countries[[country]])){ #If we find the country name in the list of countries
    flag <- TRUE
    flagname <- paste('png1000px/',countries[[country]] ,'.png', sep='')
  }
  #Now to send the tweet, with the flag image attached when one was found
  if (flag) {
    post_tweet(status = outstring, token = twitter_token,
               in_reply_to_status_id = NULL, media = flagname)
  }else{
    post_tweet(status = outstring, token = twitter_token,
               in_reply_to_status_id = NULL)
  }
  flag <- FALSE #Reset the flag variable in case we can't find it in the next iteration
  print(outstring)
  #End, or wait for next iteration
  print(Sys.time())
  if (wait_in_r) {
    Sys.sleep(wait_duration) #The number of seconds to sleep for
  }else{
    A <- TRUE
  }
}
|
ca2fb6be43c9285c87f799b8afc1cd2054d20c7e
|
bb8f905a8fd6c482f5e5201eda2148de4ebebcea
|
/src/R/load_exac.R
|
5cbf376d25d004ad0e4c1349888ac56fa5c24285
|
[] |
no_license
|
birndle/ExAC_analysis
|
6063d85524838119dfa23dd69618e23363919af0
|
ad4840de84792d9d02630c356e210fa0728f9f1f
|
refs/heads/master
| 2021-01-25T05:34:17.918872
| 2015-04-07T05:34:50
| 2015-04-07T05:34:50
| 32,942,036
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
r
|
load_exac.R
|
# Load the PASS-filtered ExAC variant table and subset to high-confidence
# loss-of-function calls with no LoF flags.
exac_all <- get(load('R_data/exac_pass.RData'))
# Bug fix: the original subset() referenced `exac`, which is never defined
# in this file; the object loaded above is `exac_all`.
exac_lof <- subset(exac_all, lof == 'HC' & is.na(lof_flags) & use)
pops <- c('afr', 'fin', 'nfe', 'amr', 'sas', 'eas')
# Max allele number per population divided by 2 = individuals genotyped.
pop_sizes <- apply(exac_lof[,paste("an",pops,sep="_")], 2, max)/2 # in people number
# Reference world population sizes (millions) per ancestry group; groups
# with no published figure are NA.
world_pop_sizes <- data.frame(
row.names=c("eas", "sas", "nfe", "Middle Eastern", "afr", "amr", "Oceanic", "DiverseOther", "AfricanEuropeanAdmixed"),
world=c(1932, 2085, 1145, 410, 1022, 529, 38, NA, NA)
)
|
1102b2c9c948ec17a9bf4a3a17b24721f94ed460
|
665f3842c37fc5730c9cefee95ff480cabf0ae36
|
/man/estimateTau.Rd
|
5d66219b9ae8ce98e6a47d0e696f4a072bba4848
|
[] |
no_license
|
BarkleyBG/multilevelMatching
|
c0bc128c81d1513c52dda7eafa3b6bc7b6ee1aa6
|
58e7a0b2a612671d4d0214f0d545ca90bd599d70
|
refs/heads/develop
| 2021-01-23T04:13:49.838974
| 2018-03-03T16:54:11
| 2018-03-03T16:54:11
| 86,175,312
| 0
| 0
| null | 2018-01-26T17:04:46
| 2017-03-25T17:25:14
|
R
|
UTF-8
|
R
| false
| true
| 1,540
|
rd
|
estimateTau.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/estimateTau.R
\name{estimateTau}
\alias{estimateTau}
\title{Calculate the estimates of population-level estimands (e.g., tau).}
\usage{
estimateTau(trt_levels, mean_Yiw, num_trts, num_contrasts, N, M_matches, Yiw,
Kiw, sigsqiw, W, ...)
}
\arguments{
\item{trt_levels}{vector of the unique levels of treatment W}
\item{mean_Yiw}{vector of the estimated mean w.r.t. each treatment w}
\item{num_trts}{a scalar, the number of treatment levels}
\item{num_contrasts}{a scalar, the number of tau contrasts to estimate}
\item{N}{A scalar for the number of rows in the data}
\item{M_matches}{Number of matches per unit for imputing potential outcomes,
as in Abadie and Imbens 2006. Currently can only support M=1.}
\item{Yiw}{Matrix of all imputed potential outcomes}
\item{Kiw}{Vector of times each unit is matched to}
\item{sigsqiw}{Estimated sigma squared, from AI2006}
\item{W}{a treatment vector (1 x n) with numerical values indicating
treatment groups}
\item{...}{the dots argument}
}
\value{
A list, including the tidy dataframes estimates of target estimands
}
\description{
This is a major plumbing function for the package. All matching procedures
are carried out in \code{\link{matchImputePO}} (for point estimates) and
\code{\link{estSigSq}} (for variance), which are subfunctions of
\code{\link{matchAllTreatments}}. Most of the necessary arguments to this
function are output from these two subfunctions.
}
\seealso{
\code{\link{multiMatch}}
}
|
d9638f8742487445ae97411f54c7ee86b57dc371
|
0ead36631dc316f9f7577dd59b4fa7cb610829f3
|
/man/ET.default.Rd
|
eebbdd57d7d196dbf3496b8a1f73c4eff81aab41
|
[] |
no_license
|
cran/Evapotranspiration
|
81d2a94ac383d12fdbc9ccf1960caba62f23ee01
|
8e4a3936d6747728d4812f9415cdf01d47167329
|
refs/heads/master
| 2022-01-24T11:17:42.959617
| 2022-01-10T04:22:41
| 2022-01-10T04:22:41
| 17,679,056
| 8
| 12
| null | 2017-06-07T03:24:09
| 2014-03-12T18:50:23
|
R
|
UTF-8
|
R
| false
| false
| 570
|
rd
|
ET.default.Rd
|
\name{ET.default}
\alias{ET.default}
\title{ET Formulations (Internal)
}
\description{
An internal function to select the ET formulation to use when \code{\link{ET}} is called without a user-specified ET model.
See \code{\link{ET}} for details.
}
\author{
Danlu Guo
}
\seealso{
\code{\link{ET}}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{evapotranspiration}
\keyword{open-water evaporation}
\keyword{potential evapotranspiration}
\keyword{internal}
% __ONLY ONE__ keyword per line
|
d19308569f445079aeee7d8018b0f82df4156b5a
|
dbc6954f25fb1fa4f584eb3e6c7f6b90d1766fbb
|
/MVE_BASED/models.r
|
50a31ed583099f0352fff79182c26e8afb113fca
|
[] |
no_license
|
RannieWan/diversity_in_e
|
05da7390903972e3574861d61fa3344f3c668611
|
72649009420c0ebc068ecd81c2f36a4dc6f3b6a6
|
refs/heads/master
| 2023-03-10T17:07:20.147042
| 2021-03-01T08:54:23
| 2021-03-01T08:54:23
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,460
|
r
|
models.r
|
library(raster)
library(rgdal)
library(rgeos)
library(MASS)
library(cluster)
library(dplyr)
source("addEllipse.R")
source("genCircle.R")
# Number of data points covered by the given quantile level, capped at nD.
# Used to pick `quantile.used` for MASS::cov.rob.
NDquntil <- function(nD, level) {
  covered <- floor(nD * level)
  if (covered > nD) {
    covered <- nD
  }
  covered
}
# Chi-square cutoff (2 df = two climate axes) deciding whether a point lies
# inside the 95% minimum-volume ellipsoid.
in_Ellipsoid <- stats::qchisq(0.95, 2)
# First command-line argument selects the taxonomic group to process.
args = commandArgs(trailingOnly=TRUE)
group<-args[1]
# NOTE(review): hard-coded working directory; breaks on other machines.
setwd("/media/huijieqiao/Speciation_Extin/Sp_Richness_GCM/Script/diversity_in_e")
if (is.na(group)){
group<-"Amphibians"
}
# Climate layer grid: 3 GCMs x SSP119 x 3 variables x years 2000-2014.
GCMs<-c("EC-Earth3-Veg", "MRI-ESM2-0", "UKESM1")
SSPs<-c("SSP119", "SSP245", "SSP585")
VARs<-c("pr", "tasmax", "tasmin")
start_range<-c(2000:2014)
start_layer_df<-expand.grid(GCM=GCMs, SSP=SSPs[1], VAR=VARs, Y=start_range)
# Aggregation suffix embedded in the raster file names: precipitation
# rasters are annual sums, temperature rasters annual max/min.
start_layer_df$VAR2<-"sum"
start_layer_df[which(start_layer_df$VAR=="tasmax"), "VAR2"]<-"max"
start_layer_df[which(start_layer_df$VAR=="tasmin"), "VAR2"]<-"min"
start_layer_df$names<-sprintf("%s_%s_%d", start_layer_df$GCM, start_layer_df$VAR,
start_layer_df$Y)
var_tamplate<-"../../Raster/ENV/Annually/%s_%s_%s_%d_%s_eck4.tif"
start_layer_files<-sprintf(var_tamplate, start_layer_df$GCM, start_layer_df$SSP, start_layer_df$VAR,
start_layer_df$Y, start_layer_df$VAR2)
# One raster layer per GCM/variable/year combination.
start_layers<-stack(start_layer_files)
names(start_layers)<-start_layer_df$names
df_list<-readRDS(sprintf("../../Objects/IUCN_List/%s.rda", group))
i=1
# Pair each precipitation layer with the same-GCM/-year tasmax and tasmin
# layers, yielding one (PR, TEMP) column-name pair per combination.
var_pair<-start_layer_df[which(start_layer_df$VAR=="pr"),]
var_pair1<-left_join(var_pair, start_layer_df[which(start_layer_df$VAR=="tasmax"),], by=c("GCM", "SSP", "Y"))
var_pair2<-left_join(var_pair, start_layer_df[which(start_layer_df$VAR=="tasmin"),], by=c("GCM", "SSP", "Y"))
var_pair1<-var_pair1%>%dplyr::select(GCM, VAR.x, Y, VAR.y)
colnames(var_pair1)<-c("GCM", "PR", "Y", "TEMP")
var_pair2<-var_pair2%>%dplyr::select(GCM, VAR.x, Y, VAR.y)
colnames(var_pair2)<-c("GCM", "PR", "Y", "TEMP")
var_pair<-bind_rows(var_pair1, var_pair2)
# data.frame(v) below repairs column names via make.names ("-" becomes ".");
# mirror that here so PR_NAME/TEMP_NAME match the extracted columns.
var_pair$PR_NAME<-paste(gsub("-", ".", var_pair$GCM), var_pair$PR, var_pair$Y, sep="_")
var_pair$TEMP_NAME<-paste(gsub("-", ".", var_pair$GCM), var_pair$TEMP, var_pair$Y, sep="_")
# Fit one minimum-volume-ellipsoid niche model per species.
for (i in c(1:nrow(df_list))){
item<-df_list[i,]
item$sp<-gsub(" ", "_", item$sp)
if (item$area<=0){
next()
}
target_folder<-sprintf("../../Objects/Niche_Models/%s/%s", group, item$sp)
# Skip species already processed (the folder acts as a done-marker).
if (dir.exists(target_folder)){
next()
}
dir.create(target_folder, showWarnings = F)
print(paste(i, nrow(df_list), item$sp))
occ<-readRDS(sprintf("../../Objects/IUCN_Distribution/%s/%s.rda", group, item$sp))
# Climate values at every occurrence point, one column per layer.
v<-extract(start_layers, occ[, c("x", "y")])
v<-data.frame(v)
col<-colnames(v)
j=1
all_v<-NULL
# Long format: one (PR, TEMP, X, Y) row per occurrence per layer pair.
for (j in c(1:nrow(var_pair))){
var_item<-var_pair[j,]
v_item<-data.frame(PR=v[, var_item$PR_NAME], TEMP=v[, var_item$TEMP_NAME], X=occ$x, Y=occ$y)
if (is.null(all_v)){
all_v<-v_item
}else{
all_v<-bind_rows(all_v, v_item)
}
}
# Robust covariance (minimum volume ellipsoid) over the climate space.
fit <- cov.rob(all_v[, c("PR", "TEMP")], quantile.used=NDquntil(nrow(all_v), 0.95), method = "mve")
saveRDS(fit, sprintf("%s/fit.rda", target_folder))
all_v$dist <- stats::mahalanobis(all_v[, c("PR", "TEMP")], center = fit$center,
cov = fit$cov)
# Flag points whose Mahalanobis distance puts them inside the 95% ellipsoid.
all_v$in_out<-0
all_v[which(all_v$dist<in_Ellipsoid), "in_out"]<-1
# Manual diagnostic plot; never runs automatically (if (F)).
if (F){
colors<-c("red", "blue")
plot(all_v$PR, all_v$TEMP, col=colors[all_v$in_out+1])
addEllipse(fit$center, fit$cov, col="red", p.interval=0.95)
}
saveRDS(all_v, sprintf("%s/occ_with_env.rda", target_folder))
}
|
b965feb1b6bd6c14b6f782522ada75516da98268
|
ba77311895a3bfc6e4006e9600041d3d2185c4bd
|
/r-workshop.R
|
abc1286699117ce06ad0733e4e62bd80f2f4b284
|
[] |
no_license
|
milesc06/r-workshop
|
80421e3f205c6e9f11235a5fc692d971187b2c4f
|
98b01c7455fb6e086563629883e0c117c7d83a9c
|
refs/heads/master
| 2020-12-21T21:41:39.005296
| 2020-01-28T19:03:10
| 2020-01-28T19:03:10
| 236,571,232
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 123
|
r
|
r-workshop.R
|
#new file
# One-time workshop setup: install and load usethis, then record the git
# identity used for commits made from R.
install.packages("usethis")
library(usethis)
# NOTE(review): credentials are workshop-specific; change before reuse.
use_git_config(user.name="milesc06",user.email="miles@rstudio.com")
|
edf5bf9e5a91892b69a2eb45c24721fee651802d
|
4efbe1d55ea0f650168d1323d1813ea0eaa2ca8b
|
/misc/Stepwise_OFPGAM.R
|
aa684e083b0d70cceb10f79a3d40ac010c9c0201
|
[] |
no_license
|
panders225/semiparametric-regression
|
d8e7470576f7d3fd988ba7e677eaceed3216caea
|
1fd5a22f4283daf856aad61af2f4abccfa4fc324
|
refs/heads/master
| 2021-05-11T08:32:43.127850
| 2018-04-24T00:55:53
| 2018-04-24T00:55:53
| 118,055,099
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,724
|
r
|
Stepwise_OFPGAM.R
|
########## R script: Stepwise_OFPGAM.R ##########
# Stepwise fit for the Poisson generalized additive model
# fit to the physician office visits data on both the
# link scale and the response scale.
#
# I do only quasilikelihood in this analysis
#
# Because the stepwise regression uses gam and not mgcv, you do not want
# to get into the business of mixing them up. If you want to compare the
# GLM fit to the mgcv GAM fit, do not do it here. This code is just
# for help with model selection.
################################################################################
# Clear the workspace
################################################################################
# NOTE(review): rm(list = ls()) and setwd() in scripts are generally
# discouraged; kept as-is to preserve the author's workflow.
rm(list = ls())
################################################################################
# Set the seed
################################################################################
set.seed(4428967)
################################################################################
# Set the working directory
################################################################################
setwd("C:\\Users\\Carroll\\Documents\\My_Documents\\2018_SemiPar\\Data_Sets\\OFP_Poisson")
################################################################################
# Get the necessary files
################################################################################
library(HRW)
library(gam)
#library(mgcv)
library(Ecdat)
###########################################################
# If you have played with mgcv as part of a session,
# you need to detach it
###########################################################
# NOTE(review): this errors if mgcv is not currently attached; guard with
# if ("package:mgcv" %in% search()) if that matters.
detach("package:mgcv", unload=TRUE)
################################################################################
# Get the Data
################################################################################
data(OFP)
OFPforAna <- OFP
# Rescale age (x10) and drop implausible ages above 95.
# Presumably the raw OFP ages are stored in decades -- confirm in Ecdat docs.
OFPforAna$age <- 10*OFPforAna$age
OFPforAna <- OFPforAna[OFPforAna$age <= 95,]
################################################################################
# Obtain an ordinary GLM fit to the variables
################################################################################
# scale = -1 requests a quasi-likelihood fit (dispersion estimated from the
# data), matching the header note that only quasilikelihood is used here.
fitGLMOFP_Poisson = gam(ofp ~ age + school + adldiff + black
+ sex + maried + privins + medicaid + region + hlth,
family = poisson, scale = -1, data = OFPforAna)
summary(fitGLMOFP_Poisson)
################################################################################
# Now try to do a stepwise GAM
################################################################################
################################################################################
# Now code and run the stepwise regression
# The statement s(x,5) means the effective degrees of freedom = 5
################################################################################
# Each scope entry offers the choices: drop the term (~1), keep it linear,
# or (continuous terms only) smooth it with 5 effective df.
stepFit = step.gam(fitGLMOFP_Poisson, scope =
list("age" = ~1 + age + s(age,5),
"school" = ~1 + school + s(school,5),
"adldiff" = ~1 + adldiff,
"black" = ~1 + black,
"sex" = ~1 + sex,
"maried" = ~1 + maried,
"privins" = ~1 + privins,
"medicaid" = ~1 + medicaid,
"region" = ~1 + region,
"hlth" = ~1 + hlth))
################################################################################
# What is the final model?
################################################################################
# Drop the response name; print the selected terms.
print(names(stepFit$"model")[-1])
|
bbd64c9a8cee355fb349367a5a908400a1e6623c
|
49bc4a40b43070e91367f520db0215d40faf5eb0
|
/createNodes.r
|
8cd4620237582606395cb635d5c711633a817dce
|
[
"MIT"
] |
permissive
|
bevvvvv/DS220Proj2WineReview
|
d8846324036a1e914bf933aac93c815f4cd0b4c4
|
bfc7a4c5195cb3c17d0eecaa051598c502722d9d
|
refs/heads/master
| 2020-05-07T11:24:33.381625
| 2019-04-23T19:54:24
| 2019-04-23T19:54:24
| 180,459,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 989
|
r
|
createNodes.r
|
# Build node CSVs (wines, wineries, reviewers) for a graph-database import
# from the Kaggle wine-review dataset.
library(data.table)
library(mdsr)
# NOTE(review): absolute Windows path; parameterise before reuse.
wineInfo <- fread(file = "D:\\GitRepos\\DS220Proj2WineReview\\winemag-data-130k-v2.csv")
# Keep only the first 10k reviews to bound the import size.
wineInfo <- wineInfo[1:10000,]
# Column 10 is taster_name: replace empty strings with "Unknown".
wineInfo[taster_name %in% c(""),10] <- "Unknown"
# Wine nodes, deduplicated on (variety, price).
wines <- wineInfo %>%
select(variety, price, winery, taster_name, title)
wines <- wines[!duplicated(wines[,c('variety', 'price')])]
# Winery nodes: one row per winery.
wineries <- wineInfo %>%
select(variety, winery, region_1, region_2, province, designation, country)
wineries <- wineries[!duplicated(wineries$winery)]
# Reviewer nodes: one row per taster.
reviewers <- wineInfo %>%
select(variety, winery, taster_name, taster_twitter_handle, title)
reviewers <- reviewers[!duplicated(reviewers$taster_name),]
write.csv(wines, file = "D:\\GitRepos\\DS220Proj2WineReview\\winesImport.csv")
write.csv(wineries, file = "D:\\GitRepos\\DS220Proj2WineReview\\wineriesImport.csv")
write.csv(reviewers, file = "D:\\GitRepos\\DS220Proj2WineReview\\reviewersImport.csv")
write.csv(wineInfo, file = "D:\\GitRepos\\DS220Proj2WineReview\\reviewsImport.csv")
|
c0c685f2ebf87ad6192b500ffac974d00fc63814
|
63f8a1d7f06526022d65534ff044376f0a15cb9a
|
/man/preproc.Rd
|
0c41e9abf11642307da31f0599344c955bd6b7f1
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
mgondan/rolog
|
b28722e9913540c51cb53ece0bb125a53bc83f41
|
96ccd6a5e815cb8304566a61f9de9758b6efe176
|
refs/heads/main
| 2023-08-11T16:51:53.486382
| 2023-07-21T07:34:36
| 2023-07-21T07:34:36
| 372,880,585
| 6
| 2
| null | 2023-07-21T07:33:00
| 2021-06-01T15:37:38
|
Prolog
|
UTF-8
|
R
| false
| true
| 578
|
rd
|
preproc.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/preproc.R
\name{preproc}
\alias{preproc}
\title{Default hook for preprocessing}
\usage{
preproc(query = quote(1 <= sin))
}
\arguments{
\item{query}{the R call representing the Prolog query.}
}
\value{
The default hook translates the inequality and smaller-than-or-equal-to from
R (!=, <=) to Prolog (\=, =<). Moreover, primitive functions are converted to
regular functions.
}
\description{
Default hook for preprocessing
}
\seealso{
[rolog_options()] for fine-grained control over the translation
}
|
0fbf07d9d622d32dbd627f7e5746efad48ab75bb
|
f42d1165f3ebcff06bc2d555d42a2d4770db687c
|
/R/get_png_logos.R
|
419782ccba5c2a7d7d6730edfbc637d0ff7a9ea2
|
[] |
no_license
|
IvoVillanueva/mlbstatsR
|
11fc14d7d6a070d46dcde76fc451e204333afe66
|
2f20733d75be7cc11d88c452e4eea7d47788d750
|
refs/heads/main
| 2023-08-04T01:33:34.286957
| 2021-09-09T17:57:27
| 2021-09-09T17:57:27
| 361,255,437
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,642
|
r
|
get_png_logos.R
|
#' @title 4 png Logos & colors for MLB (baseball)
#'
#' @description MLB professional league baseball data table, logos and colors,
#'   fetched from the public ESPN site API.
#'
#' @return A tibble with one row per team: identifiers, names, team colors
#'   (as "#RRGGBB" strings) and the four ESPN png logo URLs
#'   (default, dark, scoreboard, dark scoreboard).
#' @export
#' @importFrom jsonlite read_json
#' @import dplyr purrr
#' @importFrom tibble enframe
#' @importFrom tidyr unnest_wider
#' @importFrom rlang .data
#' @importFrom utils sessionInfo
#' @importFrom janitor clean_names
#' @examples
#'
#'
#' \donttest{get_png_logos()}
#'
get_png_logos <- function() {
  message("4 MLB ESPN logos in png!")
  team_url <- "https://site.api.espn.com/apis/site/v2/sports/baseball/mlb/teams?&limit=50"
  raw_teams <- jsonlite::read_json(team_url)
  # The payload nests teams under sports[[1]]$leagues[[1]]$teams; unnest
  # twice to flatten each team record into columns.
  raw_teams$sports[[1]]$leagues[[1]]$teams %>%
    tibble::enframe() %>%
    dplyr::select(-"name") %>%
    tidyr::unnest_wider("value") %>%
    tidyr::unnest_wider("team") %>%
    dplyr::select(-"record", -"links") %>%
    dplyr::as_tibble() %>%
    # Each team's `logos` is a list of 4 entries; pull out each URL.
    dplyr::mutate("logoDefault" = purrr::map_chr(.data$logos, function(df) df[[1]][[1]]),
                  "logoDark" = purrr::map_chr(.data$logos, function(df) df[[2]][[1]]),
                  "logoScoreboard" = purrr::map_chr(.data$logos, function(df) df[[3]][[1]]),
                  "logoDarkScoreboard" = purrr::map_chr(.data$logos, function(df) df[[4]][[1]])) %>%
    dplyr::select("id", "name":"alternateColor",-"shortDisplayName", "logoDefault":"logoDarkScoreboard") %>%
    purrr::set_names(
      nm = c(
        "uid", "team_name", "team_nickname", "full_name", "team_color",
        "alternate_color", "logologodefault", "logodark", "logoscoreboard", "logodarkscoreboard"
      )) %>%
    # ESPN returns colors without the leading "#"; add it.
    dplyr::mutate(
      "team_color" = paste0("#", .data$team_color),
      "alternate_color" = paste0("#", .data$alternate_color)
    )
}
|
b3b91b92bc36bcfc26dab47600c48298781acc9a
|
6f6a9faa7cea6501b170a3b584b7d55abff6546f
|
/stan_examples/gp_stan.R
|
14bf38447967a3d593abb8e0e1b7f345b506261d
|
[] |
no_license
|
jiunsiew/bayesian_machine_learning_r
|
6e7e9c869122c163da227105e84b66921bf1e6d9
|
54089bb063396b690c4b6be46383596adcda5fae
|
refs/heads/master
| 2021-01-15T03:40:39.423569
| 2020-03-16T04:05:34
| 2020-03-16T04:05:34
| 242,866,515
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,447
|
r
|
gp_stan.R
|
# Gaussian-process prior sampling via Stan: draw GP realisations f (and
# noisy observations y) at a grid of inputs, then plot them.
library(data.table)
library(magrittr)
library(rstan)
library(stringr)
# NOTE(review): rm(list = ls()) in scripts is discouraged; it does not give
# a fresh session and surprises anyone sourcing this file.
rm(list = ls())
rstan_options(auto_write = TRUE)
options(mc.cores = parallel::detectCores()-1)
## run the stan file
nSamples <- 25
nNew <- 1
x.star <- as.matrix(seq(-5, 5, len=nSamples))
# Data list for gp_stan.stan; theta_1/theta_2 are presumably kernel
# hyperparameters and sigma_n the observation noise -- confirm in the .stan file.
stanData <- list(N=nNew,
nr_1=nSamples,
nc_1=1,
x1=x.star,
nr_2=nSamples,
nc_2=1,
x2=x.star,
theta_1=1,
theta_2=1,
sigma_n=0.5)
## sample --> just generating some random samples
# Fixed_param with a single iteration: Stan acts purely as a random-number
# generator for the GP prior, not as an inference engine.
simu_fit <- stan(file='gp_stan.stan',
data=stanData, iter=1,
chains=1, seed=494838, algorithm="Fixed_param")
# One row per draw (nNew), one column per input location.
f_total <- matrix(extract(simu_fit)$f, nrow = nNew)
y_total <- matrix(extract(simu_fit)$y, nrow = nNew)
# Reshape to long format with columns x, sample_idx, f (latent), y (noisy).
priorSamples <- data.table(t(f_total)) %>%
.[, x := x.star] %>%
cbind(data.table(t(y_total))) %>%
setnames(., c(paste0("f_", 1:nNew), "x", paste0("y_", 1:nNew))) %>%
melt.data.table(., id.vars = c("x")) %>%
.[, type := str_sub(variable, 1,1)] %>%
.[, sample_idx := str_sub(variable, 3,-1)] %>%
.[, variable := NULL] %>%
dcast(x+sample_idx~type, value.var = "value")
# NOTE(review): ggplot2 is never attached explicitly; this relies on rstan
# bringing it in -- confirm, or add library(ggplot2) above.
ggplot(priorSamples, aes(x=x, y=f)) +
geom_line(aes(group=sample_idx, colour = sample_idx), lty = 2) +
geom_point(aes(x = x, y = y, colour = sample_idx), alpha = 0.5) +
theme_bw() +
xlab("input, x")
|
2541395c2b573bda021b7af766155803cb65af08
|
884903fd3978d5a35e531d4738154fe225a32657
|
/Euphrates/GWAS experiment/Composite functional mapping.R
|
2c5ab091a98137fe66c68112ad609e0c5cc99a51
|
[] |
no_license
|
CCBBeijing/PPMultilayerNetwork
|
ec1b9f7b430f0e7299ddc1336a54779c6e948332
|
794fc4a0bc0529ddf551d5c1ae7ec1388f21ff13
|
refs/heads/main
| 2023-04-07T12:46:08.534260
| 2022-05-31T00:24:05
| 2022-05-31T00:24:05
| 359,453,869
| 1
| 0
| null | 2021-06-13T23:39:49
| 2021-04-19T12:37:19
| null |
UTF-8
|
R
| false
| false
| 3,642
|
r
|
Composite functional mapping.R
|
library(mvtnorm)
# Three-parameter logistic growth curve evaluated at time(s) t.
# B[1]: asymptote; B[2]: maximum growth rate; B[3]: timing shift.
get_miu3 <- function(B, t) {
  asym <- B[1]
  exponent <- 4 * B[2] * (B[3] - t) / asym + 2
  asym / (1 + exp(exponent))
}
# Covariance matrix under the SAD(1) structured-antedependence model.
#
# par[1] is the antedependence parameter phi, par[2] the innovation
# variance; `times` contributes only its length (number of time points).
# `options` is accepted for interface compatibility and unused.
SAD1_get_matrix <- function(par, times = t, options = list()) {
  n_pts <- ifelse(is.vector(times), length(times), NCOL(times))
  phi <- par[1]
  v2 <- par[2]
  denom <- 1 - phi^2
  sigma <- array(1, dim = c(n_pts, n_pts))
  for (row in 1:n_pts) {
    # Upper-triangle entries for this row, then mirror into the column.
    vals <- phi^(c(row:n_pts) - row) * (1 - phi^(2 * row)) / denom
    sigma[row, row:n_pts] <- vals
    sigma[row:n_pts, row] <- vals
  }
  sigma * abs(v2)
}
# Difference of two logistic curves: parameters A[1:3] minus A[4:6].
get_u <- function(A, t) {
  first <- get_miu3(A[1:3], t)
  second <- get_miu3(A[4:6], t)
  first - second
}
# Summed SAD(1) covariance for the difference of two processes:
# par[1:2] parameterise the first process, par[3:4] the second.
#
# BUG FIX: the body previously wrote `times = t`, which resolved to the
# global `t` and silently ignored the `times` argument; it now forwards
# the argument itself (same result for callers relying on the default).
get_sig <- function(par, times = t, options = list()) {
  SAD1_get_matrix(par = c(par[1], par[2]), times = times) +
    SAD1_get_matrix(par = c(par[3], par[4]), times = times)
}
# Initial parameter guesses for a logistic curve fit:
# c(asymptote, maximum slope, time intercept implied by that slope).
#
# pheno: phenotype table whose first column is an ID; remaining columns are
#        measurements at the times in `t`.
# t:     measurement times (one per phenotype column after the ID).
#
# The finite-difference slope vector was previously recomputed four times
# inline; it is hoisted into a local here (same result, readable).
get_initial_par <- function(pheno, t) {
  mean0 <- apply(pheno[, -1], 2, mean)
  # Slope of the mean trajectory between consecutive time points.
  slopes <- (mean0[-1] - mean0[-length(mean0)]) / (t[-1] - t[-length(t)])
  peak <- which.max(slopes)
  c(max(mean0),
    max(slopes),
    t[peak] - mean0[peak] / max(slopes))
}
# Negative log-likelihood under the null hypothesis: a single mean curve
# (par[1:6], two logistic curves differenced) and one SAD(1)-sum covariance
# (par[7:10]) for all individuals. Returned negated for use with optim().
H0 <- function(yt, t, par) {
  mean_vec <- get_u(par[1:6], t)
  cov_mat <- get_sig(par = par[7:10], times = t)
  loglik <- sum(dmvnorm(yt, mean_vec, cov_mat, log = TRUE))
  -loglik
}
# Negative log-likelihood under the alternative: separate mean curves for
# the three genotype classes (row indices m0, m1, m2) with a shared
# SAD(1)-sum covariance (par[19:22]). Returned negated for optim().
H1 <- function(yt, t, m0, m1, m2, par) {
  y0 <- yt[c(m0), ]
  y1 <- yt[c(m1), ]
  y2 <- yt[c(m2), ]
  mu0 <- get_u(par[1:6], t)
  mu1 <- get_u(par[7:12], t)
  mu2 <- get_u(par[13:18], t)
  cov_mat <- get_sig(par = par[19:22], times = t)
  ll0 <- sum(dmvnorm(y0, mu0, cov_mat, log = TRUE))
  ll1 <- sum(dmvnorm(y1, mu1, cov_mat, log = TRUE))
  ll2 <- sum(dmvnorm(y2, mu2, cov_mat, log = TRUE))
  # Accumulate in the original order to keep floating-point results identical.
  total <- ll1 + ll0 + ll2
  -total
}
# Null-model fit: one mean-difference curve for the whole population.
# 10 starting parameters: 6 for the two logistic curves, 4 for the
# SAD(1)-sum covariance.
par = c(2.55,9.276692e-03,-1.044284e+02,2.35,8.580885e-03,-1.045999e+02,1.112310e+00,1.354376e-05,9.777184e-01,9.046421e-02)
# Phenotype: control minus salt-stress values (diam_*; first column is an ID).
yt = (dat$diam_ck[,-1]-dat$diam_salt[,-1])
t = dat$t
# Fit twice, restarting from the first solution to polish the optimum.
parl0 <- optim(par = c(2.55,9.276692e-03,-1.044284e+02,2.35,8.580885e-03,-1.045999e+02,1.112310e+00,1.354376e-05,9.777184e-01,9.046421e-02),H0,yt = (dat$diam_ck[,-1]-dat$diam_salt[,-1]),t = dat$t)
parl0 <- optim(par = parl0$par,H0,yt = (dat$diam_ck[,-1]-dat$diam_salt[,-1]),t = dat$t)
# Iteratively refine the H1 (three-genotype) fit: alternate optim() restarts
# until the objective improves by less than 1 or 100 iterations elapse.
#
# NOTE(review): the first four arguments (pheno_ck0/salt0/ck1/salt1) are
# never used in the body; only pheno_diff, t and the index vectors matter.
# Relies on the global `parl0` (null fit) for starting values.
optim_diff <- function(pheno_ck0,pheno_salt0,pheno_ck1,pheno_salt1,pheno_diff,t,m0,m1,m2){
itime <- 100
itimes <- 1
# Start from the null fit: reuse its 6 curve parameters for all three
# genotype classes (6+6+10 = 22 = H1's 18 curve + 4 covariance parameters).
par0 <-as.numeric(c(parl0$par[1:6],parl0$par[1:6],parl0$par))
repeat{
a <- optim(par = par0 ,H1,yt = pheno_diff, t = t,m0=m0,m1=m1,m2=m2)
b <- optim(a$par,H1,yt = pheno_diff, t = t,m0=m0,m1=m1,m2=m2)
# cat("Logistic_diff",itimes,b$value,'\n')
itimes <- itimes + 1
if(all( abs(a$value-b$value) < 1 )||itimes == itime){
break
}else{
par0 <- b$par
}
}
b
}
# Likelihood-ratio statistic for one SNP.
#
# SNP:     genotype vector coded 0/1/2 (one value per individual).
# control: control-condition phenotypes (first column is an ID).
# stress:  stress-condition phenotypes (first column is an ID).
#
# Returns c(LR, fitted H1 parameters). Relies on the globals `parl0`
# (null-model fit) and `t` (measurement times).
get_lr <- function(SNP, control, stress) {
  m0 <- which(SNP == 0)
  m1 <- which(SNP == 1)
  m2 <- which(SNP == 2)
  pheno_ck <- control[, -1]
  pheno_salt <- stress[, -1]
  pheno_ck0 <- pheno_ck[m0, ]
  pheno_ck1 <- pheno_ck[m1, ]
  pheno_salt0 <- pheno_salt[m0, ]
  # BUG FIX: previously subset with m0, silently duplicating pheno_salt0.
  pheno_salt1 <- pheno_salt[m1, ]
  parl1 <- optim_diff(pheno_ck0, pheno_salt0, pheno_ck1, pheno_salt1,
                      (control[, -1] - stress[, -1]), t, m0, m1, m2)
  # LR = -2 * (logLik_H0 - logLik_H1); H0/H1 return NEGATIVE log-likelihoods,
  # hence the sign arrangement below.
  c(-2 * (-parl0$value + parl1$value), parl1$par)
}
# Genome-wide scan: one LR statistic (plus H1 parameters) per SNP row.
LR_plastic <- apply(dat$hysnp, 1, get_lr, control = dat$diam_ck, stress = dat$diam_salt)

# Permutation test: reshuffle the phenotype columns relative to the
# genotypes and record the genome-wide maximum LR for each permutation.
# NOTE(review): the shuffle permutes ALL 73 columns (including what appears
# to be the ID column elsewhere) of both tables with the same order --
# confirm this is the intended permutation scheme.
lr <- numeric(1000)
for (i in seq_len(1000)) {
  mm <- sample(1:73, 73)
  dat$diam_ck <- dat$diam_ck[, mm]
  dat$diam_salt <- dat$diam_salt[, mm]
  LR_plasticper <- apply(dat$hysnp, 1, get_lr, control = dat$diam_ck, stress = dat$diam_salt)
  # BUG FIX: was `max(LR_plasticsa[1, ])`, referencing an undefined object;
  # the per-permutation result is LR_plasticper (row 1 holds the LR values).
  lr[i] <- max(LR_plasticper[1, ])
}
# Empirical 5% genome-wide threshold: 50th largest of 1000 permutation maxima.
Threshold_value <- sort(lr, decreasing = TRUE)[50]
|
fc28ad000b2ee717edb9f92a16a0086ef5e65bc2
|
a5bbcb2b8c60e803c0bc6c5f3b6acd6f76f608cd
|
/man/popover.Rd
|
c0fe0e610178f04f9e41efd6bc88f5f5f20553ca
|
[] |
no_license
|
DataXujing/shinyBS
|
fdfaf0784b40c3693e43ade945bec22efa411bd1
|
6bfa2a44b6d05cebd251d7470b039878510fce3d
|
refs/heads/master
| 2021-07-05T21:12:42.048441
| 2015-01-23T15:12:03
| 2015-01-23T15:12:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,039
|
rd
|
popover.Rd
|
\name{addPopover}
\alias{addPopover}
\alias{bsPopover}
\alias{removePopover}
\title{
Twitter Bootstrap Popovers
}
\description{
Functions to add Twitter Bootstrap popovers in shiny.
}
\usage{
addPopover(session, id, title = "", content,
placement = "right", trigger = "click")
bsPopover(id, title = "", content,
placement = "right", trigger = "click")
removePopover(session, id)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{session}{
The \code{session} object passed to function given to \code{shinyServer}
}
\item{id}{
The \code{inputId} of the object to add the popover to
}
\item{title}{
The title of the popover
}
\item{content}{
The main content of the popover
}
\item{placement}{
Where the popover should appear in relation to the object. Acceptable values are \code{bottom}, \code{top}, \code{left}, and \code{right}.
}
\item{trigger}{
What action will make the popover appear. Common values are \code{hover} or \code{click}.
}
}
\details{
\code{bsPopover} is called from \code{ui.R} and will add a \code{<script>} element to the page that adds a popover on page load. \cr\cr
\code{addPopover} is called from \code{server.R} and uses shiny's \code{customMessageHandler} to add a popover after page load.
}
\references{
\href{http://getbootstrap.com/2.3.2/javascript.html}{Popovers for Twitter Bootstrap 2.3.2}
}
\note{
Run \code{bsDemo()} for a live example of popovers.
}
\author{
Eric Bailey
}
\examples{
\dontrun{
## From ui.R: Adds a popover to element with inputId = "someInput"
## with title, "Popover", and text, "This is an input.", that appears to the left on click.
bsPopover(id = "someInput", title="Popover", content = "This is an input",
placement = "left", trigger = "click")
## From server.R: Add the same popover as above
addPopover(session, id="someInput", title="popover", content = "This is an input.",
placement = "left", trigger = "click")
}
}
|
3722ea46fc7d8224052c747c89225333c4abdae6
|
dd521637dcf91fe0591cd7aa5bccf7e0b7eb15c5
|
/191015 Lecture 5/Class05.R
|
ec647e6077461a695715a397e3455635ebfa8476
|
[] |
no_license
|
Mark-Jacob/BIMM143
|
60bcd19509526f9bd7d024801ef0556de51fc2c8
|
2dba92d53460034d9ad641487fd190ac667c4c87
|
refs/heads/master
| 2020-09-01T00:29:32.004799
| 2019-12-07T06:43:15
| 2019-12-07T06:43:15
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,875
|
r
|
Class05.R
|
#' ---
#' title: "Class 05: Data exploration and visualization in R"
#' author: "Mark Jacob"
#' date: "October 31st, 2019"
#' output: github_document
#' ---
#Class5 Data Visualization
x <- rnorm(1000)
#Looking for help
#EZ commands to use in the future
mean(x)
sd(x)
#Numbers will differ among people due to different parameters for each system as rnorm indicates
#random numbers for each person
summary(x)
#Making the actual boxplot
boxplot(x)
# Histogram of the same random sample
hist(x)
#If we want to keep these graphs, we can export them to word or whateva
#rug(x) indicates the actual data points under the graph in order to represent where the data is lying
rug(x)
#Data visualization A practical introduction Kieran Healy - prolly should read
#Most important point: Make it understandable and visualizaed easily
#welp that didn't work
baby <- read.table("bimm143_05_rstats/weight_chart.txt",
header=TRUE)
#if you have headers, make sure to set it to true otherwise it will default
read.table("bimm143_05_rstats/weight_chart.txt", header=FALSE)
# Age vs. weight line-and-point plot.
# NOTE(review): lwd and pch are given as strings; pch="2" plots the literal
# character '2' rather than symbol #2 -- possibly unintended.
plot(baby$Age, baby$Weight, type="o",
lwd="1",
pch="2",
cex=5,
ylim=c(2,10),
xlab="Age",
ylab="Weight(kg)",
main="Age v. Weight",
col="indianred2")
#Type l is a line graph, Type p is is just a dot graph, Type b is a dash and dot graph, and o is linegraph with scatterplot
#lwd parameter is a modifer for line / dot thickness in the graph. The larger the value, the more dark and thick it gets.
#pch gives you an option of changing the characters of the data points into numbers.It only takes the first character though if you give it an alphabet
#pch will give you a designated list of characters to use like squares or triangles if you know the character code
#you can also use emojis for bullet points
#ylim/xlim can change the axis limits manually
#xlab/ylab can change the x axis label
feat <- read.table("bimm143_05_rstats/feature_counts.txt", header=TRUE,
sep="\t")
#the sep= parameter indicates what kind of separation is occuring in read.table
mouse <- read.delim("bimm143_05_rstats/feature_counts.txt", header=TRUE)
#read.delim is a special text reader for tab spaced files, woo
#par() controls area
#mar=() controls margins
# Widen the left margin (15 lines) so the long feature names fit.
par(mar=c(5,15,5,5))
barplot(mouse$Count,
horiz=TRUE,
names.arg =mouse$Feature,las=1)
#You must run par with the barplot for par to take place
#SECTION 3
par(mar=c(4,5,5,5))
obama <- read.delim("bimm143_05_rstats/male_female_counts.txt")
#Rainbow obama plot
barplot(obama$Count,
names.arg =obama$Sample,
las=2,
ylab="Counts",
col=rainbow(nrow(obama)))
#red and blue obama plot
# col is recycled, alternating red and blue across the bars.
barplot(obama$Count,
names.arg =obama$Sample,
las=2,
ylab="Counts",
col=c("red","blue"),
xlab="Samples")
|
ffdba39901e9cf3868b6920965f8a8d326958db4
|
143f25eb68b88b975eb576ea95c731e932b55a38
|
/man/getScriptWords.Rd
|
acbc283545d0964763cb6bee86d2d505c188d660
|
[] |
no_license
|
lockedata/TextAnalysis
|
87fe2dd3b752cb9547ffd2606fb24f27bd8c43d3
|
607203fb3f80abe470cc904454eb8ee666659f2f
|
refs/heads/master
| 2021-01-23T08:00:11.059081
| 2019-11-21T18:24:51
| 2019-11-21T18:24:51
| 86,470,408
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| true
| 521
|
rd
|
getScriptWords.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/getScriptWords.R
\name{getScriptWords}
\alias{getScriptWords}
\title{Get words used in Monty Python movie lines, with stop words removed}
\usage{
getScriptWords(offline = FALSE)
}
\arguments{
\item{offline}{Use an offline copy instead of fetching data}
}
\value{
data.frame of words from Monty Python scripts
}
\description{
Get words used in Monty Python movie lines, with stop words removed
}
\examples{
head(getScriptWords(offline=TRUE))
}
|
98e5323b9bf1a9411c5ea64a32262035098fb830
|
38434b7cc32750b7c6d6d133e5657bb3757a7230
|
/auxiliary.R
|
57078e2ce150effb8caaf52e98e9d8b3281a981f
|
[] |
no_license
|
tanujitdey/causal-me
|
896fb7e2e1b0043a010de645c66dec352515c7c3
|
f64b243c7230e2474acd71a26c25d27b33ac5e53
|
refs/heads/master
| 2023-08-14T09:59:57.038265
| 2021-10-01T02:11:35
| 2021-10-01T02:11:35
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 814
|
r
|
auxiliary.R
|
# Regression calibration: impute the exposure `s` (observed only for some
# units) from the error-prone surrogate `star` and covariates `w` using a
# SuperLearner fit, keeping observed values of `s` wherever they exist.
pred <- function(s, star, w, sl.lib = c("SL.mean", "SL.glm", "SL.glm.interaction", "SL.ranger", "SL.earth")){
  observed <- !is.na(s)
  # Design matrix: covariates plus the surrogate exposure column ("expos").
  design <- data.frame(w, star)
  colnames(design) <- c(colnames(w), "expos")
  train_x <- data.frame(design[observed, ])
  colnames(train_x) <- colnames(design)
  # Fit the nuisance outcome model on the rows where s is observed.
  fit <- SuperLearner(Y = s[observed], X = train_x, SL.library = sl.lib)
  # Predict for all rows, then overwrite predictions with observed values.
  imputed <- c(predict(fit, newdata = design)$pred)
  imputed[observed] <- s[observed]
  return(imputed)
}
# Highest-posterior-density interval: the shortest interval containing
# (1 - alpha) of the sample, found by scanning all candidate intervals
# whose endpoints lie m = round(n * alpha) positions apart in the sorted
# sample and picking the narrowest.
hpd <- function(x, alpha = 0.05){
  sorted <- sort(x)
  n <- length(sorted)
  m <- round(n * alpha)
  widths <- sorted[(n - m + 1):n] - sorted[1:m]
  shortest <- which(widths == min(widths))[1]
  c(sorted[shortest], sorted[n - m + shortest])
}
|
ee12b42839a5730c5c8227d4633e7ad487c16357
|
74ce34dfcd0971aa389b379b7484fddde4cdffc9
|
/man/bsearch7.Rd
|
b5122e4f7f478e00bb2f589b2c78f2defdf1e313
|
[] |
no_license
|
cran/stackoverflow
|
294b5425c89167d3278faa19d88905f821ef194f
|
3bd6c79acafa3ba9caa681a740cae22da2c18416
|
refs/heads/master
| 2020-04-04T03:44:41.465303
| 2020-01-10T03:50:02
| 2020-01-10T03:50:02
| 35,567,770
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 713
|
rd
|
bsearch7.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/bsearch7.R
\name{bsearch7}
\alias{bsearch7}
\title{Efficient binary search for character vectors}
\usage{
bsearch7(val, tab, L = 1L, H = length(tab))
}
\arguments{
\item{val}{values}
\item{tab}{table to find values in}
\item{L}{lower bound}
\item{H}{upper bound}
}
\description{
Efficient binary search for character vectors
}
\examples{
bsearch7(sample(letters, 5000, replace=TRUE), letters)
}
\references{
\url{http://stackoverflow.com/questions/20133344/find-closest-value-in-a-vector-with-binary-search/} and
\url{https://stat.ethz.ch/pipermail/r-help/2011-April/274182.html}
}
\author{
Martin Morgan, Neal Fultz
}
|
aa70b19245c84d137deb10f4a6490d48091da257
|
7880f7fb7eead03f1e655f405f6e770b7e3e8ead
|
/R/sgph.R
|
e5d0c4a608cf64decafadc330a7bf7b0f9088e09
|
[] |
no_license
|
tselert/sgholidays
|
c6f3f7afd9410bb49f27386edf69823c4752e9b3
|
5570490c310515fc9cfc897f8e57ed7cb043d0c9
|
refs/heads/master
| 2023-07-19T11:20:28.018084
| 2021-09-03T00:48:22
| 2021-09-03T00:48:22
| 399,330,903
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,922
|
r
|
sgph.R
|
#' Data of Public Holidays in Singapore
#'
#'This dataset contains all the dates of gazetted public holidays (PH) in Singapore since 2008. It also contains dates of off-in-lieu (see Note on OIL below).
#'This dataset will be updated annually as per the official website of the Ministry of Manpower Singapore.
#'
#' Since Aug 1968, Singaporeans enjoy 11 gazetted public holidays:
#' 1. New Year Day (1d)
#' 2. Chinese New Year (2d)
#' 3. Good Friday (1d)
#' 4. Labour Day (1d)
#' 5. Hari Raya Puasa (1d)
#' 6. Vesak Day (1d)
#' 7. National Day (1d)
#' 8. Hari Raya Haji (1d)
#' 9. Deepavali (1d)
#' 10. Christmas Day (1d)
#'
#' In addition, Polling Day is also a public holiday for Presidential Election and
#' General Elections (not including by-elections)
#'
#' SG50 Public Holiday was declared a special holiday on 7 August 2015 to mark the nation's
#' 50th anniversary of independence.
#'
#' Fun fact: Singapore scrapped a few holidays in August 1968 to improve productivity.
#'
#'
#' Note on Off-in-lieu (OIL):
#'
#' If the gazetted PH falls on a Sunday, the following working day will be the 'OIL' by default.
#'
#' If the gazetted PH falls on a worker's rest day (especially Saturday), he may be granted an 'OIL' on a date agreed with his/her employer or be
#' compensated monetarily.
#'
#'
#'
#' @format A data.table with 2 variables:
#' \describe{
#' \item{holiday.name}{chr Name of public holiday}
#' \item{date}{Date Date of gazetted public holiday}
#' \item{wkday}{Ord.factor Day of week, abbreviated}
#' }
#'
#' @source
#' \url{https://www.mom.gov.sg/employment-practices/public-holidays}
#'
#' \url{https://www.mom.gov.sg/newsroom/press-releases/2007/singapore-public-holidays-for-the-year-2008}
#'
#' @references
#' \url{https://sso.agc.gov.sg/Act/HA1998#Sc-}
#'
#' \url{https://www.mom.gov.sg/employment-practices/public-holidays-entitlement-and-pay}
#'
"sgph"
|
9ee529cea0144986280533317f0f5d438d1cd32f
|
06382b9d2a348003ffed076537f0ad9cb0a63f2f
|
/run_analysis.r
|
aff34c6ca56279d44aaaf776431734f279585bfb
|
[] |
no_license
|
avikmoulik/data_cleaning
|
42f3cffc743dfd530e7017df7503562d9ae673fe
|
adefa5ca589541ebee8ec234108f2b8f372e2fae
|
refs/heads/master
| 2021-01-10T14:01:58.193811
| 2015-09-27T13:12:14
| 2015-09-27T13:12:14
| 43,245,448
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,200
|
r
|
run_analysis.r
|
## reading all test files
##----------------------------------------------
# NOTE(review): absolute paths throughout; consider file.path() on a base dir.
subject_test<-read.table("/home/avik/rfile/UCI HAR Dataset/test/subject_test.txt")
x_test<-read.table("/home/avik/rfile/UCI HAR Dataset/test/X_test.txt")
y_test<-read.table("/home/avik/rfile/UCI HAR Dataset/test/y_test.txt")
## reading all train files
##----------------------------------------------
subject_train<-read.table("/home/avik/rfile/UCI HAR Dataset/train/subject_train.txt")
x_train<-read.table("/home/avik/rfile/UCI HAR Dataset/train/X_train.txt")
y_train<-read.table("/home/avik/rfile/UCI HAR Dataset/train/y_train.txt")
##Step 1
## Merging test and train data frames
##----------------------------------------------
# Column layout: subject id, activity id, then the feature columns.
test_data<- cbind(subject_test,y_test,x_test)
train_data<- cbind(subject_train,y_train,x_train)
# this is the merged data set
test_train_data_v1<- rbind(test_data,train_data)
##Step 2
## keeping only mean and Standard deviation measurements
##--------------------------------------------------------
# Reading the names of the features and getting the column numbers where it is mean or STD
features<-read.table("/home/avik/rfile/UCI HAR Dataset/features.txt")
# NOTE(review): concatenating the two grep results places all "mean"
# columns before all "std" columns, so selected columns are not kept in the
# original feature order.
ind_mean_std<-c(grep('mean',features$V2,ignore.case = T),grep('std',features$V2,ignore.case = T))
#First two columns are subjects and activity they performed, So addind 2 with this column numbers
ind_mean_std<-ind_mean_std+2
#Adding first two column index also:
final_ind<-c(1,2,ind_mean_std)
#subsetting the combined dataset only for the required variables:
test_train_data_v2<-test_train_data_v1[,final_ind]
##Step 3
## Using descriptive activity names to name the activities in the data set
##-------------------------------------------------------------------------
# reading activity labels
activity_labels<-read.table("/home/avik/rfile/UCI HAR Dataset/activity_labels.txt")
#joining with the dataset
# sort=F stops merge() from re-sorting rows by the join key.
test_train_data_v3<-merge(x=test_train_data_v2,y=activity_labels,by.x='V1.1',by.y='V1',all=T,sort=F)
#keeping required columns and arrenging them in required order:
test_train_data_v3<-test_train_data_v3[,c(2,89,3:88)]
##Step 4
## Appropriately labeling the data set with descriptive variable names
##-------------------------------------------------------------------------
# Extracting the columns names from the feature file
namefeatures<-as.character(features$V2)
#Adding two aditional names for first two columns
final_name<-c('Subject','Activity',namefeatures)
#Extracting only required names
required_names<-final_name[final_ind]
# Strip literal "()" from names such as tBodyAcc-mean().
required_names<-gsub("\\()","",required_names,ignore.case = TRUE)
#renaming the columns
test_train_data_v4<-test_train_data_v3
names(test_train_data_v4)<-required_names
##Step 5
## From the data set in step 4, creating a second, independent tidy data set
##with the average of each variable for each activity and each subject.
##-------------------------------------------------------------------------
test_train_data_v5<-aggregate(.~Subject+Activity,data=test_train_data_v4,FUN=mean)
## test_train_data_v5 is the wide format tidy data
## Writing it into a text file
write.table(x =test_train_data_v5 ,file = "/home/avik/rfile/final_tidy_data.txt",row.names = F)
|
2e0c4cc923a19cc80ce50807770e1b4033933fe4
|
0b535741c301358fd1510a8db3f5fa2a2c479862
|
/man/getXcoords.Rd
|
92415b19b85382c2107e02400f2096067074a500
|
[] |
no_license
|
arcolombo/qusage
|
83f7514159c5881035c3dc2597b67d9a4c0a9267
|
6085f2cbac1883af2e83d249db4726ea6606d6e3
|
refs/heads/master
| 2021-01-17T08:56:52.732039
| 2015-10-13T19:59:53
| 2015-10-13T19:59:53
| 50,695,728
| 1
| 0
| null | 2016-01-29T22:31:31
| 2016-01-29T22:31:31
| null |
UTF-8
|
R
| false
| false
| 2,483
|
rd
|
getXcoords.Rd
|
\name{getXcoords}
\alias{getXcoords}
\title{Get the X coordinates for the points of the PDF}
\description{
Calculates the x-coordinates for the PDF of a given pathway.
}
\usage{
getXcoords(QSarray, path.index=1, addVIF=!is.null(QSarray$vif))
}
\arguments{
\item{QSarray}{A QSarray object as output by \link{qusage} (or \link{aggregateGeneSet})}
\item{path.index}{either an integer between 1 and numPathways(QSarray), or the name of the pathway to retrieve.}
\item{addVIF}{a logical indicating whether to use the VIF when calculating the variance}
}
\details{
The calculation of the x-coordinates for a PDF is not straightforward, and as such they are not included in the QSarray object initially. During the numerical convolution step, the gene set PDF is calculated at a number of points (equal to \code{QSarray$n.points}) over a range defined by:
\code{c(path.mean - range, path.mean + range)}
However, the resulting PDF is actually the \emph{sum} of the individual gene PDFs, rather than the desired \emph{average} PDF. Therefore the range which is stored in the resulting QSarray is divided by the number of genes in the pathway, \code{QSarray$path.size}.
In addition, the width of the PDF can be expanded by the Variance Inflation Factor (VIF), which is equivalent to multiplying the range of the x-coordinates by the \code{sqrt(VIF)}. If the parameter \code{addVIF=TRUE}, the VIF calculated using the \code{calcVIF} method will be included in the calculation of the x-coordinates.
In general, the x-coordinates for a pathway are calculated for each point n using the following formula:
\deqn{x_n = (-1+\frac{2(n-1)}{N_{pts}-1}) \times r \times \sqrt{VIF} + \hat{\mu}_{path}}{x.n = ( seq(-1,1,length.out=N.points) * range * sqrt(VIF) ) + path.mean}
}
\value{
A numeric vector of length \code{QSarray$n.points}.
}
\examples{
##create example data
eset = matrix(rnorm(500*20),500,20, dimnames=list(1:500,1:20))
labels = c(rep("A",10),rep("B",10))
##first 30 genes are differentially expressed
eset[1:30, labels=="B"] = eset[1:30, labels=="B"] + 1
geneSets = list(diff.set=1:30, base.set=31:60)
##Run qusage
set.results = qusage(eset, labels, "B-A", geneSets)
##Plot the PDF (see also: plotDensityCurves() )
x = getXcoords(set.results, 1)
y = set.results$path.PDF[,1]
plot(x,y, type="l")
}
|
1a2b19067fd54f13451c3cc06cac03da9352c95c
|
61c7597aad45fbd3a648f6629b7d813222cf3766
|
/man/add_grid_layer.Rd
|
1331dc3d1c902d46ac49ef37d0c1dc6f84a836bc
|
[
"MIT",
"BSD-3-Clause"
] |
permissive
|
xytczh/deckgl
|
3d7ba021dcaf3b59860510d66a345e2b93acadec
|
3d38b3e4a7c718030efe8e61090f0b01beb92255
|
refs/heads/master
| 2022-04-21T16:24:28.118634
| 2020-04-13T17:19:57
| 2020-04-13T17:19:57
| 258,428,819
| 1
| 0
|
NOASSERTION
| 2020-04-24T06:42:16
| 2020-04-24T06:42:15
| null |
UTF-8
|
R
| false
| true
| 1,589
|
rd
|
add_grid_layer.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/layers_grid-layer.R
\name{add_grid_layer}
\alias{add_grid_layer}
\title{Add a grid layer to the deckgl widget}
\usage{
add_grid_layer(deckgl, id = "grid-layer", data = NULL,
properties = list(), ...)
}
\arguments{
\item{deckgl}{A deckgl widget object.}
\item{id}{The unique id of the layer.}
\item{data}{The url to fetch data from or a data object.}
\item{properties}{A named list of properties with names corresponding to the properties defined
in the \href{https://deck.gl/#/documentation/deckgl-api-reference}{deckgl-api-reference}
for the given layer class.}
\item{...}{More properties that will be added to the \code{properties} object. This can be useful
if you want to use a properties object for more than one layer.}
}
\description{
The \code{GridLayer} renders a grid heatmap based on an array of points. It takes a constant size for each cell and projects points into cells.
The color and height of each cell are scaled by the number of points it contains.
}
\examples{
## @knitr grid-layer
data("sf_bike_parking")
properties <- list(
extruded = TRUE,
cellSize = 200,
elevationScale = 4,
getPosition = ~lng + lat,
getTooltip = JS("object => `${object.position.join(', ')}<br/>Count: ${object.count}`"),
fixedTooltip = TRUE
)
deck <- deckgl(zoom = 11, pitch = 45, bearing = 35) \%>\%
add_grid_layer(data = sf_bike_parking, properties = properties) \%>\%
add_basemap()
if (interactive()) deck
}
\seealso{
\url{https://deck.gl/#/documentation/deckgl-api-reference/layers/grid-layer}
}
|
4061395a6b184983320509179c992648815d825b
|
07ab8b101571b2b3c991368e0d7791e4b7abc342
|
/script.r
|
78ee2c55e0a4c9f016c24993723ef0cf04f340be
|
[] |
no_license
|
x5pid/CRYPTO
|
294db9f02c1a0569e966a225ad79f7ebfd185cf2
|
213a91d14a36e1032727c6690da3e0c156e605f0
|
refs/heads/master
| 2021-04-03T10:23:50.547434
| 2018-04-08T21:00:45
| 2018-04-08T21:00:45
| 125,222,457
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 138
|
r
|
script.r
|
#! /usr/bin/env Rscript
# Summarise the values recorded in collisions2.txt: print the minimum,
# maximum and mean separated by single spaces, followed by a newline.
collision_values <- read.table('collisions2.txt')
collision_values <- unlist(collision_values, use.names = FALSE)
summary_stats <- c(min(collision_values), max(collision_values), mean(collision_values))
cat(summary_stats, sep = ' ')
cat('\n')
|
efbed10e630424a8c34e122d48c502430b73b3c0
|
0500ba15e741ce1c84bfd397f0f3b43af8cb5ffb
|
/cran/paws.developer.tools/man/codepipeline_list_tags_for_resource.Rd
|
8aa18f7d8e95ce7d6971a158dd36b2cf505823a8
|
[
"Apache-2.0"
] |
permissive
|
paws-r/paws
|
196d42a2b9aca0e551a51ea5e6f34daca739591b
|
a689da2aee079391e100060524f6b973130f4e40
|
refs/heads/main
| 2023-08-18T00:33:48.538539
| 2023-08-09T09:31:24
| 2023-08-09T09:31:24
| 154,419,943
| 293
| 45
|
NOASSERTION
| 2023-09-14T15:31:32
| 2018-10-24T01:28:47
|
R
|
UTF-8
|
R
| false
| true
| 1,017
|
rd
|
codepipeline_list_tags_for_resource.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/codepipeline_operations.R
\name{codepipeline_list_tags_for_resource}
\alias{codepipeline_list_tags_for_resource}
\title{Gets the set of key-value pairs (metadata) that are used to manage the
resource}
\usage{
codepipeline_list_tags_for_resource(
resourceArn,
nextToken = NULL,
maxResults = NULL
)
}
\arguments{
\item{resourceArn}{[required] The Amazon Resource Name (ARN) of the resource to get tags for.}
\item{nextToken}{The token that was returned from the previous API call, which would be
used to return the next page of the list. The ListTagsforResource call
lists all available tags in one call and does not use pagination.}
\item{maxResults}{The maximum number of results to return in a single call.}
}
\description{
Gets the set of key-value pairs (metadata) that are used to manage the resource.
See \url{https://www.paws-r-sdk.com/docs/codepipeline_list_tags_for_resource/} for full documentation.
}
\keyword{internal}
|
2c82bc3aee2e415ef98d475b4ba6c282cabc6e07
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/MSnbase/examples/normToReference.Rd.R
|
a8b7bd29d996e8448f79de41e695e27630d36c94
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 398
|
r
|
normToReference.Rd.R
|
## Auto-extracted example code for MSnbase::normToReference / combineFeatures
## (requires the Bioconductor package MSnbase; not runnable without it).
library(MSnbase)
### Name: normToReference
### Title: Combine peptides into proteins.
### Aliases: normToReference NTR
### ** Examples
# NOTE(review): MSnbase is attached a second time here because the extraction
# tool preserves the Rd example code verbatim; the repeated call is a no-op.
library("MSnbase")
data(msnset)
# choose the reference run automatically
combineFeatures(msnset, groupBy=fData(msnset)$ProteinAccession)
# use a user-given reference
combineFeatures(msnset, groupBy=fData(msnset)$ProteinAccession,
reference=rep(2, 55))
|
972b0d5c6a43c1a7d91e717b42414f90ced1cb32
|
c7e0ea5ce7aa2a6d322bf34613f4373b502abdb8
|
/Lab Homework/lab 7 homework/Lab 7 Homework Problem 2.R
|
787a67b91fd441571453fe7c22bc6c49ca4df9e1
|
[] |
no_license
|
alliyahg/BIS15W2020_agalvan
|
eb23623123a449d5475641651842dfae62b604cf
|
36845e30c487b77bb9d2f4b644f551d977677925
|
refs/heads/master
| 2020-12-13T12:08:49.156454
| 2020-03-13T01:25:52
| 2020-03-13T01:25:52
| 234,412,044
| 4
| 0
| null | 2020-01-30T17:48:52
| 2020-01-16T21:07:27
|
HTML
|
UTF-8
|
R
| false
| false
| 906
|
r
|
Lab 7 Homework Problem 2.R
|
library(tidyverse)
library(shiny)
library(shinydashboard)

# Load the UC admissions data (path is relative to the app directory).
UC_admit <- readr::read_csv("data lab 7/UC_admit.csv")

# BUG FIX: in the original the piped mutate_at() result was never assigned, so
# the conversion of Academic_Yr and Ethnicity to factors was silently
# discarded. Assign the transformed data frame back to UC_admit.
UC_admit <- UC_admit %>%
  mutate_at(vars(Academic_Yr), as.factor) %>%
  mutate_at(vars(Ethnicity), as.factor)

# UI: a dashboard with a radio-button selector for the fill variable and a
# large plot output.
ui <- dashboardPage(
  dashboardHeader(title = "Ethnicities Data App"),
  dashboardSidebar("UC System"),
  dashboardBody(
    radioButtons("x", "Select Choice", choices = c("Academic_Yr", "Campus", "Category"),
                 selected = "Category"),
    plotOutput("plot", width = "800px", height = "600px")
  ))

# Server: horizontal dodged bar chart of FilteredCountFR by ethnicity, filled
# by whichever variable the user selected in the radio buttons.
server <- function(input, output, session) {
  output$plot <- renderPlot({
    ggplot(UC_admit, aes_string(x = "Ethnicity", y="FilteredCountFR", fill=input$x)) +
      geom_col(position = "dodge") + theme_light(base_size = 18)+ coord_flip()+
      labs(x = "Ethnicity",
           y = "Filtered Count FR")
  })
  # Stop the R process when the browser session ends.
  session$onSessionEnded(stopApp)
}

shinyApp(ui, server)
|
4f3cf62abb5a7e65bc98ca78d49ed0ed3be2ee52
|
cb4ef01ac32a5c72abd6977ae13edc97d2b18d0f
|
/R/dci.R
|
1ca721b82af46d9cff84eed25119bf590b8ad543
|
[] |
no_license
|
jsta/dciR
|
735627ea3e1ac3f63e7a2be7122f1f353b60df14
|
f255af7e7e32cb2aeea371e515c74f536ae4fd1e
|
refs/heads/master
| 2021-01-17T08:10:44.051394
| 2016-07-31T00:15:56
| 2016-07-31T00:15:56
| 34,424,333
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,135
|
r
|
dci.R
|
#'@name dci
#'@import RBGL
#'@title Directional connectivity matrix
#'@param mat matrix 2D matrix of zeros and ones, in which ones represent the landscape patch of interest. The axis of interest along which directional connectivity is computed is dimension 1 of this matrix.
#'@param xgrain pixel length in cm (i.e., along dimension 1 of the variable "state")
#'@param ygrain pixel width in cm (i.e., along dimension 2 of the variable "state")
#'@details This function first converts a binary image to an adjacency matrix (larsen::im2adjacency). Next, this matrix is fed into a modified version of DCIu (DCIu_aont). DCIu_aont calls an underlying distance function in the process of returning a DCI value. The distance function (dijkstra_edit) requires an adjacency list which is created with the adj2adjL function.
#'@export
#'@examples
#'\dontrun{
#'mat <- matrix(c(0,0,1,0,0,0,1,0,1,0,0,1,0,0,1,0,0,0,1,0,1,1,0,0,0),nrow=5,byrow=TRUE)
#'mat<-matrix(c(1,1,1,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,0,1,1,1,0,0),nrow=5,byrow=TRUE)
#'xgrain<-1
#'ygrain<-1
#'dci(mat,xgrain,ygrain)
#'}
dci<-function(mat,xgrain,ygrain){
  ## BUG FIX: requireNamespace() takes the package name as a character string
  ## (it performs no non-standard evaluation), so the original bare symbol
  ## `RcppOctave` was evaluated as an undefined variable. Also fail fast when
  ## the package is unavailable instead of discarding the result.
  if (!requireNamespace("RcppOctave", quietly = TRUE)) {
    stop("Package 'RcppOctave' is required for dci().", call. = FALSE)
  }
  #run Octave####
  # Source the Octave helper scripts shipped with the dciR package.
  aontfiles<-c("adj2adjL.m","dijkstra_edit.m")
  for(i in seq_along(aontfiles)){
    RcppOctave::o_source(system.file("Octave","aont",aontfiles[i],package="dciR"))
  }
  larsen2012files<-c("DCIu_preRBGL.m","im2adjacency_full.m")
  for(i in seq_along(larsen2012files)){
    RcppOctave::o_source(system.file("Octave","larsen2012",larsen2012files[i],package="dciR"))
  }
  # Build adjacency/distance structures from the binary image in Octave.
  adj<-RcppOctave::.O$im2adjacency_full(mat,xgrain,ygrain)
  RcppOctave::o_assign(xgrain=xgrain)
  RcppOctave::o_assign(dist=adj$distance)
  RcppOctave::o_assign(pixelx=adj$pixelx)
  adjL<-RcppOctave::.O$adj2adjL(adj$adjacency)
  # NOTE(review): the sourced file is DCIu_preRBGL.m but the function invoked
  # is DCIupre -- confirm the Octave function name matches the file contents.
  dciu_pre<-RcppOctave::.O$DCIupre(adj$distance,xgrain,adj$pixelx)
  #run R####
  start_nodes<-dciu_pre$start_nodes
  adjacency<-adj$adjacency
  dist<-dciu_pre$distance
  pixelx<-adj$pixelx
  R<-dciu_pre$R
  dx<-dciu_pre$dx
  #Rprof()
  suppressMessages(dciu_rbgl(start_nodes,adjacency,dist,pixelx,R,dx,adjL))
  #Rprof(NULL)
  #summaryRprof()
}
|
0b136c2b7e11ca95f6fc7cb008149d6b79c84555
|
5cc230ad95dfcea7c8780e2d4c44a661536ff3ad
|
/R/dependencies.R
|
15e75e8ccfccef9fa31430ba6e7f1af0be5b278d
|
[
"MIT"
] |
permissive
|
timelyportfolio/remiotic
|
1abeb01a09c8adfaa2a9dd05e43f13e7aa8f1065
|
ab33d051e8ba68633817b5b19c3cf0cda07d1efd
|
refs/heads/master
| 2021-03-31T01:02:48.336346
| 2019-07-02T02:40:04
| 2019-07-02T02:40:04
| 124,822,933
| 7
| 0
|
NOASSERTION
| 2019-11-02T21:30:22
| 2018-03-12T02:29:41
|
HTML
|
UTF-8
|
R
| false
| false
| 722
|
r
|
dependencies.R
|
#' Dependencies for g2, bizcharts, and data-set
#'
#' @return \code{htmltools::htmlDependency}
#' @name dependencies
NULL
#' @rdname dependencies
#' @export
dep_corejs <- function() {
  # core-js shims/polyfills ES5 and ES6 features so that react renders inside
  # the RStudio Viewer (see https://unpkg.com/core-js@2.5.3/).
  dist_dir <- system.file("www/core-js/dist", package = "remiotic")
  htmltools::htmlDependency(
    name = "core-js",
    version = "2.5.3",
    src = c(file = dist_dir),
    script = "shim.min.js"
  )
}
#' @rdname dependencies
#' @export
dep_semiotic <- function() {
  # HTML dependency for the semiotic JavaScript bundle shipped with remiotic.
  dist_dir <- system.file("www/semiotic/dist", package = "remiotic")
  htmltools::htmlDependency(
    name = "semiotic",
    version = "1.9.11",
    src = c(file = dist_dir),
    script = "semiotic.min.js"
  )
}
|
85bb5f562ab8e7af67a7251407685090aad29b7a
|
f672121ef56b834ae5230cacb90fa97676fec1f9
|
/Assignment 9.1.R
|
be406e322e02f0523670da6aa56155b46aa7dd96
|
[] |
no_license
|
adhishree1001/Assignment-9.1
|
e46f4a33c3078dbb0ebfd65e3d878b0eeb7c23d7
|
259af099465a1e90420e8ff8b555f2d1804dbc7d
|
refs/heads/master
| 2021-05-12T09:16:33.365882
| 2018-02-25T20:52:19
| 2018-02-25T20:52:19
| 117,312,334
| 0
| 0
| null | null | null | null |
ISO-8859-1
|
R
| false
| false
| 1,467
|
r
|
Assignment 9.1.R
|
# 1. If Z ~ Normal(mean = 0, sd = 1)

# Find P(Z > 2.64): upper-tail probability of the standard normal.
pnorm(2.64, mean = 0, sd = 1, lower.tail = FALSE)

# Find P(|Z| > 1.39): two-sided tail probability.
# By symmetry pnorm(-1.39, lower.tail = FALSE) = P(Z > -1.39) = P(Z < 1.39),
# so answer = 2 * (1 - P(Z < 1.39)).
answer <- 2 * (1 - (pnorm(-1.39, mean = 0, sd = 1, lower.tail = FALSE)))
answer
# P(|Z| > 1.39) = P(Z > 1.39) + P(Z < -1.39) = 2*[1 - P(Z < 1.39)]
#               = 2*(1 - 0.9177) = 2*(0.0823) = 0.1646

# 2. Suppose p = the proportion of students who are admitted to the graduate
# school of the University of California at Berkeley, and suppose that a public
# relations officer boasts that UCB has historically had a 40% acceptance rate
# for its graduate school. Consider the data stored in the table UCBAdmissions
# from 1973.
# Assuming these observations constituted a simple random sample, are they
# consistent with the officer's claim, or do they provide evidence that the
# acceptance rate was significantly less than 40%?
# Use an alpha = 0.01 significance level.

# BUG FIX: View() is interactive-only and errors when this script is run
# non-interactively (e.g. via Rscript); it is commented out.
# View(UCBAdmissions)
apply(UCBAdmissions, c(1, 2), sum)
mosaicplot(apply(UCBAdmissions, c(1, 2), sum),
           main = "Student admissions at UC Berkeley")
prop.table(UCBAdmissions)
ftable(round(prop.table(UCBAdmissions), 3),
       row.vars="Dept", col.vars = c("Gender", "Admit"))
# One-sided proportion test of H0: p = 0.4 vs H1: p < 0.4 (no continuity
# correction). BUG FIX: use FALSE rather than the reassignable alias F.
prop.test(1755, 4526, p=0.4, alternative = "less", conf.level = 0.99, correct = FALSE)
# BUG FIX: the concluding remarks below were bare prose, which is a syntax
# error in R; they are now comments.
# Conclusion:
# NOTE(review): the original conclusion -- that because the null value 0.4 lies
# in the 99% confidence interval we are consistent with the officer's claim of
# a 40% acceptance rate -- should be double-checked against the printed
# prop.test() output before being reported.
|
8326c9a0de16f57d9eda555509b3f616e01cf577
|
52a27a46e62e306fd2af922aa733d54019121061
|
/pollutantmean.R
|
0afead735ed3d7e406b44f6fc4bdecc91c93c4f1
|
[] |
no_license
|
RuchiJuneja1/RProgrammingCoursera
|
8e8d40bb72e1bf87fb487448f6f3d3579edfc1e3
|
ce58a024205989de289a73440589441a2803b6a9
|
refs/heads/master
| 2021-03-12T19:59:31.154943
| 2015-02-22T15:43:14
| 2015-02-22T15:43:14
| 30,932,011
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,307
|
r
|
pollutantmean.R
|
pollutantmean <- function(directory, pollutant, id = 1:332) {
  ## 'directory' is a character vector of length 1 indicating
  ## the location of the CSV files (one file per monitor, named "001.csv",
  ## "002.csv", ..., "332.csv").
  ## 'pollutant' is a character vector of length 1 indicating
  ## the name of the pollutant column to average ("sulfate" or "nitrate").
  ## 'id' is an integer vector indicating the monitor ID numbers to be used.
  ## Returns the mean of 'pollutant' across all monitors listed in 'id',
  ## ignoring NA values, rounded to 3 decimal places.
  ##
  ## Fixes vs. the original:
  ##  * file names are built with sprintf("%03d.csv", ...) instead of a
  ##    hand-rolled if/else chain over id ranges (which also produced the
  ##    invalid path "NA" for ids outside 1:332);
  ##  * the accidental double operator in `sum_vec<--vector(...)` is gone;
  ##  * sums and counts are accumulated directly instead of via preallocated
  ##    logical vectors coerced to numeric.
  total <- 0
  n_obs <- 0
  for (monitor in id) {
    path <- file.path(directory, sprintf("%03d.csv", monitor))
    values <- read.csv(path)[[pollutant]]
    values <- values[!is.na(values)]   # drop missing observations
    total <- total + sum(values)
    n_obs <- n_obs + length(values)
  }
  round(total / n_obs, digits = 3)
}
|
d3125defb9059847078b02638b53ed1b4c8922ba
|
faca9fb310e0f5d25206dd7fbd8bd059e6facefb
|
/R/vcf.R
|
69177ae4d25c69e5fb0e46d5458f5907f643d338
|
[] |
no_license
|
imbs-hl/imbs
|
505f534fb68cd2d8fc6a3847f36784245cab3111
|
2d3ec95b81ea84623f007c5364ab19789a85715c
|
refs/heads/master
| 2023-08-11T08:33:42.695944
| 2019-09-05T20:01:22
| 2019-09-05T20:01:22
| 66,840,758
| 1
| 1
| null | 2018-01-29T15:02:18
| 2016-08-29T12:13:16
|
R
|
UTF-8
|
R
| false
| false
| 2,658
|
r
|
vcf.R
|
#' Normalize VCF file to bi-allelic variants
#'
#' Converting VCF files to plink format has never been easier. However, there are a few issues related to some intrinsic limitations of the plink format. The first is related to the fact that variants in a plink file are bi-allelic only, while variants in a VCF file can be multi-allelic. The second is related to an intrinsic limitation of plink which makes indel definitions ambiguous. Here is an example: is the following variant an insertion or a deletion compared to the GRCh37 reference?
#'
#' 20 31022441 A AG
#'
#' There is no way to tell, as the plink format does not record this information.
#'
#' Keeping this in mind, we are going to split mulit-allelic variants into bi-allelic ones, left-normalize indels, and assign unique idetifiers.
#'
#' @param vcf.file [\code{string}]\cr
#' The input VCF file path.
#' @param ref.file [\code{string}]\cr
#' A human reference genome \code{fasta} file to normalize indels against.
#' @param output.file [\code{string}]\cr
#' The output VCF file path.
#' @param bcftools.exec [\code{string}]\cr
#' Path of bcftools executable.
#' @param num.threads [\code{int}]\cr
#' Number of CPUs usable by bcftools
#' Default is determined by SLURM environment variables and at least 1.
#'
#' @return Captured system output as \code{character} vector.
#' @export
#'
vcf_normalization <- function(vcf.file, ref.file, output.file,
bcftools.exec = "bcftools",
num.threads) {
assertions <- checkmate::makeAssertCollection()
checkmate::assert_file(vcf.file, add = assertions)
checkmate::assert_file(ref.file, add = assertions)
checkmate::assert_directory(dirname(output.file), add = assertions)
assert_command(bcftools.exec, add = assertions)
if (missing(num.threads)) {
num.threads <- max(1, as.integer(Sys.getenv("SLURM_CPUS_PER_TASK")), na.rm = TRUE)
}
checkmate::assert_int(num.threads, lower = 1, add = assertions)
checkmate::reportAssertions(assertions)
# Run bcftools
system_call(
bin = bcftools.exec,
args = c(
"norm", "-Ou", "-m", "-any", vcf.file, # split multi-allelic alleles
"|",
bcftools.exec, "norm", "-Ou", "-f", ref.file, # normalize indels
"|",
bcftools.exec, "annotate", "--threads", num.threads, "-Oz", "-o", output.file, "-x", "ID", "-I", "+'%CHROM:%POS:%REF:%ALT'" # assign unique identifier
)
)
}
|
583ac085a6ff16204536f1de5fffe1fe37bf3ae2
|
3e6d16419caa195694378801400dea03e56f64a6
|
/W3.R
|
f9ee1293c07e13cfe0117d17a809b753361528c3
|
[] |
no_license
|
pablovmurcia3/Getting-and-Cleaning-Data
|
cdd1849a89fa9667c32f69261435de98480217e8
|
1fa7a843a06fdad9f2bc7d98f068ce0c1065511e
|
refs/heads/master
| 2022-11-21T00:30:01.588456
| 2020-07-20T15:15:27
| 2020-07-20T15:15:27
| 277,130,880
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 12,316
|
r
|
W3.R
|
################################################################################
# WEEK 3 #
################################################################################
# Get to tidy data!
# subsetting and sorting data
set.seed(13435)
X <- data.frame("var1"=sample(1:5),"var2"=sample(6:10),"var3"=sample(11:15))
X
X <- X[sample(1:5),] #scramble the data frame
X
X$var2[c(1,3)] = NA # make some values missing
X
X[,1]
X[,"var1"] #the same
X[1:2, "var2"] # subset rows and column at the same time
X[(X$var1 <= 3 & X$var3 > 11),] # with logical args (AND)
X[(X$var1 <= 3 | X$var3 > 15),] #(#OR)
# Dealing with missing values
X[X$var2 > 8,] # problem because NA
X[which(X$var2 > 8),] # subset with NAs
## Sorting
sort(X$var1)
sort(X$var1,decreasing=TRUE)
sort(X$var2,na.last=TRUE) # with the na at the end of the sort
sort(X$var2)
## Ordering
X[order(X$var1),]
X[order(X$var1,X$var3),] # first sort var 1 the by var 3
X <- data.frame(a = c(3,3,2,2),b= c(1,2,4,3))
X[order(X$a,X$b),]
# We can do the same with plyr package
install.packages("plyr")
library(plyr)
arrange(X,var1) #sorting
arrange(X, desc(var1))
# Add rows and columns
X$var4 <- rnorm(5) # var4 new variable.. assign new vector
X
Y <- cbind(X, rnorm(5)) # The same
Z <- rbind(Y, rnorm(5))
################################################################################
################################################################################
#Summarizing data
# crucial step in cleaning data
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl,destfile="./data/restaurants.csv",method="curl")
restData <- read.csv("./data/restaurants.csv")
# look a bit of the data
head(restData, n=3) # n number of rows
tail(restData, n=3)
summary(restData) #info of variables
str(restData)
restData$name <-as.factor(restData$name)
quantile(restData$councilDistrict, na.rm = TRUE)
quantile(restData$councilDistrict,probs=c(0.5,0.75,0.9)) # Look at different percentage
# Make table
table(restData$Location.1, useNA = "ifany") # useNA ifany to know the numer of Na
# in this case there are not na
table(restData$councilDistrict,restData$zipCode) # two variables
# check for missing values
sum(is.na(restData$councilDistrict))
any(is.na(restData$councilDistrict))
all(restData$zipCode > 0) # we hop tha zipc are greater than 0--- so we have a problem
## INTERESTING!!!!!
#row and column sums
colSums(is.na(restData)) # for every variable !
all(colSums(is.na(restData)) == 0) # we have a problem!!!
# Values with specific characteristics
table(restData$zipCode %in% c("21212")) # all the zipc that are 21212
# are there any valuas that fall into the vector
sum(restData$zipCode == 21212)
table(restData$zipCode %in% c("21213"))
table(restData$zipCode %in% c("21212","21213"))
# are there any valuas that are either equal to one or the other of these values
a<-restData[restData$zipCode %in% c("21212","21213"),] # more easy to subset
b<-restData[ restData$zipCode == "21212" | restData$zipCode == "21213", ]
identical(a,b)
# Cross tabs
data(UCBAdmissions)
DF <- as.data.frame(UCBAdmissions)
summary(DF)
xt <- xtabs(Freq ~ Gender + Admit,data=DF) # CROSS TAB
xt
?xtabs
# Falt tables
warpbreaks <- as.data.frame(warpbreaks)
summary(warpbreaks)
warpbreaks$replicate <- rep(1:9, len = 54)
xt = xtabs(breaks ~.,data=warpbreaks)
xt
ftable(xt)
# Size of a data set
fakeData <- rnorm(1e5)
object.size(fakeData)
print(object.size(fakeData),units="Mb")
################################################################################
################################################################################
# Creating new data
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://data.baltimorecity.gov/api/views/k5ry-ef3g/rows.csv?accessType=DOWNLOAD"
download.file(fileUrl,destfile="./data/restaurants.csv",method="curl")
restData <- read.csv("./data/restaurants.csv")
# creating sequences
s1 <- seq(1,10,by=2) ; s1
s2 <- seq(1,10,length=3); s2
x <- c(1,3,8,25,100); seq(along = x)
seq_along(x)  # preferred modern spelling of seq(along = x)
# Subsetting variables
restData$nearMe <- restData$neighborhood %in% c("Roland Park", "Homeland")
table(restData$nearMe)
# Creating binary variables
restData$zipWrong <- ifelse(restData$zipCode < 0, TRUE, FALSE)
table(restData$zipWrong,restData$zipCode < 0)
as.numeric(restData$nearMe)
# Creating categorical variables
quantile(restData$zipCode)
restData$zipGroups <- cut(restData$zipCode,breaks=quantile(restData$zipCode))
# break the variable according to the quantiles
# NOTE(review): without include.lowest=TRUE the minimum zip code falls outside
# the first interval and becomes NA -- confirm that is intended
table(restData$zipGroups)
class(restData$zipGroups)
table(restData$zipGroups,restData$zipCode)
# Easier cutting
install.packages("Hmisc")
library(Hmisc)
restData$zipGroups <- cut2(restData$zipCode,g=4) # cut2! only specify the number of groups
table(restData$zipGroups)
#Cutting produces factor variables
# Creating factor variables
restData$zcf <- factor(restData$zipCode)
restData$zcf[1:10]
class(restData$zcf)
# Levels of factor variables
yesno <- sample(c("yes","no"),size=10,replace=TRUE)
yesnofac <- factor(yesno,levels=c("yes","no"))
relevel(yesnofac,ref="yes")
as.numeric(yesnofac) # see as numeric: the level order determines the integer codes
# Using the mutate function
library(Hmisc); library(plyr)
restData2 <- mutate(restData,zipGroups=cut2(zipCode,g=4))
table(restData2$zipGroups)
# Common transforms
# `abs(x)` absolute value
# `sqrt(x)` square root
# `ceiling(x)` ceiling(3.475) is 4
# `floor(x)` floor(3.475) is 3
# `round(x,digits=n)` round(3.475,digits=2) is 3.48
# `signif(x,digits=n)` signif(3.475,digits=2) is 3.5
# `cos(x), sin(x)` etc.
# `log(x)` natural logarithm
# `log2(x)`, `log10(x)` other common logs
# `exp(x)` exponentiating x
################################################################################
################################################################################
# Reshape the data --- get to tidy!
# each variable forms a column
# each observation a row
install.packages("reshape2")
library(reshape2)
data(mtcars)
head(mtcars)
# Melt the data set: id variables are repeated, measure variables are
# stacked into (variable, value) pairs
mtcars$carname <- rownames(mtcars)
carMelt <- melt(mtcars,id=c("carname","gear","cyl"),measure.vars=c("mpg","hp"))
# melt the measured variables into one long (variable, value) column pair
head(carMelt,n=3)
tail(carMelt,n=3)
table(carMelt$cyl)
# With the melted data set we can RE-Cast it into different shapes
# Cast the data set
cylData <- dcast(carMelt, cyl ~ variable) # summarize the data set (default aggregation: count)
cylData # cyl in the rows and variable in the columns
cylData <- dcast(carMelt, cyl ~ variable,mean) # re-summarize with the mean instead
cylData
# Average values
data(InsectSprays)
head(InsectSprays)
tapply(InsectSprays$count,InsectSprays$spray,sum)
# Another way - split
spIns <- split(InsectSprays$count,InsectSprays$spray)
spIns
sprCount <- lapply(spIns,sum)
sprCount
unlist(sprCount)
sapply(spIns,sum)  # like lapply + unlist in one step
# Another way - plyr package
library(plyr)
ddply(InsectSprays,.(spray),summarize,sum=sum(count))
?ddply
# Creating a new variable
# ave() returns the group sum for every row, so the result has one row
# per observation rather than one row per group
spraySums <- ddply(InsectSprays,.(spray),summarize,sum=ave(count,FUN=sum))
dim(spraySums)
head(spraySums)
# usual combination --- Mutate + ddply
com <- mutate(InsectSprays, holis = ddply(InsectSprays,.(spray),summarize,sum = ave(count,FUN=sum))[,2])
com
################################################################################
################################################################################
# Dplyr package!
# The data frame is a key data structure in statistics and in R.
# There is one observation per row
# Each column represents a variable or measure or characteristic
# Primary implementation that you will use is the default R
# implementation
# Other implementations, particularly relational databases systems
# dplyr is faster than plyr because the low level is coded in C++
# The important verbs
# 1. select--- subset of columns
# 2. filter--- subset of rows
# 3. Arrange --- reorder
# 4. Rename
# 5. Mutate
# 6. Summarise
# dplyr.function(data.frame, other arguments) -> returns a data frame
install.packages("dplyr")
library(dplyr)
if(!file.exists("./data")){dir.create("./data")}
fileUrl <- "https://github.com/DataScienceSpecialization/courses/raw/master/03_GettingData/dplyr/chicago.rds"
download.file(fileUrl,destfile="./data/chicago.rds")
chicago <- readRDS("./data/chicago.rds")
dim(chicago)
str(chicago)
names(chicago)
# Select
head(select(chicago, city:dptp)) # : a notation to select all the columns
# between city and dptp
head(select(chicago, -(city:dptp))) # select all the columns except the ones in
# the -(:)
# Filter
chic.f <- filter(chicago, chicago$pm25tmean2 > 30)
head(chic.f)
chic.f <- filter(chicago, pm25tmean2 > 30 & tmpd > 80)
head(chic.f)
# the nice thing of these functions -- you can refer to the variables using their
# bare names
# Arrange
chicago <- arrange(chicago, date)
head(chicago)
tail(chicago)
chicago <- arrange(chicago, desc(date))
head(chicago)
tail(chicago)
# Rename
chicago <- rename(chicago, pm25 = pm25tmean2, dewpoint=dptp)
names(chicago)
head(chicago)
# Mutate
chicago <- mutate(chicago, pm25detrend = pm25 - mean(pm25, na.rm = TRUE))
head(chicago)
# Group by + summarize
# factor(1*(tmpd > 80), ...) turns the logical into 0/1 and labels the
# two levels "cold" (0) and "hot" (1)
chicago <- mutate(chicago, tempcat = factor(1*(tmpd > 80), labels = c("cold","hot")))
################################################################################
nums <- sample(70:90, 20, replace = TRUE)
factor(nums > 80) # displays TRUE and FALSE
factor(1*(nums > 80)) # displays 0 and 1
nums <- as.numeric(factor(nums > 80)) # displays 1 and 2
nums
################################################################################
hotcold <- group_by(chicago, tempcat)
hotcold
summarize(hotcold, pm25 = mean(pm25, na.rm = TRUE), o3 = max(o3tmean2),
          no2 = median(no2tmean2))
################################################################################
chicago$date[1000]
as.POSIXlt(chicago$date)$year[1000] + 1900  # POSIXlt years count from 1900
chicago$date[1]
as.POSIXlt(chicago$date)$mon[1]             # POSIXlt months are 0-based
################################################################################
chicago <- mutate(chicago, year = as.POSIXlt(date)$year + 1900)
years <- group_by(chicago, year)
summarize(years, pm25 = mean(pm25, na.rm = TRUE), o3 = max(o3tmean2, na.rm = TRUE),
          no2 = median(no2tmean2, na.rm = TRUE))
# Special operator -- chain different operations together -- pipe line
chicago %>% mutate(month = as.POSIXlt(date)$mon + 1) %>% group_by(month) %>%
        summarize(pm25 = mean(pm25, na.rm = TRUE),
                  o3 = max(o3tmean2, na.rm = TRUE),
                  no2 = median(no2tmean2, na.rm = TRUE))
# with %>% i dont have to specify the name of the data frame at each step
# we can use the dplyr verbs with data.table too!!!
################################################################################
# Merging Data
if(!file.exists("./data")){dir.create("./data")}
fileUrl1 = "https://raw.githubusercontent.com/jtleek/dataanalysis/master/week2/007summarizingData/data/reviews.csv"
fileUrl2 = "https://raw.githubusercontent.com/jtleek/dataanalysis/master/week2/007summarizingData/data/solutions.csv"
download.file(fileUrl1,destfile="./data/reviews.csv",method="curl")
download.file(fileUrl2,destfile="./data/solutions.csv",method="curl")
reviews = read.csv("./data/reviews.csv")
solutions <- read.csv("./data/solutions.csv")
head(reviews,2)
head(solutions,2)
names(reviews)
names(solutions)
# merge on reviews$solution_id == solutions$id
mergedData <- merge(reviews,solutions,by.x="solution_id",by.y="id",all=TRUE)
# all=TRUE -- full outer join: keep unmatched rows from both sides
head(mergedData)
# Using join in the plyr package
# Faster, but less full featured - defaults to left join,
library(plyr)
df1 <- data.frame(id=sample(1:10),x=rnorm(10))
df2 <- data.frame(id=sample(1:10),y=rnorm(10))
arrange(join(df1,df2),id) # join can only merge on column names common to
# both data.frames
# If you have multiple data frames, the plyr package is recommended (if the names
# are the same) -- join_all
df1 <- data.frame(id=sample(1:10),x=rnorm(10))
df2 <- data.frame(id=sample(1:10),y=rnorm(10))
df3 <- data.frame(id=sample(1:10),z=rnorm(10))
dfList <- list(df1,df2,df3) # all the data frames need to be in a list
join_all(dfList)
?join_all
|
74fb452c1646c7f26ff83744c24aa1f5fde6bb25
|
4ae0fd50679cc020ce92ba065211791448145f61
|
/Cluster_Analysis/partioning_cluster_analysis.R
|
f860c84d8c897a3cb3214da0a409de9fb377fbd1
|
[] |
no_license
|
JMunch/r_scripts
|
8950b1d2f96a999b4dc961c8329f94e83a7b7a16
|
5085a6173f75aa77f8c678b35f33de49ae5eb429
|
refs/heads/master
| 2020-07-02T14:20:41.855956
| 2016-12-21T19:12:42
| 2016-12-21T19:12:42
| 74,302,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,969
|
r
|
partioning_cluster_analysis.R
|
# Installing and Loading Required Packages --------------------------------
library(cluster)
library(factoextra)
# K-Means on simulated data -----------------------------------------------
# Simulate 100 2-d points in two clusters: one centred at (0, 0) and one
# centred at (1, 1), both with sd = 0.3.
set.seed(123)
df <- rbind(matrix(rnorm(100, sd = 0.3), ncol = 2),
            matrix(rnorm(100, mean = 1, sd = 0.3), ncol = 2))
# Compute k-means with k = 2; nstart = 25 restarts guards against a poor
# random initialisation.
set.seed(123)
km.res <- kmeans(df, 2, nstart = 25)
# Cluster number of each observation
km.res$cluster
# Size of each cluster
km.res$size
# Centroids
km.res$centers
# Plot the data points coloured by cluster, with centroids marked (pch = 8)
plot(df, col = km.res$cluster, pch = 19, frame = FALSE,
     main = "K-means with k = 2")
points(km.res$centers, col = 1:2, pch = 8, cex = 3)
# Compute k-means with k = 4
# BUG FIX: the original called set.seet(123), which does not exist and
# aborted the script here; the intended call is set.seed(123).
set.seed(123)
km.res <- kmeans(df, 4, nstart = 25)
plot(df, col = km.res$cluster, pch = 19, frame = FALSE,
     main = "K-means with k = 4")
points(km.res$centers, col = 1:4, pch = 8, cex = 3)
# K-Means on real data ----------------------------------------------------
# Load the data set
data("USArrests")
# Remove any missing value (i.e. NA values for not available)
# that might be present in the data
df = na.omit(USArrests)
head(df, n = 5)
# First some descriptive statistics, one row per variable
desc_stats = data.frame(
  Min = apply(df, 2, min), # minimum
  Med = apply(df, 2, median), # median
  Mean = apply(df, 2, mean), # mean
  SD = apply(df, 2, sd), # standard deviation
  Max = apply(df, 2, max) # Maximum
)
round(desc_stats, 2)
# scale the variables so distances are not dominated by large-scale columns
df_scaled = scale(df)
head(df)
# Determine the number of clusters with the "within sum of squares" (elbow)
# method; the dashed vertical line marks the chosen k = 4
fviz_nbclust(df_scaled, kmeans, method = "wss") +
  geom_vline(xintercept = 4, linetype = 2)
# Compute k-means clustering with k = 4
set.seed(123)
km.res = kmeans(df_scaled, 4, nstart = 25)
# Cluster means reported on the original (unscaled) measurement scale
aggregate(USArrests,
          by = list(km.res$cluster),
          mean)
# Plot the result
fviz_cluster(km.res, data = df_scaled)
# PAM clustering ----------------------------------------------------------
# Partitioning Around Medoids: like k-means, but each cluster centre is an
# actual observation (a medoid)
pam.res = pam(df_scaled, 4)
pam.res$medoids
head(pam.res$cluster)
# plot with cluster package
clusplot(pam.res, main = "Cluster plot k = 4",
         color = TRUE)
# plot with factoextra
fviz_cluster(pam.res)
# plot Silhouette: widths near 1 = well clustered, negative = likely misassigned
plot(silhouette(pam.res), col = 2:5)
fviz_silhouette(silhouette(pam.res))
# Check which observations have negative silhouette
sil = silhouette(pam.res)
neg_sil_index = which(sil[, 'sil_width'] < 0)
sil[neg_sil_index, , drop = FALSE]
# CLARA: Clustering Large Applications ------------------------------------
# CLARA runs PAM on random sub-samples, making medoid clustering feasible
# for larger data sets
set.seed(1234)
# Generate 500 objects, divided into 2 clusters
x = rbind(cbind(rnorm(200, 0, 8), rnorm(200, 0, 8)),
          cbind(rnorm(300, 50, 8), rnorm(300, 50, 8)))
# Compute clara with k = 2 on sub-samples of size 50
clarax = clara(x, 2 , sample = 50)
# Cluster plot
fviz_cluster(clarax, stand = FALSE, geom = "point",
             pointsize = 1)
# Silhouette plot
plot(silhouette(clarax), col = 2:3, main = "Silhoette plot")
# Medoids
clarax$medoids
|
9f6d426829140fb825069b3143e7dd4c800c61f5
|
b96e92d86bd142159e4674c59c6fbaf730049802
|
/R/vc_column_apply.R
|
b3813a5be80b3e71072a80193a5c284628430655
|
[] |
no_license
|
trinker/valiData
|
0ac536b9ed0435ff27f61973d949e9036fc8c1ac
|
59caaa67acaafb2508e90281812997464766d6f1
|
refs/heads/master
| 2022-06-09T05:59:46.696388
| 2022-05-12T18:25:54
| 2022-05-12T18:25:54
| 74,035,459
| 0
| 1
| null | 2016-11-17T14:37:24
| 2016-11-17T14:37:24
| null |
UTF-8
|
R
| false
| false
| 1,328
|
r
|
vc_column_apply.R
|
#' Apply Column Map to Dataframe
#'
#' Apply a column map from \code{read_column_map_dir} to a
#' \code{\link[base]{data.frame}}: column headers are matched
#' case-insensitively against the map, renamed to the map's canonical
#' spelling, and each mapped column's validation calls are evaluated
#' against the data.
#'
#' @param data A data frame to run column-wise tests on.
#' @param colmap A column map from \code{read_column_map_dir}: a named list
#'   whose names are column headers and whose elements are character vectors
#'   of validation calls (e.g. \code{"vc_no_digits()"}).
#' @return A list, one element per mapped column, each holding the
#'   (invisible) results of evaluating that column's validation calls.
vc_column_apply <- function(data, colmap){

    # Case-insensitive match of the data's headers against the map's
    # headers; restore the map's canonical capitalisation on the data.
    # (Replaces the deprecated dplyr::data_frame() lookup table with
    # plain base-R vectors; behaviour is unchanged.)
    map_names <- names(colmap)
    locs <- match(tolower(colnames(data)), tolower(map_names))
    canonical <- map_names[locs]
    colnames(data)[!is.na(canonical)] <- canonical[!is.na(canonical)]

    ## only check the headers that exist in both map and data
    map <- colmap[colnames(data)[colnames(data) %in% names(colmap)]]
    data <- data[names(map)]

    Map(function(x, y, z){
        # Rewrite each validation call "fun(args)" into
        # fun(data, "<column>", args), then drop the dangling comma left by
        # zero-argument calls: "fun()" -> fun(data, "<column>").
        replacement <- paste0("\\1", paste0("data, ", shQuote(z), ", \\2"))
        y <- gsub(",\\s*\\)", ")", gsub("(^[^\\(]+\\()(.+$)", replacement, y))

        # NOTE(review): eval(parse(...)) executes arbitrary code taken from
        # the column map; the map is assumed to be trusted input.
        invisible(lapply(y, function(w) {
            eval(parse(text=w))
        }))
    }, data, map, colnames(data))
}
|
5a82c9d615ca7955c7090889d88b772ef3e025e9
|
fed52643ad87ddd5e791fe6f249fa6b5a56a0785
|
/plot2.R
|
14fafc96e423e5e36d95f1cc65d0f0cbe5a879b9
|
[] |
no_license
|
Lcollins91/ExData_Plotting1
|
65f0756fc3134968a30d23f9816d34e93617b07f
|
6cb4efdbfa6e78e28386c6a99052cdbe7ab160b6
|
refs/heads/master
| 2021-01-21T06:27:01.072011
| 2017-02-26T20:56:55
| 2017-02-26T20:56:55
| 83,234,648
| 0
| 0
| null | 2017-02-26T19:30:35
| 2017-02-26T19:30:35
| null |
UTF-8
|
R
| false
| false
| 867
|
r
|
plot2.R
|
## First need to load the data.
if(!file.exists("./data")){dir.create("./data")}
download.file("https://d396qusza40orc.cloudfront.net/exdata%2Fdata%2Fhousehold_power_consumption.zip", destfile="./data/Power.zip")
# Get the names of the files in the zipped data folder
fileName <- unzip("./data/Power.zip")
library(lubridate)
# Store the data we want
power <- read.table(fileName, header = TRUE, sep = ";", stringsAsFactors = FALSE)
# Selecting the Dates We Want: build one POSIXct stamp from Date + Time,
# then keep 2007-02-01 and 2007-02-02 only
power$FullDate <- dmy_hms(paste(power$Date, power$Time))
power <- power[(power$FullDate >= ymd("2007-02-01")&power$FullDate < ymd("2007-02-03")),]
# Initiate File: open a 480x480 px PNG graphics device
png(file = "plot2.png", width = 480, height = 480, units = "px")
# Plot the data (as.numeric: any non-numeric entries in the raw column, if
# present, become NA -- presumably "?" markers; verify against the source file)
with(power, plot(as.numeric(Global_active_power)~FullDate, type = "l", xlab = NA, ylab = "Global Active Power (kilowatts)"))
# Close the file
dev.off()
|
e6f84988f6b7def04893868bca3f1b90c3f1abd1
|
6d4c39ba65bac38347ac6d65eafcae8f5b69bfaa
|
/Plot3.R
|
c26c64fd5cad14aed266bbdcf185b90eef2a8a4a
|
[] |
no_license
|
TAAH71/ExData_Plotting1
|
8d6c4fb761718beddbb132f9de9f71a110fe15dd
|
89759e5d4f23a140ca882452be4091f5beab25d0
|
refs/heads/master
| 2021-07-15T12:29:42.635901
| 2017-10-17T21:26:31
| 2017-10-17T21:26:31
| 106,662,189
| 0
| 0
| null | 2017-10-12T07:55:09
| 2017-10-12T07:55:09
| null |
UTF-8
|
R
| false
| false
| 1,522
|
r
|
Plot3.R
|
# Get file
# NOTE(review): hard-coded absolute paths and setwd() make this script
# machine-specific; kept as-is to preserve the original workflow.
setwd("C:/Users/Andrew/Documents/Coursera/4_Exploratory_Data_Analysis")
unzip("C:/Users/Andrew/Documents/Coursera/4_Exploratory_Data_Analysis/data/exdata_data_household_power_consumption.zip", overwrite = TRUE, exdir = "C:/Users/Andrew/Documents/Coursera/4_Exploratory_Data_Analysis/data")
powerdata <- read.table("C:/Users/Andrew/Documents/Coursera/4_Exploratory_Data_Analysis/data/household_power_consumption.txt", sep = ";")
# First row holds the headers because the file was read with header = FALSE defaults
colnames(powerdata) <- as.character(unlist(powerdata[1,]))
powerdata <- powerdata[-1,]
# Change format of date column
powerdata$Date <- as.Date(powerdata$Date,"%d/%m/%Y")
# Extract the two days of data we are interested in (2007-02-01 and 2007-02-02)
powerdata <- subset(powerdata, powerdata$Date > "2007/01/31")
powerdata <- subset(powerdata, powerdata$Date < "2007-02-03")
# BUG FIX: the plot below uses powerdata$DT, but no DT column was ever
# created, so the script stopped with an "object not found" error.
# Build the date-time column from the Date and Time fields.
powerdata$DT <- as.POSIXct(paste(powerdata$Date, powerdata$Time),
                           format = "%Y-%m-%d %H:%M:%S")
# Plot 3: energy sub-metering over time, one line per sub-meter
powerdata$Sub_metering_1 <- as.numeric(as.character(powerdata$Sub_metering_1))
powerdata$Sub_metering_2 <- as.numeric(as.character(powerdata$Sub_metering_2))
powerdata$Sub_metering_3 <- as.numeric(as.character(powerdata$Sub_metering_3))
with(powerdata,plot(DT,Sub_metering_1, type = "l", col = "grey",xlab="",ylab = "Energy sub metering"))
points(powerdata$DT, powerdata$Sub_metering_2, type = "l", col = "red")
points(powerdata$DT, powerdata$Sub_metering_3, type = "l", col = "blue")
legend("topright", legend = c("Sub_metering_1", "Sub_metering_2", "Sub_metering_3"), col = c("grey", "red", "blue"), lty = 1)
# Save to file
dev.copy(png, file="plot3.png", height=480, width=680)
dev.off()
|
384b7505c90adc5b93eea9aeecf4470f1a08e024
|
ea6469c85242a4df99d52823f96cff53078d3b86
|
/congestion_metric.R
|
d324cb37afb349e1bda213cb13cd9d7885744d68
|
[] |
no_license
|
snohan/trafikkdata
|
7ff2257467bd1425dbbbf5231542bd335019a7c7
|
1591b4ef847a9bf54fe5b1b3789bf3f1c4d11b52
|
refs/heads/master
| 2023-08-14T02:03:42.136163
| 2023-08-04T10:51:46
| 2023-08-04T10:51:46
| 159,170,326
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,674
|
r
|
congestion_metric.R
|
# VbV data from Kibana
library(tidyverse)
library(hms)
base::Sys.setlocale(locale = "nb.utf8")
source("H:/Programmering/R/byindeks/get_from_trafficdata_api.R")
source("H:/Programmering/R/byindeks/split_road_system_reference.R")
# Literature
# A Survey of Methods and Technologies for Congestion Estimation Based on Multisource Data Fusion
# https://www.mdpi.com/2076-3417/11/5/2306/htm
# https://research.geodan.nl/measuring-traffic-congestion-a-better-way/
# Estimation of traffic stream space mean speed from time aggregations of double loop detector data
# We should have vbv value of
# Time between activation of loop 1 and 2
# Time that loop 1 was activated
# TRP ----
trp <-
get_points_with_direction() |>
dplyr::distinct(trp_id, .keep_all = T)
# Choose which files to read
filename_root <- "kanalbrua_2021"
# Do read
vbv_data <-
list.files(
"congestion_data",
pattern = paste0("^", filename_root, ".*csv")
) |>
purrr::map_df(
~ read_a_file(.)
)
# Smoothing by intervals
# Alternative smoothing: exponential average of vbv
aggregated_data <-
vbv_data |>
dplyr::rename(
trp_id = traffic_registration_point_id
) |>
dplyr::filter(
valid_event == TRUE
) |>
dplyr::mutate(
event_timestamp =
lubridate::ymd_hms(
event_timestamp,
tz = "CET"
),
timestamp_floored =
lubridate::floor_date(
event_timestamp,
"5 mins"
),
interval_start = hms::as_hms(timestamp_floored),
date =
lubridate::date(event_timestamp),
valid_speed =
dplyr::if_else(
valid_speed == TRUE,
1,
0,
0
),
valid_speed_values =
dplyr::if_else(
valid_speed == 1,
speed,
NA_real_,
NA_real_
),
# Using personal car equivalents (pce) based on length
pce_length =
dplyr::case_when(
valid_length == FALSE ~ 5,
TRUE ~ length
),
pce = dplyr::case_when(
pce_length < 7.6 ~ 1,
pce_length < 12.5 ~ 2,
pce_length < 16 ~ 3.5,
TRUE ~ 5
)
) |>
dplyr::group_by(
trp_id,
timestamp_floored,
date,
interval_start,
lane
) |>
dplyr::summarise(
volume = n(),
volume_with_valid_speed = sum(valid_speed),
pce_volume = sum(pce),
#mean_speed = mean(speed),
space_mean_speed = 1 / mean(1 / valid_speed_values, na.rm = TRUE),
mean_time_gap = mean(time_gap),
.groups = "drop"
) |>
dplyr::mutate(
#flow = volume / 5 * 60, # Explicitly using 5 min aggregates
pce_flow = pce_volume / 5 * 60,
#density = flow / mean_speed,
pce_density = pce_flow / space_mean_speed
)
# When is traffic congested?
# Find maximum flow for a longer period, i.e. month.
# Find what mean speed is for that max flow.
# Then congestion occurs in all intervals with less than critical mean speed.
# To avoid single slow vehicles at night being tagged as congested, the
# mean time gap should be less than 5 s.
# Here we use time mean speed. Theoretically this should be replaced by
# space mean speed, but this is not measurable. If some cars have higher speeds
# than others,
# these influence time mean speed more than they would the space mean.
# Therefore, using the time mean speed can overestimate the critical speed,
# and thus underestimate flow and density.
# critical_values <-
#   aggregated_data |>
#   dplyr::group_by(
#     lane
#   ) |>
#   # TODO: polynomial regression (degree 2)
#   # TODO: use stats::optimize to find maximum
#   dplyr::slice_max(
#     order_by = pce_flow,
#     #n = 15,
#     prop = 0.005
#   ) |>
#   dplyr::summarise(
#     #max_flow = median(flow),
#     pce_max_flow = median(pce_flow),
#     #critical_speed = median(space_mean_speed),
#     #road_capacity = median(density),
#     # q = uk
#     pce_road_capacity = min(pce_density),
#     #pce_road_capacity = pce_max_flow / critical_speed,
#     critical_speed = pce_max_flow / pce_road_capacity,
#     .groups = "drop"
#   )
# Per lane: keep the intervals within 100 pce/h of the observed maximum flow
# and derive capacity and critical speed from them (q = u * k).
critical_values <-
  aggregated_data |>
  dplyr::group_by(
    lane
  ) |>
  dplyr::filter(
    pce_flow >= max(pce_flow) - 100
  ) |>
  dplyr::summarise(
    #max_flow = median(flow),
    pce_max_flow = min(pce_flow),
    #critical_speed = median(space_mean_speed),
    #road_capacity = median(density),
    # q = uk
    pce_road_capacity = min(pce_density),
    #pce_road_capacity = pce_max_flow / critical_speed,
    critical_speed = pce_max_flow / pce_road_capacity,
    .groups = "drop"
  )
# critical_values_2 <-
#   aggregated_data |>
#   dplyr::group_by(
#     lane
#   ) |>
#   dplyr::summarise(
#     q3_flow = quantile(flow, probs = c(0.75)),
#     q1_flow = quantile(flow, probs = c(0.25)),
#     iqr = q3_flow - q1_flow,
#     max_flow = q3_flow + 1.5 * iqr,
#     #critical_speed = median(mean_speed),
#     #road_capacity = median(density),
#     .groups = "drop"
#   )
# Tag each 5-minute interval: congested ("Ja") when the space mean speed is
# below the lane's critical speed AND density is at or above capacity.
data_congested <-
  aggregated_data |>
  dplyr::left_join(
    critical_values,
    by = "lane"
  ) |>
  dplyr::mutate(
    congestion =
      dplyr::case_when(
        space_mean_speed < critical_speed &
          #mean_time_gap < 5 &
          pce_density >= pce_road_capacity ~ "Ja",
        TRUE ~ "Nei"
      ),
    weekday =
      lubridate::wday(
        date,
        label = TRUE,
        abbr = FALSE,
        week_start = 1
      )
  )
# Share of congested intervals per TRP, weekday and lane; complete() inserts
# zero counts so the percentage is defined for every combination.
congestion_stats <-
  data_congested |>
  dplyr::group_by(
    trp_id,
    weekday,
    lane
  ) |>
  dplyr::count(
    congestion
  ) |>
  dplyr::ungroup() |>
  tidyr::complete(
    trp_id,
    weekday,
    lane,
    congestion,
    fill = list(n = 0)
  ) |>
  tidyr::pivot_wider(
    names_from = "congestion",
    values_from = "n"
  ) |>
  dplyr::mutate(
    congestion_percentage =
      Ja / (Ja + Nei) * 100
  )
readr::write_rds(
  congestion_stats,
  file =
    paste0("congestion_data/", filename_root, "_stats.rds")
)
# Write ----
# Persist TRP metadata, the critical values and the tagged intervals for reuse
find_trp_info_and_direction_names(data_congested) |>
  readr::write_rds(
    file = paste0("congestion_data/", filename_root, "_trp.rds")
  )
list(
  critical_values,
  data_congested
) |>
  readr::write_rds(
    file = paste0("congestion_data/", filename_root, ".rds")
  )
# Visually verify ----
## Density and flow ----
# Fundamental diagram per lane; red lines mark the derived max flow and capacity
data_congested |>
  ggplot(
    aes(
      x = pce_density,
      y = pce_flow,
      color = congestion
    )
  ) +
  geom_point() +
  facet_wrap(
    vars(lane)
  ) +
  geom_hline(
    data = critical_values,
    aes(
      yintercept = pce_max_flow
    ),
    color = "red"
  ) +
  geom_vline(
    data = critical_values,
    aes(
      xintercept = pce_road_capacity
    ),
    color = "red"
  )
## Speed and density ----
# Speed-density relation per lane; red lines mark critical speed and capacity
data_congested |>
  ggplot(
    aes(
      x = pce_density,
      y = space_mean_speed,
      color = congestion
    )
  ) +
  geom_point() +
  facet_wrap(
    vars(lane)
  ) +
  geom_hline(
    data = critical_values,
    aes(
      yintercept = critical_speed
    ),
    color = "red"
  ) +
  geom_vline(
    data = critical_values,
    aes(
      xintercept = pce_road_capacity
    ),
    color = "red"
  )
## Look at a particular day ----
# Speed over the day, point opacity scaled by density
data_congested |>
  dplyr::filter(
    date == "2021-09-30"
  ) |>
  ggplot(
    aes(
      x = timestamp_floored,
      y = space_mean_speed,
      color = congestion,
      alpha = pce_density
    )
  ) +
  geom_point() +
  facet_wrap(
    vars(lane)
  ) +
  geom_hline(
    data = critical_values,
    aes(
      yintercept = critical_speed
    ),
    color = "red"
  )
## Timegaps ----
# Mean time gap between vehicles over one day, per lane
data_congested |>
  dplyr::filter(
    date == "2021-10-29"
  ) |>
  ggplot(
    aes(
      x = timestamp_floored,
      y = mean_time_gap
    )
  ) +
  geom_point() +
  facet_wrap(
    vars(lane)
  )
|
3fb510d60c696712f24ad95eec9c16e224d4c24c
|
46099b44d50a2af496dce87234b923ee60541cc4
|
/PedigreeNetwork/ui.R
|
47f4c6340c10df76f086e76cf07a47ce84142abd
|
[] |
no_license
|
jhgille2/SoybeanPedigreeApp
|
0534ceec315beabc306a7192e7d5686bc591816a
|
f5ca7dd9c5f491796e1411809158ea78ad0aa620
|
refs/heads/main
| 2023-06-26T16:58:18.603937
| 2021-07-24T00:37:44
| 2021-07-24T00:37:44
| 360,652,905
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,412
|
r
|
ui.R
|
#
# This is the user-interface definition of a Shiny web application. You can
# run the application by clicking 'Run App' above.
#
# Find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com/
#
library(shiny)
library(igraph)
library(visNetwork)
library(tidyverse)
# Define UI for application that draws a histogram
# UI definition: a cultivar picker plus generation depth on the left,
# the interactive pedigree network on the right
shinyUI(fluidPage(
    # Application title (page header and browser window title)
    titlePanel(title = h1("Soybase parentage data explorer", h3("Hover over circles in the graph to go to the corresponding Soybase entry")), windowTitle = "Soybase pedigree viewer"),
    fluidRow(
        column(2,
               # Cultivar picker; choices = NULL because the choice list is
               # populated server-side via updateSelectizeInput
               selectizeInput("CultivarSelection",
                              label = "Cultivar",
                              choices = NULL,
                              selected = "NC-Roy",
                              multiple = FALSE),
               # How many ancestor generations to draw in the network
               sliderInput("generations",
                           "Number of generations",
                           min = 1,
                           max = 20,
                           value = 4),
               textOutput("crossString"), tags$head(tags$style("#clickGene{color:red; font-size:12px; font-style:italic;
                                overflow-y:scroll; max-height: 50px; background: ghostwhite;}"))),
        column(10,
               # The rendered pedigree graph (server output "network")
               visNetworkOutput("network", height = "700px"))
    )
)
)
|
b60433d4d8c9311ae402eea6a8eded5f54a212de
|
f40d0763dd69945ac6c5726f820dd56a1c2e1c57
|
/GrowthCurveAnalysis/R/gExport.r
|
f153ef2f33f42ce822a8ade744aeb69b11f8c007
|
[] |
no_license
|
KarlynB23/GCAF_git
|
1b3b3f414f41b9e0e5eba1766df652cb268c9a17
|
3bb386657cc0cc13312d28ec71b5aa3855c9e2fd
|
refs/heads/master
| 2020-12-24T14:52:58.822631
| 2011-07-27T23:47:32
| 2011-07-27T23:47:32
| 2,115,176
| 1
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,534
|
r
|
gExport.r
|
#BGN FUNCTION

#' Export growth-curve analysis results to a tab-delimited spreadsheet.
#'
#' Saves the fitted parameters and meta information of one or more
#' gAnalysis objects for use outside of the gFunctions package.
#'
#' @param g.analysis An object of class \code{gAnalysis} produced by
#'   \code{gAnalysis}, or a list of such objects.
#' @param name File name (without extension) for the exported file.
#' @param control A list as produced by \code{gControl()}; only
#'   \code{$export.folder} (the output directory) is used here.
#' @return The exported data frame (meta columns followed by parameter
#'   columns), also written to \code{<export.folder>/<name>.txt}.
gExport <- function(g.analysis, name = "LastExport", control = gControl()) {

  # Normalise the input to a flat list of wells. A single gAnalysis object
  # is itself the list of wells; a plain list is assumed to hold several
  # gAnalysis objects whose wells are concatenated together.
  # (inherits() replaces the fragile class(x) == "..." comparisons, which
  # misbehave when an object carries more than one class.)
  if (inherits(g.analysis, "gAnalysis")) {
    all.wells <- g.analysis
  } else if (inherits(g.analysis, "list")) {
    all.wells <- list()
    for (i in seq_along(g.analysis)) {
      if (!inherits(g.analysis[[i]], "gAnalysis")) {
        stop("Must provide a list of gAnalysis objects")
      }
      # c() on two lists splices this gAnalysis' wells into the accumulator
      all.wells <- c(all.wells, g.analysis[[i]])
    }
  } else {
    stop("Need to provide either a gAnalysis object or a list of gAnalysis objects")
  }

  already.added <- NULL
  to.export.info <- data.frame()
  to.export.parameters <- data.frame()

  # Add information from each well to the export tables. Rows are indexed
  # by the loop counter, so a duplicate well (same date/initials/number)
  # followed by a unique one leaves an all-NA row, matching the historical
  # output format. seq_along() (not 1:length) keeps empty input safe.
  for (i in seq_along(all.wells)) {
    g.analysis.well <- all.wells[[i]]
    if (!inherits(g.analysis.well, "gAnalysisWell")) {
      stop("The wells in the provided g.Analysis object are not of class gAnalysisWell")
    }
    well.id <- paste(g.analysis.well$Info$Date, g.analysis.well$Info$Initials,
                     g.analysis.well$Info$Well.Number)
    if (!is.element(well.id, already.added)) {
      already.added <- c(already.added, well.id)
      well.info <- g.analysis.well$Info
      for (nm in names(well.info)) {
        to.export.info[i, nm] <- as.character(well.info[[nm]])
      }
      well.parameters <- g.analysis.well$parameters
      for (nm in names(well.parameters)) {
        to.export.parameters[i, nm] <- as.character(well.parameters[[nm]])
      }
    }
  }

  to.export <- cbind(to.export.info, to.export.parameters)

  # Create the export folder on first use, then write a tab-delimited file.
  export.file.name <- paste0(control$export.folder, "/", name, ".txt")
  if (!dir.exists(control$export.folder)) {
    dir.create(control$export.folder)
  }
  write.table(to.export, file = export.file.name, append = FALSE, sep = "\t",
              row.names = FALSE)
  to.export
} #END FUNCTION
|
372107d398257f7c0cfe6bf31149f6ba68dbf387
|
1e9c9f2a9639db7cdb032aae69cb4d99aef1d3a5
|
/dataCamp/introductionToR/6_lists/7_addingMoreMovieInformationToTheList.R
|
1ac7f46759858e48d7dab5e8ebc33ecdafe74a4b
|
[
"MIT"
] |
permissive
|
sagarnikam123/learnNPractice
|
f0da3f8acf653e56c591353ab342765a6831698c
|
1b3b0cb2cff2f478006626a4c37a99102acbb628
|
refs/heads/master
| 2023-02-04T11:21:18.211654
| 2023-01-24T14:47:52
| 2023-01-24T14:47:52
| 61,184,927
| 2
| 1
|
MIT
| 2022-03-06T11:07:18
| 2016-06-15T06:57:19
|
Python
|
UTF-8
|
R
| false
| false
| 1,259
|
r
|
7_addingMoreMovieInformationToTheList.R
|
# Adding more movie information to the list
#########################################################################################################################
#
# A senior club member pointed out that the release year is missing from the
# movie list, so we append it here.
#
# Lists are extended with the concatenate function c():
#
#     c(list1, some_variable)
#
# appends the component to the end of list1. To give the new item a name,
# supply it as a named argument:
#
#     c(list1, new_name = some_variable)
#
#########################################################################################################################

# 'shining_list' (moviename, actors and reviews) is pre-loaded in the workspace

# Append the release year to shining_list under the name "year"
shining_list_full <- c(shining_list, year = 1980)

# Inspect the structure of the extended list
str(shining_list_full)
|
c10a2d895adaccb0588d422c68927f6b2274843f
|
aba0008e63f77b56c8d437faa3d77677e5c8aa69
|
/1-moderate/longest_lines.R
|
34d303657b82096a82c526041d1606cc94858699
|
[] |
no_license
|
OehlR/codeeval
|
d690a11a5a1c2a5bbfaebd607452c953f78706a8
|
b269b99d1d639ec17688dd9f7e66e23e6711fc67
|
refs/heads/master
| 2021-05-02T08:58:46.005968
| 2017-11-03T22:25:12
| 2017-11-03T22:25:12
| 28,037,547
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 287
|
r
|
longest_lines.R
|
# CodeEval "Longest lines": print the N longest lines of the input file,
# where N is given on the file's first line.
args <- commandArgs(trailingOnly=TRUE)
# Inline debug fixture; flip to TRUE to run without a command-line argument.
# (Spelled FALSE rather than F: F is an ordinary variable and can be reassigned.)
if(FALSE) {
    cat('2\nHello World\nCodeEval\nQuick Fox\nA\nSan Francisco\n',file=f<-tempfile())
    args<-list(f)
    #unlink(f)
}
l <- readLines(args[[1]], warn = FALSE)
# BUG FIX: the count line itself used to be included in the length ranking,
# and odr[1:n] could index past the end. Separate the count from the data
# lines and clamp with head().
n <- as.integer(l[1])
data_lines <- l[-1]
ordered_lines <- data_lines[order(nchar(data_lines), decreasing = TRUE)]
cat(head(ordered_lines, n), sep = "\n")
|
b95202540e9de3f67ccc0502156789f5de28c825
|
c2171eaf8201b9de995a5f6f5e1ff3eec8963374
|
/R/breed.R
|
c27ea8d465dd48bb28002b0b01df4ec2049fafe5
|
[] |
no_license
|
SeemaShet/GA
|
a02b8ff0ae6e0689c0b039ede3a59d96bf4813b0
|
43cf7def2f5cb70555b95ff8225c7863994c1b8f
|
refs/heads/master
| 2022-04-02T19:10:43.994843
| 2019-12-20T00:36:16
| 2019-12-20T00:36:16
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,301
|
r
|
breed.R
|
#' Breed
#'
#' Breeds a new population from the current population by crossing over and
#' mutating pairs of individuals. It uses two helper functions, crossover()
#' and mutation(). Two candidate parents are selected at random from the
#' current population each iteration; their two offspring are appended to
#' the new generation. The function always returns the same number of
#' individuals as the original population size (one surplus offspring is
#' dropped at random when pop_size is odd).
#'
#' @param current_gen matrix; chromosomes for the current generation, one
#'   row per individual, one column per gene
#' @param pop_size positive integer; size of the original population
#'   (number of individuals)
#' @param crossover_count positive integer; number of crossover points
#'   (default 1)
#' @param mut_prob float; probability of mutation (between 0 and 1)
#'   (default 0.01)
#' @param mutations positive integer; number of genes to be mutated when an
#'   offspring is selected to mutate (default 5)
#' @return a matrix with pop_size rows holding the new generation
#' @export
breed <- function(current_gen, pop_size, crossover_count = 1, mut_prob = 0.01, mutations=5){
  if(crossover_count < 1 | floor(crossover_count) != crossover_count ){
    stop("Number of crossover points should be a positive integer!.")
  }
  #check if mut_prob is a valid probability
  if (mut_prob > 1 | mut_prob < 0) {
    stop("The probability of mutation should be between 0 and 1.")}
  #Number of individuals in current_gen (currently unused)
  current_gen_len = nrow(current_gen)
  #Number of genes per chromosome
  gene_len <- ncol(current_gen)
  #Preallocate an even number of offspring rows (children come in pairs)
  new_gen = matrix(NA,nrow=2*ceiling(pop_size/2), ncol=gene_len)
  for (i in 1:ceiling(pop_size/2)){
    #Select two chromosomes at random to be the parents
    parent_chromeSet <- current_gen[sample(nrow(current_gen),size=2),]
    #Crossover yields a pair of offspring
    offspring_chromeSet <- crossover(parent_chromeSet[1,],parent_chromeSet[2,], crossover_count)
    offspring_1 <- unlist(offspring_chromeSet[1])
    offspring_2 <- unlist(offspring_chromeSet[2])
    #Mutation on the offsprings
    offspring_1 <- mutation(offspring_1, mut_prob, mutations)
    offspring_2 <- mutation(offspring_2, mut_prob, mutations)
    new_gen[(2*i-1),] <- offspring_1
    new_gen[(2*i),] <- offspring_2
  }
  #Sample pop_size offspring (drops one at random when pop_size is odd)
  new_gen <- new_gen[sample(nrow(new_gen),size=pop_size),]
  return(new_gen)
}
|
11dd133feb28bd744a3054087f03e5169dad8c96
|
63f7a5e85c5322234589efad98cd996f505dd9c9
|
/man/bootstrap.Rd
|
ef2755a861dfe266d5d2b2b3975d0b0363c99f6a
|
[] |
no_license
|
EllaKaye/BLB
|
e9bc1ae3063bf3c5ac0a853dabfd1e08624a7930
|
4a8af58f13bd2f87c924b36c65f617737df50f02
|
refs/heads/master
| 2021-01-10T17:13:29.810703
| 2015-12-04T08:59:42
| 2015-12-04T08:59:42
| 46,873,538
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 491
|
rd
|
bootstrap.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/multi_regr.R
\name{bootstrap}
\alias{bootstrap}
\title{Bootstrap}
\usage{
bootstrap(x, y, B, n)
}
\arguments{
\item{x}{the predictor values of the original sample, as a vector}

\item{y}{the response values of the original sample, as a vector}

\item{B}{the number of bootstrap replications}

\item{n}{the number of resamples to generate}
}
\value{
vector with the standard deviation of the regression coeffiecients
}
\description{
Bootstrap
}
\examples{
\dontrun{
bootstrap(x = rnorm(20), y = rnorm(20), B = 100, n = 20)
}
}
|
9ffd70328642ee8a881c12bcb8d7c699b7d1a0f3
|
4e5c61bf13f256bdcc689f8a7e7c1a410e75258e
|
/legacy/IR_plot_190207_v1.R
|
88e538a226dc720e69a67392bdcd3b6a40600a4a
|
[
"Apache-2.0"
] |
permissive
|
JureZmrzlikar/intron
|
61d85b0297de986d1d0f56d1cd140a1ec8bc96e3
|
a9850695846607f877d2795144e8cb9c2277a8c6
|
refs/heads/master
| 2020-07-29T03:36:59.257157
| 2020-01-20T11:21:43
| 2020-01-23T10:18:28
| 209,654,954
| 0
| 0
| null | 2019-09-19T21:53:39
| 2019-09-19T21:53:39
| null |
UTF-8
|
R
| false
| false
| 13,640
|
r
|
IR_plot_190207_v1.R
|
library(reshape2)
library(ggplot2)
library(gdata)
library(plyr)
library(readr)
## Map a background colour name to a front/back colour pair.
##
## Args:
##   bg_color: either "white" or "black".
## Returns:
##   A character vector c(front_color, back_color): black-on-white for a
##   white background, white-on-black for a black background.
##
## An invalid bg_color now raises an explicit error; the original fell
## through both if-branches and failed later with the confusing
## "object 'front_color' not found".
get_bg_colors = function(bg_color){
  if (bg_color == "white") {
    front_color <- "black"
    back_color <- "white"
  } else if (bg_color == "black") {
    front_color <- "white"
    back_color <- "black"
  } else {
    stop("bg_color must be 'white' or 'black'")
  }
  return(c(front_color, back_color))
}
## SOURCE: https://stackoverflow.com/questions/28273716/r-implementation-for-finding-the-longest-common-starting-substrings-in-a-set-of
##
## Longest common starting substring of a character vector.
##
## After sorting, the common prefix of the whole vector equals the common
## prefix of the lexicographically first and last elements, so only those
## two strings need to be compared. Returns character(0) when there is no
## common prefix.
comsub <- function(x) {
  # sort the vector; the lexicographic extremes bound the common prefix
  x <- sort(x)
  # split the first and last element by character
  d_x <- strsplit(x[c(1, length(x))], "")
  # index of the first non-matching character; the common prefix ends one
  # character before it
  der_com <- match(FALSE, do.call("==", d_x)) - 1
  # match() returns NA when no mismatch is found, i.e. when one string is a
  # full prefix of the other (or all strings are identical). In that case
  # the whole shorter string is the common prefix. (The original returned
  # NA here.) After sorting, a full prefix always sorts first, so x[1] is
  # the shorter string.
  if (is.na(der_com)) {
    der_com <- min(nchar(x[1]), nchar(x[length(x)]))
  }
  # plain if/else instead of scalar ifelse() wrapping return() calls
  if (der_com == 0) {
    character(0)
  } else {
    substr(x[1], 1, der_com)
  }
}
###
### Subset an IR data table to the requested samples and build the matching
### condition labels and colour vector.
### Returns list(data_table, k_labs, color_vector).
###
### NOTE(review): the original header flagged this for a rewrite ("ONLY
### REPLETE FUNCTION" / "RE-WRITE TO BE MORE INTUITIVE (LESS HARD CODING)").
### Column positions 1:2 are assumed to be the id columns (intron_id, info)
### and samples are matched by grep, so sample names act as substrings of
### column names -- confirm both assumptions before reuse.
###
get_IR_table = function(data_table,sample_names,color_vector,average){
  # Isolate from the data table only the specified samples: grep on an OR'd
  # pattern of the sample names against the column names.
  new_colnames = grep(paste(sample_names,collapse="|"),colnames(data_table))
  data_table = cbind(data_table[,1:2],data_table[,new_colnames])
  # k = all retained data columns (everything after the two id columns).
  k = colnames(data_table)[-c(1,2)]
  print(new_colnames)
  print(k)
  if(average==FALSE){
    # One label per matched column. A 2-colour vector is expanded into a
    # ramp across all columns; otherwise colours are used as given.
    k_labs=k
    if(length(color_vector)==2){
      color_vector = colorRampPalette(color_vector)(length(k))
    }
    else(color_vector = color_vector)
  }
  if(average==TRUE){
    # Collapse replicate columns into a single mean column per sample name.
    new_data_table = data_table[,1:2]
    k_labs = sample_names
    for(sample in sample_names){
      temp_table = data_table[,grep(sample,colnames(data_table))]
      new_data_table[[sample]] = rowMeans(temp_table)
    }
    n=1 # NOTE(review): appears to be an unused leftover -- confirm
    data_table = new_data_table
  }
  print(color_vector)
  output = list(data_table,k_labs,color_vector)
  return(output)
}
## Print the exact one-sided (greater) Kolmogorov-Smirnov p-value comparing
## the first two requested samples of an IR table.
##
## Reads a tab-delimited IR table from data_file, restricts it to the
## requested samples via get_IR_table(), drops rows with non-positive values
## in any retained condition column, then prints the exact KS p-value for
## sample_names[1] vs sample_names[2]. Row counts before and after filtering
## are printed as a spot check. Called for its printed output; bg_color is
## validated via get_bg_colors(), and n is accepted for interface parity
## with the sibling plotting functions.
exact_ks_test = function(data_file,sample_names,color_vector,bg_color,n,average=FALSE){
  raw_table <- as.data.frame(read_delim(data_file, "\t", escape_double = FALSE, trim_ws = TRUE))
  front_color <- get_bg_colors(bg_color)[1]
  back_color <- get_bg_colors(bg_color)[2]
  prepared <- get_IR_table(raw_table, sample_names, color_vector, average)
  ir_table <- as.data.frame(prepared[[1]])
  condition_labels <- prepared[[2]]
  color_vector <- prepared[[3]]
  # Rows with zero values would break a log-scale analysis, so drop any row
  # that is non-positive in any retained condition column.
  print(nrow(ir_table))
  for (lab in condition_labels) {
    ir_table <- ir_table[ir_table[[lab]] > 0, ]
  }
  print(nrow(ir_table)) # spot check to see how many rows you are losing
  first_sample <- ir_table[[sample_names[1]]]
  second_sample <- ir_table[[sample_names[2]]]
  print(ks.test(first_sample, second_sample, alternative = "greater", exact = TRUE)$p.value)
}
## Plot an empirical CDF of log10(IR score) per condition and save it as
## both PDF and PNG.
##
## Args:
##   data_file: path to a tab-delimited IR table; the first two columns are
##     assumed to be intron_id and info (see get_IR_table).
##   sample_names: names (substrings of column names) of the conditions.
##   color_vector: colours forwarded to get_IR_table (a 2-colour vector is
##     expanded into a ramp).
##   bg_color: "white" or "black"; picks foreground/background colours.
##   n: not used in this function -- kept for interface parity.
##   x_min, x_max: x-axis limits in log10(IR) units (x_max defaults to 0).
##   output: path prefix for the saved files.
##   average: if TRUE, replicate columns are averaged per sample name.
##   pseudo: if not FALSE, a pseudo-count added to columns 3:4 before
##     filtering. NOTE(review): hard-coded to columns 3:4, which assumes
##     exactly two data columns -- confirm before using with >2 conditions.
##   legend_pos: legend position vector, or FALSE to hide the legend.
##
## Side effects: prints row counts, both one-sided exact KS p-values, the
## condition labels; writes <output>..._eCDF.pdf/.png; calls dev.off().
plot_logIR_eCDF = function(data_file,sample_names,color_vector,bg_color,n,
                           x_min,x_max=0,output,average=FALSE,pseudo=FALSE,legend_pos=FALSE){
  data_table = read_delim(data_file,"\t",escape_double = FALSE,trim_ws = TRUE)
  front_color = get_bg_colors(bg_color)[1]
  back_color = get_bg_colors(bg_color)[2]
  data_table = as.data.frame(data_table)
  data_out = get_IR_table(data_table,sample_names,color_vector,average)
  data_table = as.data.frame(data_out[[1]])
  k_labs = data_out[[2]]
  color_vector = data_out[[3]]
  if(pseudo!=FALSE){
    data_table[,3:4] = data_table[,3:4]+pseudo
  }
  # Need to remove rows (i.e. for all conditions) in which there are
  # 0-values, since the plot is in log scale.
  print(nrow(data_table))
  for (i in k_labs){
    data_table=data_table[with(data_table,eval(as.name(i)))>0,]
  }
  print(nrow(data_table)) #spot check to see how many rows you are losing
  # Exact KS tests in both directions between the first two samples.
  sample_values_1 = data_table[[sample_names[1]]]
  sample_values_2 = data_table[[sample_names[2]]]
  print(ks.test(sample_values_1,sample_values_2,alternative="greater",exact=TRUE)$p.value)
  print(ks.test(sample_values_1,sample_values_2,alternative="less",exact=TRUE)$p.value)
  # Reshape wide -> long so each row is one (intron, condition, IR) triple.
  long_data = melt(data_table,id.vars=c("intron_id","info"),variable.name="Condition",value.name="IR_Ratio")
  sub_data = long_data[long_data$Condition %in% k_labs,]
  print(nrow(sub_data))
  sub_data$log_IR_Ratio = log10(sub_data$IR_Ratio)
  print(k_labs)
  if(legend_pos!=FALSE){
    legend_pos = as.vector(legend_pos)
  }else{legend_pos="none"}
  # Build the eCDF plot; ggsave() below picks up this last plot.
  ggplot(sub_data,aes(x=log_IR_Ratio,color=Condition))+
    stat_ecdf(size=1.2)+
    ggtitle(paste(nrow(data_table),"introns",sep=" "))+
    ylab("Fraction of introns")+
    xlab("Log10(IR Score)")+
    coord_cartesian(xlim=c(x_min,x_max))+
    scale_color_manual(values=color_vector,labels=k_labs)+
    theme(panel.border=element_blank(),
          panel.background=element_blank(),
          plot.background=element_rect(fill="transparent",colour=NA),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          axis.ticks=element_line(colour=front_color),
          axis.title=element_text(colour=front_color,size=18),
          axis.text=element_text(colour=front_color,size=18),
          legend.background = element_rect(fill="transparent"),
          legend.title = element_text(color=front_color,size=16),
          legend.key=element_rect(fill="transparent",color=NA),
          legend.text=element_text(color=front_color,size=12),
          legend.justification=c(1,0),
          legend.position=legend_pos,
          plot.title=element_text(color=front_color))+
    theme(axis.line.x=element_line(color=front_color))+
    theme(axis.line.y=element_line(color=front_color))+
    theme(panel.border = element_rect(color=front_color,fill=NA,size=2))
  # Most images in the past have been width = 6, height = 6.
  ggsave(paste(output,k_labs[1],"_",k_labs[2],"_",bg_color,"_eCDF.pdf",sep=""),width=6,height=6,bg="transparent")
  ggsave(paste(output,k_labs[1],"_",k_labs[2],"_",bg_color,"_eCDF.png",sep=""),width=6,height=6,bg="transparent")
  dev.off()
}
## Density-overlaid scatter plot of IR scores for two conditions and save
## it as PNG.
##
## Args:
##   data_file: path to a tab-delimited IR table (columns 1:2 assumed to be
##     intron_id and info; see get_IR_table).
##   sample_names: names (substrings of column names) of the two conditions;
##     k_labs[2] is plotted on x and k_labs[1] on y.
##   color_vector / bg_color: colour handling as in the sibling functions.
##   n: not used in this function -- kept for interface parity.
##   x_min, x_max: shared axis limits for both axes (x_max defaults to 0).
##   output: path prefix for the saved file; title: extra filename tag.
##   average: if TRUE, replicate columns are averaged per sample name.
##   pseudo: if not FALSE, a pseudo-count added to columns 3:4.
##     NOTE(review): hard-coded to columns 3:4 -- assumes two data columns.
##
## NOTE(review): the axis labels say "Log10(IR Score)" but the values are
## plotted as-is (the log10 transform line is commented out) -- confirm
## whether input tables are already log-scaled.
## Side effects: prints row counts, writes <output>..._scatter.png, calls
## dev.off().
plot_logIR_duoScatter = function(data_file,sample_names,color_vector,bg_color,n,x_min,x_max=0,output,title="",average=FALSE,pseudo=FALSE){
  data_table = read_delim(data_file,"\t",escape_double = FALSE,trim_ws = TRUE)
  front_color = get_bg_colors(bg_color)[1]
  back_color = get_bg_colors(bg_color)[2]
  data_table = as.data.frame(data_table)
  data_out = get_IR_table(data_table,sample_names,color_vector,average)
  data_table = as.data.frame(data_out[[1]])
  k_labs = data_out[[2]]
  color_vector = data_out[[3]]
  if(pseudo!=FALSE){
    data_table[,3:4] = data_table[,3:4]+pseudo
  }
  # Need to remove rows (i.e. for all conditions) in which there are
  # 0-values, since the plot is in log scale.
  print(nrow(data_table))
  for (i in k_labs){
    data_table=data_table[with(data_table,eval(as.name(i)))>0,]
  }
  print(nrow(data_table)) #spot check to see how many rows you are losing
  #data_table[,3:4] = log10(data_table[,3:4])
  #If you did the stupid thing and named your factors starting with non-characters
  # k_labs[1] = paste("`",k_labs[1],"`",sep="",collapse="")
  # k_labs[2] = paste("`",k_labs[2],"`",sep="",collapse="")
  # 2D-density polygons plus semi-transparent points, with the y = x
  # diagonal as a visual reference for equal IR in both conditions.
  ggplot(data_table,aes_string(x=k_labs[2],y=k_labs[1]))+
    stat_density2d(aes(fill=..level..,alpha=..level..),
                   geom='polygon',
                   color='grey50')+
    scale_fill_gradient(low="white",high="red")+
    scale_alpha(range=c(0,1),guide=F)+
    geom_point(color=front_color,alpha=0.1)+
    ggtitle(paste(nrow(data_table),"introns",sep=" "))+
    xlab(paste("Log10(IR Score) ",k_labs[2],sep=""))+
    ylab(paste("Log10(IR Score) ",k_labs[1],sep=""))+
    xlim(x_min,x_max)+
    ylim(x_min,x_max)+
    geom_abline(intercept=0,slope=1,color="blue",linetype="dashed",size=1.5)+
    theme(panel.border=element_blank(),
          panel.background=element_blank(),
          plot.background=element_rect(fill="transparent",colour=NA),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          axis.ticks=element_line(colour=front_color),
          axis.title=element_text(colour=front_color,size=18),
          axis.text=element_text(colour=front_color,size=18),
          legend.background = element_rect(fill="transparent"),
          legend.title = element_text(color=front_color,size=16),
          legend.key=element_rect(fill="transparent",color=NA),
          legend.text=element_text(color=front_color,size=12),
          legend.justification=c(1,0),
          legend.position=c(0.3,0.6),
          plot.title=element_text(color=front_color))+
    theme(axis.line.x=element_line(color=front_color))+
    theme(axis.line.y=element_line(color=front_color))+
    theme(panel.border = element_rect(color=front_color,fill=NA,size=2))
  # Most images in the past have been width = 6, height = 6.
  ggsave(paste(output,k_labs[1],"_",k_labs[2],"_",bg_color,"_",title,"_scatter.png",sep=""),width=6,height=6,bg="transparent")
  dev.off()
}
## Scatter plot of log10 fold changes with points coloured by which
## condition dominates, saved as PNG.
##
## Args:
##   df: data frame expected to contain columns INPUT_FC, J2_FC, DMSO_FC
##     and `8800_FC` (backtick-quoted because it starts with a digit).
##   output: path prefix for the saved file; title: extra filename tag.
##   bg_color: "white" or "black"; picks the foreground colour.
##
## NOTE(review): the aesthetics map log10(INPUT_FC) vs log10(J2_FC) while
## the capping and the coloured point layers use DMSO_FC / `8800_FC`.
## The commented-out code suggests this function was adapted between the
## two column pairs -- confirm which pair is intended before relying on it.
## Side effects: writes <output><title>__<bg_color>_heatscatter.png and
## calls dev.off(). Modifications to df are local (copy-on-modify).
plot_logIR_FC_density = function(df,output,title,bg_color){
  front_color = get_bg_colors(bg_color)[1]
  back_color = get_bg_colors(bg_color)[2]
  # Cap both fold-change columns at 2.5 so extreme values sit on the plot
  # border instead of stretching the axes. (max shadows base::max here.)
  max = 2.5
  df$DMSO_FC[df$DMSO_FC > 2.5] <- 2.5
  df$`8800_FC`[df$`8800_FC` > 2.5] <- 2.5
  # df$INPUT_FC[df$INPUT_FC > 10^2.5] <- 10^2.5
  # df$J2_FC[df$J2_FC > 10^2.5] <- 10^2.5
  ggplot(data=df,aes(log10(INPUT_FC),log10(J2_FC)))+
    #stat_density2d(aes(fill=..level..,alpha=..level..),
    #               geom='polygon',
    #               color='grey50')+
    #scale_fill_gradient(low="white",high="red")+
    #scale_alpha(range=c(0,1),guide=F)+
    # geom_point(alpha=0.1,size=0.3)+
    # geom_point(data = subset(df,INPUT_FC/J2_FC > 2), size = 1.25, alpha = 0.75, col="blue")+
    # geom_point(data = subset(df,INPUT_FC/J2_FC < 0.5), size = 1.25, alpha = 0.75, col="red")+
    # geom_abline(slope=1,intercept=0,color="black",linetype=2,size=1)+
    # Red: 8800 dominates; blue: DMSO dominates. Grey dashed lines mark FC = 1.
    geom_point(data = subset(df, `8800_FC` > 1 & `8800_FC`>DMSO_FC), size=1, alpha=0.6, col="red")+
    geom_point(data = subset(df, DMSO_FC > 1 & DMSO_FC>`8800_FC`), size=1, alpha=0.6, col="blue")+
    geom_hline(yintercept=1,color="grey",linetype=2,size=0.5)+
    geom_vline(xintercept=1,color="grey",linetype=2,size=0.5)+
    ylim(-1,max)+
    xlim(-1,max)+
    theme_classic()+
    theme(panel.border = element_rect(color=front_color,fill=NA,size=1),
          axis.ticks=element_line(colour=front_color),
          axis.text=element_text(colour=front_color,size=16),
          axis.title=element_text(colour=front_color,size=16))
  ggsave(paste(output,title,"_","_",bg_color,"_heatscatter.png",sep=""),width=5,height=5,bg="transparent")
  dev.off()
}
## Boxplots of IR scores (optionally log10-transformed) per condition,
## saved as PNG.
##
## Args:
##   data_file: path to a tab-delimited IR table (columns 1:2 assumed to be
##     intron_id and info; see get_IR_table).
##   sample_names: names (substrings of column names) of the conditions.
##   color_vector / bg_color: colour handling as in the sibling functions.
##   n: not used in this function -- kept for interface parity.
##   output: path prefix for the saved file; title: extra filename tag.
##   y_label: y-axis label.
##   log: if not FALSE, IR ratios are log10-transformed before plotting.
##   average: if TRUE, replicate columns are averaged per sample name.
##   pseudo: if not FALSE, a pseudo-count added to columns 3:4.
##     NOTE(review): hard-coded to columns 3:4 -- assumes two data columns.
##   subset: if not FALSE, a row index/selector applied to the table before
##     filtering (passed straight to `data_table[subset,]`).
##
## Side effects: prints the table and row counts, writes
## <output>..._boxplots.png, and calls dev.off().
log_IR_boxplots = function(data_file,sample_names,color_vector,bg_color,n,output,title="",y_label="",log=FALSE,average=FALSE,pseudo=FALSE,subset=FALSE){
  data_table = read_delim(data_file,"\t",escape_double = FALSE,trim_ws = TRUE)
  front_color = get_bg_colors(bg_color)[1]
  back_color = get_bg_colors(bg_color)[2]
  data_table = as.data.frame(data_table)
  data_out = get_IR_table(data_table,sample_names,color_vector,average)
  data_table = as.data.frame(data_out[[1]])
  k_labs = data_out[[2]]
  color_vector = data_out[[3]]
  if(pseudo!=FALSE){
    data_table[,3:4] = data_table[,3:4]+pseudo
  }
  if(subset!=FALSE){
    subset = as.vector(subset)
    data_table = data_table[subset,]
  }
  print(data_table)
  # Need to remove rows (i.e. for all conditions) in which there are
  # 0-values, if the plot is in log scale.
  print(nrow(data_table))
  for (i in k_labs){
    data_table=data_table[with(data_table,eval(as.name(i)))>0,]
  }
  print(nrow(data_table)) #spot check to see how many rows you are losing
  # Reshape wide -> long so each row is one (intron, condition, IR) triple.
  long_data = melt(data_table,id.vars=c("intron_id","info"),variable.name="Condition",value.name="IR_Ratio")
  sub_data = long_data[long_data$Condition %in% k_labs,]
  print(nrow(sub_data))
  if(log!=FALSE){
    sub_data$IR_Ratio = log10(sub_data$IR_Ratio)
  }
  print(k_labs)
  ggplot(sub_data,aes(y=IR_Ratio,x=Condition,color=Condition))+
    geom_boxplot()+
    ggtitle(paste(nrow(data_table),"introns",sep=" "))+
    ylab(y_label)+
    scale_color_manual(values=color_vector,labels=k_labs)+
    theme(panel.border=element_blank(),
          panel.background=element_blank(),
          plot.background=element_rect(fill="transparent",colour=NA),
          panel.grid.major=element_blank(),
          panel.grid.minor=element_blank(),
          axis.ticks=element_line(colour=front_color),
          axis.title=element_text(colour=front_color,size=18),
          axis.text=element_text(colour=front_color,size=18),
          legend.background = element_rect(fill="transparent"),
          legend.title = element_text(color=front_color,size=16),
          legend.key=element_rect(fill="transparent",color=NA),
          legend.text=element_text(color=front_color,size=12),
          legend.justification=c(1,0),
          legend.position="none",
          plot.title=element_text(color=front_color))+
    theme(axis.line.x=element_line(color=front_color))+
    theme(axis.line.y=element_line(color=front_color))+
    theme(panel.border = element_rect(color=front_color,fill=NA,size=2))
  # Most images in the past have been width = 6, height = 6.
  ggsave(paste(output,k_labs[1],"_",k_labs[2],"_",bg_color,"_",title,"_boxplots.png",sep=""),width=6,height=6,bg="transparent")
  dev.off()
}
|
b154243581ae82cad58b6863780825f6041a81f6
|
9fa889c10187dff24aecc072949db196562da19b
|
/RScripts_Recession/RScript02_sipp08_disab.R
|
cd54e4180c753200a7d1eb663cd6fc06c059e1c2
|
[] |
no_license
|
snandi/Project_Recession
|
098ef216939ec764cd9fef1b06fd2ee6ea640ac5
|
1bdbf170e4c9491def5af541f613b0846599e119
|
refs/heads/master
| 2020-04-12T09:41:40.357259
| 2017-08-21T05:55:17
| 2017-08-21T05:55:17
| 41,777,027
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,488
|
r
|
RScript02_sipp08_disab.R
|
rm(list=ls(all.names=TRUE))
rm(list=objects(all.names=TRUE))
#dev.off()
########################################################################
## This script reads in sipp08_master & 2008_disability and merges them
## and conducts basic summary statistics
########################################################################
########################################################################
## Run Path definition file ##
########################################################################
RScriptPath <- '~/Project_Recession/RScripts_Recession/'
DataPath <- '~/Project_Recession/Data/'
RDataPath <- '~/Project_Recession/RData/'
PlotPath <- '~/Project_Recession/Plots/'
Filename.Header <- paste('~/RScripts/HeaderFile_lmcg.R', sep='')
source(Filename.Header)
source(paste(RScriptPath, 'fn_Library_Recession.R', sep=''))
########################################################################
Today <- Sys.Date()
########################################################################
## load sipp08_MASTER.RData
########################################################################
Filepath1 <- paste(RDataPath, 'sipp08_MASTER.RData', sep = '')
load(Filepath1)
sipp08_master <- within(data = sipp08_master,{
epppnum <- as.integer(epppnum)
})
length(unique(as.numeric(sipp08_master$ssuid)))
# sipp08_01 <- subset(sipp08_master, ssuid == "019133398883")
# sipp08_02 <- subset(sipp08_master, ssuid %in% ssuids[1:20])
# View(sipp08_01)
########################################################################
## 2008_disability.dta
########################################################################
Filepath2 <- paste(RDataPath, '2008_disability.RData', sep = '')
load(Filepath2)
ssuids <- unique(as.vector(disability_2008$ssuid))
length(unique(as.numeric(disability_2008$ssuid)))
Colnames_Keep_disab <- c('ssuid', 'shhadid', 'epppnum', 'disb_wrk_ageR2', 'gas', 'rent', 'meet_expenses',
'phone', 'medical')
########################################################################
## Get disability information, with only 1 type of disability entry
## per ssuid, in the disability_2008 data
########################################################################
Disab <- fn_returnDisb_ssuid(disbData = disability_2008)
names(Disab)
disability_2008_1 <- unique(Disab[['disability_2008_1']][,Colnames_Keep_disab])
ssuid_disb_1 <- Disab[['ssuid_disb_1']]
length(unique(as.numeric(ssuid_disb_1)))
Disab[['ssuid_disb_2_ORmore']]
########################################################################
## Merge sipp_2008 & 2008_disability.dta
########################################################################
sipp08_master_disab <- merge(x = sipp08_master, y = disability_2008_1,
by = c('ssuid', 'shhadid', 'epppnum'), all.x = T)
sipp08_master_disab$yearmon <- as.yearmon(paste(sipp08_master_disab$rhcalmn, sipp08_master_disab$rhcalyr))
sipp08_master_disab <- sipp08_master_disab[order(sipp08_master_disab$ssuid, sipp08_master_disab$yearmon),]
Colnames_Keep_merged <- c('ssuid', 'shhadid', 'yearmon', 'ehrefper', 'rhtype', 'wpfinwgt', 'thtotinc', 'rhpov', 'epppnum',
'disb_wrk_ageR2')
sipp08_master_disab <- sipp08_master_disab[, Colnames_Keep_merged]
# disab_01 <- subset(disability_2008, ssuid == "730925701502")[,Colnames_Keep_disab]
#sipp08_master_disab <- subset(sipp08_master_disab, ssuid %in% ssuid_disb_1[1:500])
sipp08_master_disab <- na.omit(sipp08_master_disab)
#View(sipp08_master_disab[,Colnames_Keep_merged])
str(sipp08_master_disab)
########################################################################
## Get Income Poverty information
########################################################################
Data_forIncPov <- aggregate(cbind(thtotinc, rhpov, disb_wrk_ageR2) ~ ssuid + shhadid + yearmon,
data = sipp08_master_disab, FUN = mean)
Data_forIncPov <- Data_forIncPov[order(Data_forIncPov$ssuid, Data_forIncPov$yearmon), ]
Data_forIncPov$rhpov2 <- 2 * Data_forIncPov$rhpov
Data_forIncPov$FPL200 <- Data_forIncPov$thtotinc < Data_forIncPov$rhpov2
Data_forIncPov$FPL100 <- Data_forIncPov$thtotinc < Data_forIncPov$rhpov
Data_forIncPov$FPL200[Data_forIncPov$FPL100 == TRUE] <- FALSE
Data_forIncPov$Pct_rhpov <- Data_forIncPov$thtotinc/Data_forIncPov$rhpov
Data_forIncPov$disb_wrk_ageR2 <- factor(Data_forIncPov$disb_wrk_ageR2, labels = c('no', 'yes'))
str(Data_forIncPov)
rownames(Data_forIncPov) <- NULL
head(Data_forIncPov)
comment(Data_forIncPov) <- 'The lower the value of Pct_rhpov, the worse off the household is'
str(Data_forIncPov)
#qplot() + geom_boxplot(aes(x = as.factor(as.Date(yearmon)), y = Pct_rhpov), data = Data_forIncPov )
########################################################################
## Boxplot of ratio of thtotinc and rhpov, by yearmon
########################################################################
Plot1_box <- qplot() + geom_boxplot(aes(x = as.factor(as.Date(yearmon)), y = Pct_rhpov, fill = disb_wrk_ageR2,
col = disb_wrk_ageR2),
data = Data_forIncPov , outlier.colour = 'gray30', outlier.size = 0.3) +
ylab(label = 'thtotinc / rhpov') + xlab('Year month') +
theme(
legend.position = 'top',
axis.text.x = element_text(angle=90, vjust=1)
)
#Plot1_box
########################################################################
## Boxplot of ratio of thtotinc and 2*rhpov, by yearmon
########################################################################
Data_forIncPov$Pct_rhpov2 <- Data_forIncPov$thtotinc/Data_forIncPov$rhpov2
Plot2_box <- qplot() + geom_boxplot(aes(x = as.factor(as.Date(yearmon)), y = Pct_rhpov2, fill = disb_wrk_ageR2,
col = disb_wrk_ageR2),
data = Data_forIncPov , outlier.colour = 'gray30', outlier.size = 0.3) +
ylab(label = 'thtotinc / (2*rhpov)') + xlab('Year month') +
theme(
legend.position = 'top',
axis.text.x = element_text(angle=90, vjust=1)
)
#Plot2_box
# qplot() + geom_line(aes(x = as.Date(yearmon), y = Pct_rhpov, col = ssuid), data = Data_forIncPov )
Filename.plot <- paste0(PlotPath, 'IncomePovertyPlots_', Today, '.pdf')
pdf(file = Filename.plot, onefile = TRUE)
print(Plot1_box)
print(Plot2_box)
dev.off()
Filename <- paste0(RDataPath, 'Data_forIncPov.RData')
save(Data_forIncPov, file = Filename)
|
2f2428cf8a33496a57467c4547041bd3352f3cfb
|
c0e7ceea2ee949e24aeca3850011d54dc47e256c
|
/scripts/deprecated/filenames_hibench_hadoop_preprun.R
|
a2450a6af177baf9978529984ea8f4d44ab33f3e
|
[] |
no_license
|
ananthnarayan/bpod2020
|
b604b0cd79ac145cdb9115ea0df95c06bbdb806f
|
e2448e0d59ca7d891cb580776bc3de4a10e9637e
|
refs/heads/master
| 2023-01-31T09:20:34.727555
| 2020-12-09T08:13:25
| 2020-12-09T08:13:25
| 233,527,509
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,576
|
r
|
filenames_hibench_hadoop_preprun.R
|
library('hash')
#crono_files=c("apsp.csv","bc.csv","birch.csv","bfs.csv", "community.csv", "connected.csv", "tsp.csv", "pagerank.csv","sssp.csv","triangle.csv")
hibench_27_tiny_files=c(
"aggregation_prep.csv" , "bayes_run.csv" , "kmeans_prep.csv" , "scan_prep.csv" , "sort_run.csv" , "wordcount_prep.csv",
"aggregation_run.csv" , "join_prep.csv" , "kmeans_run.csv" , "pagerank_prep.csv" , "scan_run.csv" , "terasort_prep.csv" , "wordcount_run.csv",
"bayes_prep.csv" , "join_run.csv" , "pagerank_run.csv" , "sort_prep.csv" , "terasort_run.csv"
)
hibench_27_small_files=c(
"aggregation_prep.csv" , "bayes_run.csv" , "kmeans_prep.csv" , "scan_prep.csv" , "sort_run.csv" , "wordcount_prep.csv",
"aggregation_run.csv" , "join_prep.csv" , "kmeans_run.csv" , "pagerank_prep.csv" , "scan_run.csv" , "terasort_prep.csv" , "wordcount_run.csv",
"bayes_prep.csv" , "join_run.csv" , "pagerank_run.csv" , "sort_prep.csv" , "terasort_run.csv"
)
hibench_27_large_files=c(
"aggregation_prep.csv" , "bayes_run.csv" , "kmeans_prep.csv" , "scan_prep.csv" , "sort_run.csv" , "wordcount_prep.csv",
"aggregation_run.csv" , "join_prep.csv" , "kmeans_run.csv" , "scan_run.csv" , "terasort_prep.csv" , "wordcount_run.csv",
"bayes_prep.csv" , "join_run.csv" , "sort_prep.csv" , "terasort_run.csv"
)
hibench_210_tiny_files=c(
"aggregation_prep.csv" , "bayes_run.csv" , "kmeans_prep.csv" , "scan_prep.csv" , "sort_run.csv" , "wordcount_prep.csv",
"aggregation_run.csv" , "join_prep.csv" , "kmeans_run.csv" , "pagerank_prep.csv" , "scan_run.csv" , "terasort_prep.csv" , "wordcount_run.csv",
"bayes_prep.csv" , "join_run.csv" , "pagerank_run.csv" , "sort_prep.csv" , "terasort_run.csv"
)
hibench_210_small_files=c(
"aggregation_prep.csv" , "bayes_run.csv" , "kmeans_prep.csv" , "scan_prep.csv" , "sort_run.csv" , "wordcount_prep.csv",
"aggregation_run.csv" , "join_prep.csv" , "kmeans_run.csv" , "pagerank_prep.csv" , "scan_run.csv" , "terasort_prep.csv" , "wordcount_run.csv",
"bayes_prep.csv" , "join_run.csv" , "pagerank_run.csv" , "sort_prep.csv" , "terasort_run.csv"
)
hibench_210_large_files=c(
"aggregation_prep.csv" , "bayes_run.csv" , "kmeans_prep.csv", "scan_prep.csv" , "sort_run.csv" , "wordcount_prep.csv",
"aggregation_run.csv" , "join_prep.csv" , "kmeans_run.csv" , "scan_run.csv" , "terasort_prep.csv" , "wordcount_run.csv",
"bayes_prep.csv" , "join_run.csv" , "sort_prep.csv" , "terasort_run.csv"
)
hibench_321_tiny_files=c(
"sort_run.csv" , "wordcount_prep.csv", "terasort_prep.csv" , "wordcount_run.csv", "sort_prep.csv" , "terasort_run.csv"
)
hibench_321_small_files=c(
"sort_run.csv" , "wordcount_prep.csv", "terasort_prep.csv" , "wordcount_run.csv", "sort_prep.csv" , "terasort_run.csv"
)
hibench_321_large_files=c(
"sort_run.csv" , "wordcount_prep.csv", "terasort_prep.csv" , "wordcount_run.csv", "sort_prep.csv" , "terasort_run.csv"
)
paths<- hash()
#HiBench
paths["hibench_27_tiny"]="hadoop-2.7.7/tiny"
paths["hibench_27_small"]="hadoop-2.7.7/small"
paths["hibench_27_large"]="hadoop-2.7.7/large"
paths["hibench_210_tiny"]="hadoop-2.10.0/tiny"
paths["hibench_210_small"]="hadoop-2.10.0/small"
paths["hibench_210_large"]="hadoop-2.10.0/large"
paths["hibench_321_tiny"]="hadoop-3.2.1/tiny"
paths["hibench_321_small"]="hadoop-3.2.1/small"
paths["hibench_321_large"]="hadoop-3.2.1/large"
#/home/meena/Ananth/Ananth-Research/research_code/results_workspace/bdb
fileslist<-hash()
fileslist["hibench_27_tiny"] = hibench_27_tiny_files
fileslist["hibench_27_small"] = hibench_27_small_files
fileslist["hibench_27_large"] = hibench_27_large_files
fileslist["hibench_210_tiny"] = hibench_210_tiny_files
fileslist["hibench_210_small"] = hibench_210_small_files
fileslist["hibench_210_large"] = hibench_210_large_files
fileslist["hibench_321_tiny"] = hibench_321_tiny_files
fileslist["hibench_321_small"] = hibench_321_small_files
fileslist["hibench_321_large"] = hibench_321_large_files
metrics_hibench_27_tiny_filelist = paste("metrics", hibench_27_tiny_files, sep="_")
metrics_hibench_27_small_filelist = paste("metrics", hibench_27_small_files, sep="_")
metrics_hibench_27_large_filelist = paste("metrics", hibench_27_large_files, sep="_")
metrics_hibench_210_tiny_filelist = paste("metrics", hibench_210_tiny_files, sep="_")
metrics_hibench_210_small_filelist = paste("metrics", hibench_210_small_files, sep="_")
metrics_hibench_210_large_filelist = paste("metrics", hibench_210_large_files, sep="_")
metrics_hibench_321_tiny_filelist = paste("metrics", hibench_321_tiny_files, sep="_")
metrics_hibench_321_small_filelist = paste("metrics", hibench_321_small_files, sep="_")
metrics_hibench_321_large_filelist = paste("metrics", hibench_321_large_files, sep="_")
metrics_filelist <-hash()
metrics_filelist["hibench_27_tiny"] = metrics_hibench_27_tiny_filelist
metrics_filelist["hibench_27_small"] = metrics_hibench_27_small_filelist
metrics_filelist["hibench_27_large"] = metrics_hibench_27_large_filelist
metrics_filelist["hibench_210_tiny"] = metrics_hibench_210_tiny_filelist
metrics_filelist["hibench_210_small"] = metrics_hibench_210_small_filelist
metrics_filelist["hibench_210_large"] = metrics_hibench_210_large_filelist
metrics_filelist["hibench_321_tiny"] = metrics_hibench_321_tiny_filelist
metrics_filelist["hibench_321_small"] = metrics_hibench_321_small_filelist
metrics_filelist["hibench_321_large"] = metrics_hibench_321_large_filelist
|
3812bc3604de2d33e8bced6d7a532c20c60599a0
|
e63317e82a6bf9826efdd6c7b714e7599441fc21
|
/man/bootLM.Rd
|
65d2076c0fdce273394de1299680f663b03c09cb
|
[] |
no_license
|
anspiess/reverseR
|
f21774d6ab629b7f91107d21fcf935dde4919214
|
3ec07fda74d8d9900b4926fe9e79ef3b17dae5d9
|
refs/heads/master
| 2022-06-19T00:55:54.050231
| 2022-05-11T18:04:09
| 2022-05-11T18:04:09
| 130,033,550
| 3
| 1
| null | 2018-04-18T20:29:13
| 2018-04-18T08:55:11
|
R
|
UTF-8
|
R
| false
| false
| 1,991
|
rd
|
bootLM.Rd
|
\name{bootLM}
\alias{bootLM}
\encoding{latin1}
\title{Nonparametric bootstrap linear model}
\description{
Nonparametric bootstrap (sampling cases with replacement) method for parameter estimation and confidence interval of a linear model.
}
\usage{
bootLM(formula, data = NULL, R = 10000, alpha = 0.05)
}
\arguments{
\item{formula}{a formula of type \code{y ~ x} for the linear model.}
\item{data}{an optional data frame, list or environment containing the variables in the model.}
\item{R}{number of bootstrap samples.}
\item{alpha}{the \eqn{\alpha}-level to use as the threshold border.}
}
\details{
For all (\eqn{x_i, y_i}) datapoints, linear models are created by sampling \code{R} times - with replacement - from \eqn{n \in \{1 \ldots N\}} and building models \eqn{Y_n = X_n\beta + \varepsilon}. This is also known as the .632-bootstrap, because the samples will, on average, contain \eqn{1 - e^{-1} = 0.632} unique elements.
Parameter estimates are obtained from each sampling, from which the average \eqn{\overline{P_{n}}} and standard error \eqn{\frac{\sigma}{\sqrt n}} is calculated as well as a quantile based confidence interval. \emph{p}-values are calculated through inversion of the confidence interval (\code{\link{boot.pval}}).
}
\value{
A dataframe containing the estimated coefficients, their standard error, lower an upper confidence values and \emph{p}-values.
}
\author{
Andrej-Nikolai Spiess
}
\references{
An Introduction to the Bootstrap.\cr
Efron B, Tibshirani R.\cr
Chapman & Hall (1993).
The Bootstrap and Edgeworth Expansion.\cr
Hall P.\cr
Springer, New York (1992).
Modern Statistics with R.\cr
Thulin M.\cr
Eos Chasma Press, Uppsala (2021).
}
\examples{
## Example #1 with single influencers and an insignificant model (p = 0.115).
## Bootstrap estimates are robust w.r.t. outlier #18.
set.seed(123)
a <- 1:20
b <- 5 + 0.08 * a + rnorm(20, 0, 1)
LM1 <- lm(b ~ a)
bootLM(LM1, R = 1000)
}
\keyword{optimize}
\keyword{models}
\keyword{linear}
|
5970e840fab6c591e22514a0f6e7afc00a8916e0
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/plotrix/examples/sumbrk.Rd.R
|
f3f18e1418f2603d9bb86483dfec249a674f52e7
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 188
|
r
|
sumbrk.Rd.R
|
# Auto-generated example runner extracted from the plotrix package's
# Rd documentation for sumbrk().
library(plotrix)
### Name: sumbrk
### Title: Count specified values in a vector
### Aliases: sumbrk
### Keywords: misc
### ** Examples
# Count how many of 100 letters (sampled with replacement) equal "M".
sumbrk(sample(LETTERS,100,TRUE),trueval="M")
|
a5f10ee5043a8d89446ee4d2aa5433b144693e9a
|
3a04468a8440fe5d8d2c8665334b654a63f1af74
|
/6_dataVisualization.R
|
163cda224e6f7e9be5fb3151e1c3391ed0dd194f
|
[] |
no_license
|
sukesh-reddy/rTutorials
|
b6c6e76538d8b66c897fb61a6d50c4ba497cf000
|
cd6fd4e87d1c311361b13ef0c24379d45bb71384
|
refs/heads/master
| 2021-05-17T19:02:53.523434
| 2020-04-12T07:58:25
| 2020-04-12T07:58:25
| 250,930,313
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,058
|
r
|
6_dataVisualization.R
|
##################################
# Data Visualization
# base package, ggplot2
# grid graphics, lattice graphics - optional
###################################
######### Base Package #####################
# -----------------------------Scatter Plot
# It helps in visualizing data easily and finding the linear relationship
# between 2 continuous variables
plot(mtcars$hp,mtcars$mpg)
# arguments - type='p'(points),'l'(line),o(both points and line)
plot(mtcars$hp
,mtcars$mpg,
type='p',xlab = 'Horsepower',
ylab='miles per gallon',
main = 'HPvsMPG',
xlim = c(0,max(mtcars$hp)),
ylim = c(0,max(mtcars$mpg)))
# ---------------------------------Par() function
# par() - mfrow - return all the arguments that you can change in a plot
par(mfrow=c(2,2))
# par()- pch - it specify the symbols to use when plotting the points
# you can specify another column/factor var to pch (or) a number
plot(mtcars$hp,mtcars$mpg,type='n')
points(mtcars$hp,mtcars$mpg,pch='15')
# par() - text() - to specify the font in the plot
text(mtcars$hp,mtcars$mpg,
adj=-0.2,
cex=1.2,
font=4)
# par() = legend()
legend('topright',pch = c(1,2,3),
legend = c('4','6','8'))
# par() - color(
colors()
# ---------------------------Line chart
# It is commonly used to analyze a trend spread over a time period
par(mfrow=c(1,1))
plot(c(7,12,28,3,41),type = 'o',
main='Line chart',xlab = 'Month',
ylab = 'date',
col='red')
lines(c(1,2,3,4,5,6,10,20),type ='o',col='blue')
# -----------------------bar plot
# It is suitable for showing comparision between categorical variable
counts <- table(mtcars$gear)
counts <- table(mtcars$vs,mtcars$gear)
barplot(counts,legend=names(counts),col=c('red','yellow','green'))
barplot(counts,horiz = T,names.arg = c('3 gears','4 gears','5 gears'),beside = T)
# -----------------------histogram
hist(mtcars$disp,col = rainbow(7),breaks = 10)
# ------------------------boxplot
boxplot(mtcars$mpg~mtcars$cyl,col=c('red','green','yellow'))
# ------------------------mosaic plot
# for comparison between 3 categorical variables
data("HairEyeColor")
HairEyeColor
mosaicplot(HairEyeColor)
# -------------------------------Correlation Plot
library(corrplot)
corrplot(cor(mtcars),method = 'ellipse')
?corrplot
corrplot(cor(mtcars),method = 'color',
type = 'lower',addCoef.col = 'black',
diag = F,order = 'hclust')
# ----------------------------------Word cloud
install.packages(c('wordcloud','RColorBrewer'))
library(wordcloud)
library(RColorBrewer)
counts <-table(rownames(mtcars))
wordcloud(words = names(counts),
freq = as.numeric(counts),
scale = c(1.25,0.25),
min.freq = 1)
###################### ggplot2 #######################
# Grammar-of-graphics layers:
#   1. DATA        - variables of interest
#   2. AESTHETICS  - scales the data is mapped onto
#                    (x, y, colour, fill, size, shape, labels, alpha,
#                     line width, line type)
#   3. GEOMETRY    - visual marks (point, line, histogram, bar, boxplot)
#   4. FACETS      - small multiples (columns, rows)
#   5. STATISTICS  - descriptive, inferential, binning, smoothing
#   6. COORDINATES - cartesian, fixed, polar, limits; aspect ratio, coord_flip()
#   7. THEMES      - all non-data ink
#--------------------------- framework ---------------------------
library(ggplot2)
# Template only -- NOT runnable as written (placeholder aesthetics and an
# empty `expand =` argument error at evaluation time).  BUG FIX: kept as a
# comment so that sourcing this file no longer fails; fill in the
# aesthetics before use.
# ggplot(data = mtcars, aes(x = , y = , shape = , col = , size = , labels = )) +
#   geom_point() +
#   facet_grid(. ~ am) +
#   stat_smooth(method = lm, col = 'red') +
#   scale_x_continuous('mpg', limits = c(), expand = c(0, 0)) +
#   coord_flip()
#------------------------------example
# Scatter plot - size, col, shape
# Maps four variables at once: weight vs mpg, coloured by cylinder count,
# point size by displacement, shape by transmission type.
ggplot(data = mtcars,aes(x=wt,y=mpg,col=factor(cyl),size=disp,shape=factor(am))) +
geom_point()
# Bar plot
# Stacked counts of transmission type within each cylinder count.
ggplot(data = mtcars,aes(x=factor(cyl),fill=factor(am)))+
geom_bar()
# Histogram
ggplot(data=mtcars, aes(x=mpg)) +
geom_histogram(bins = 6,color='black',fill='blue')
# Density plot
ggplot(data=mtcars, aes(x=mpg)) +
geom_density(color='black',fill='blue')
# Boxplot
ggplot(mtcars,aes(x=factor(cyl),y=mpg)) +
geom_boxplot(fill='lightblue')
# Facet layer
# facet_grid(. ~ x) lays the panels out side by side (columns) ...
ggplot(mtcars,aes(x=factor(cyl),y=mpg,fill=factor(am))) +
geom_boxplot() +
facet_grid(.~factor(am))
# ... while facet_grid(x ~ .) stacks them vertically (rows).
ggplot(mtcars,aes(x=factor(cyl),y=mpg,fill=factor(am))) +
geom_boxplot() +
facet_grid(factor(am)~.)
# Stat Layer
# stat_smooth / geom_smooth overlay a fitted regression; se=F hides the
# confidence ribbon.
ggplot(data = mtcars,aes(x=wt,y=mpg)) +
geom_point() +
stat_smooth(method = lm,col='red')
ggplot(data = mtcars,aes(x=wt,y=mpg)) +
geom_point() +
geom_smooth(method = lm,col='red',se=F)
# With colour mapped, one regression line is fitted per cylinder group.
ggplot(data = mtcars,aes(x=wt,y=mpg,col=factor(cyl))) +
geom_point() +
stat_smooth(method = 'lm',se=F)
# Coordinate Layer
# coord_cartesian zooms without dropping data (unlike scale limits).
ggplot(data = mtcars,aes(x=wt,y=mpg,col=factor(cyl))) +
geom_point() +
geom_smooth() +
coord_cartesian(xlim = c(3,6))
# Theme layer - element_text, element_line, elelment_rect
ggplot(data = mtcars,aes(x=wt,y=mpg)) +
geom_point() +
facet_grid(.~cyl) +
theme(plot.background = element_rect(fill = 'darkgrey'))
|
a6293c6cc23b529d0bf9521f18a258c83b7e5e12
|
56b0e1e55287425abd23e8c58840d4cc22802dd9
|
/R/diss_rate_chg.R
|
7cbe33429127f9eda4f2d0ea0ff0be76daff2f87
|
[] |
no_license
|
fawda123/pteropod_ts
|
2a0749810fa051b8c52fdfc2cbf0dba8a94575f1
|
8accadd51bb4bff9863aa160b0722e662891aca8
|
refs/heads/master
| 2021-06-02T19:20:10.649781
| 2020-09-29T23:43:27
| 2020-09-29T23:43:27
| 135,618,117
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 373
|
r
|
diss_rate_chg.R
|
# Estimate the per-station dissolution rate of change through time.
library(tidyverse)

# biodat is a project dataset (loaded via data()); the columns used here
# are date, cohortyr, mo, station and typ3 (dissolution measure).
data(biodat)

disrat <- biodat %>%
  select(date, cohortyr, mo, station, typ3) %>%
  arrange(station, date) %>%
  group_by(station) %>%
  mutate(
    # station-level mean dissolution, used to normalise the differences
    # (BUG FIX: use TRUE rather than the reassignable shorthand T)
    disavg = mean(typ3, na.rm = TRUE),
    # normalised change between consecutive dates (NA for the first obs)
    disdel = c(NA, diff(typ3)) / disavg,
    # elapsed time between observations, in months (~31 days per month)
    dtsdel = c(NA, (as.numeric(diff(date)) / 31)),
    # percent change in dissolution per month
    disrat = 100 * disdel / dtsdel
  )
|
34327f60c4bee6955659deceebbd0a47cb15f53c
|
802d0666cfa220d2873060cbe0b44cfd68b3fc57
|
/man/ettersonEq14v1.Rd
|
53b897b3c0bc964e21b0c2340408de64949a9d79
|
[] |
no_license
|
fraenzi/carcass
|
9e3f0f8d4355fc6f43623fee1262f932f2ab04c8
|
d3aff9fdbc4322ec8ddcbe6dcd22d35b8b9fe614
|
refs/heads/master
| 2021-05-10T21:41:24.145988
| 2019-01-21T10:40:16
| 2019-01-21T10:40:16
| 118,236,258
| 0
| 1
| null | 2018-03-05T14:04:24
| 2018-01-20T11:22:52
|
R
|
UTF-8
|
R
| false
| false
| 1,989
|
rd
|
ettersonEq14v1.Rd
|
\name{ettersonEq14v1}
\alias{ettersonEq14v1}
\title{
Equation 14 of Etterson (2013) Ecological Applications 23, 1915-1925,
adapted so that persistence probability and searcher efficiency can vary
with calendar date
}
\description{
Calculates the probability that a carcass that has fallen in the search area during
n regular or irregular search intervals is found by a searcher.
}
\usage{
ettersonEq14v1(s, f, J)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{s}{vector of persistence probabilities with N elements, where N is the number of days in
the full study period (i.e, N = sum(J)).
}
\item{f}{vector of searcher efficiency values (probabilities that a carcass
present in the search area is found during one search) with n elements,
where n is the number of searches performed.
}
\item{J}{a vector containing the lengths of the search intervals.
}
}
\details{
This function assumes that persistence probability and searcher efficiency change over
time and that this change depends upon serial date within your study period. In other words, it
assumes that all carcasses in the environment on a given date experience the same scavenging probability
and detectability, regardless of how old the carcass is.
However it does allow persistence probability and searcher efficiency to change with the calendar date.
}
\value{
the probability that a carcass that has fallen into the search area during the study
is found by the searcher.
}
\references{
Etterson, M.A. (2013) Hidden Markov models for estimating animal mortality from
anthropogenic hazards. Ecological Applications, 23, 1915-1925.
}
\author{
M. A. Etterson
}
\seealso{
\code{\link{ettersonEq14}}
\code{\link{ettersonEq14v2}}
\code{\link{pkorner}}
\code{\link{phuso}}
\code{\link{perickson}}
}
\examples{
J <- c(2,3,2,4,3,5,3,2,3,4)
s <- plogis(seq(0.2, 2, length=sum(J)))
f <- plogis(seq(1.5, 0.9, length=length(J)))
ettersonEq14v1(s,f,J)
}
\keyword{methods}
\keyword{misc}
|
75bb32c579425efe1d339a632fc5e785feb58a1a
|
9790f2d332593b64955c312a2ac1f31768da7f1f
|
/demo/equations.R
|
693b9d2da41fe477388282e52b8f1c036786f7e5
|
[] |
no_license
|
nalimilan/R2HTML
|
5a4d0ab514cc30f1f33d7752c6c1eabc1fd0b070
|
108a16184890a75a8ef08567843c43e86afd06a8
|
refs/heads/master
| 2022-05-29T15:31:00.000106
| 2022-05-23T09:21:27
| 2022-05-23T09:21:27
| 23,266,035
| 4
| 6
| null | 2022-05-14T21:03:38
| 2014-08-23T20:52:10
|
R
|
UTF-8
|
R
| false
| false
| 609
|
r
|
equations.R
|
# R2HTML demo: build a small HTML page containing ASCIIMathML equations
# and open it in the browser.
.HTML.file = HTMLInitFile()
HTML.title("sample page",1,file=.HTML.file)
HTML(as.title("Sample equation"),HR=3)
# Inline math mixed with plain text
cat("Some text and then a math mode:",file=.HTML.file,append=TRUE)
HTML(as.latex("[[a,b],[c,d]]((n),(k))") ,file=.HTML.file)
cat(". Nice isn't it?",file=.HTML.file,append=TRUE)
# Display-mode (inline = FALSE) equation with an equation counter
HTML(as.latex("\\int_{-\\infty}^{1}f(x)dx",inline=FALSE,count=TRUE) ,file=.HTML.file)
HTML(as.title("Labelled equations"),HR=3)
HTML(as.latex("x+b/(2a)=+-sqrt((b^2)/(4a^2)-c/a)",inline=FALSE,label="Label of this equation"))
cat("file:", .HTML.file, "is created")
# paste0 is the idiomatic form of paste(..., sep = "")
browseURL(paste0("file://", .HTML.file))
|
e7681737c71a06314d1a5cba3a46d6179a4d9b39
|
d1675379757820d2a27d517bccfca93143cb88de
|
/R/zzz.R
|
c7a087a2a175c43676a771382d6f44912a8f6a8e
|
[] |
no_license
|
svd09/ckbplotr
|
7b949e185b3aa217f52db7a6cbbd0c26635265b6
|
d08e8bf82df6143d5d56c5ffb5a6f3901309ecc5
|
refs/heads/main
| 2023-04-03T17:31:45.184461
| 2021-04-14T19:09:04
| 2021-04-14T19:09:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 139
|
r
|
zzz.R
|
# Emit a startup message whenever the package is attached.  Uses
# packageStartupMessage() so users can silence it with
# suppressPackageStartupMessages().
.onAttach <- function(...) {
  msg <- "Please check NEWS.md, help files and vignettes for latest changes to the package."
  packageStartupMessage(msg)
}
|
0763c95f91686fa632298ce3be629481e39796c5
|
fdae9375a71c1ec06db96b5ce96e7897219e6c58
|
/man/company_information.Rd
|
6a5dd3a2700d87b5decfbc21892ce192eb5442cf
|
[
"MIT"
] |
permissive
|
Feihelan/edgarWebR
|
f7bb8e82a0a21ca56b87b6b9b70da065ba95312d
|
18ef6c27d17a80f5e29c413cc69c3281b0806130
|
refs/heads/master
| 2020-04-16T20:35:03.564129
| 2018-08-24T12:32:43
| 2018-08-24T12:32:43
| 165,901,956
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 481
|
rd
|
company_information.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/company_information.R
\name{company_information}
\alias{company_information}
\title{SEC Company Info}
\usage{
company_information(x)
}
\arguments{
\item{x}{Either a stock symbol (for the 10,000 largest companies) or CIK
code}
}
\value{
a dataframe with all SEC company information
}
\description{
Fetches basic information on a given company from the SEC site
}
\examples{
company_information("INTC")
}
|
217d12161d8a336aebb7b25df5d101a85a5a4ed5
|
0d03305dc8edc133003ec5b2cdb70b0521513cc7
|
/comparativeGenomics/chainScripts/hotspotIndel.R
|
0ff30ad8e40ae0304d2772a2e103327893bdb933
|
[] |
no_license
|
ReubenBuck/RTN_domains_scripts
|
e3d5c222a2182ed21fc609dedfd660c25a28dbd1
|
745c2b3f71924b3897cb58f7463a88fd4d9eab98
|
refs/heads/master
| 2020-07-04T06:33:52.525113
| 2018-01-09T03:09:05
| 2018-01-09T03:09:05
| 67,857,570
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 15,553
|
r
|
hotspotIndel.R
|
## Produce all our plots across one script
## so lets go back a couple of steps
## what are we actually measuring with our 4 stats
## human bases that have been lost since divergence with mouse
## human bases that have been gained since divergence with mouse
## mouse bases that have been gaines since divergence with human
## mouse bases that have been lost since divergence with human
## We are looking to see how the independant gain and loss of DNA along each lineage associates with changes in TE content
## in shared regions, we are expecting to see similar levels of gain
## in new TE lineage specific, lineage specific amounts of gain
## how these two things relate
## need to correctly assign values too
library(GenomicRanges)
library(dplyr)
###
# Load repeat-hotspot regions plus lineage-supported indel and base-rate
# tracks for hg19 and mm10.
# NOTE(review): rm(list = ls()) wipes the global environment -- fine for a
# standalone analysis script, hazardous if sourced interactively.
rm(list = ls())
inter <- read.table("~/Desktop/RTN_domains/data/repeatHotspot/intersect.txt", header = TRUE)
# differences in TE content (gap loading kept disabled; see final plots)
# hgGap <- read.table("~/Desktop/RTN_domains/data/comparativeGenomics/queGenomes/gaps/hg19.mm10.que.indel", header = TRUE)
# hgGap.gr <- GRanges(hgGap)
# mcols(hgGap.gr)$gapWidth <- mcols(hgGap.gr)$queRange.width
# mcols(hgGap.gr[mcols(hgGap.gr)$inDel == "ins"])$gapWidth <- width(hgGap.gr[mcols(hgGap.gr)$inDel == "ins"])
# hg19 hotspots; keep only the reference-genome rows
hgHot <- read.table("~/Desktop/RTN_domains/data/repeatHotspot/hg19/hg19_mm10_conDif.txt", header=TRUE)
#hgHot <- hgHot[hgHot$hotspotID %in% inter$domains[inter$genome == "hg19"],]
hgHot.gr <- GRanges(hgHot)
hgHot.gr <- hgHot.gr[mcols(hgHot.gr)$genome == "ref"]
# Lineage-specific (outgroup-supported) indels, with the matching query
# coordinates attached as a GRanges metadata column
hgIndel <- read.table("~/Desktop/RTN_domains/data/comparativeGenomics/supportedIndels/hg19.supportedIndel.que", header = TRUE)
hgIndel.gr <- GRanges(hgIndel)
mcols(hgIndel.gr)$queRange <- GRanges(seqnames = Rle(hgIndel$queRange.seqnames),
                                      ranges = IRanges(start = hgIndel$queRange.start, end = hgIndel$queRange.end))
hgBr <- read.table("~/Desktop/RTN_domains/data/comparativeGenomics/queGenomes/baseRates/hg19.base", header = TRUE)
hgBr.gr <- GRanges(hgBr)
#mmGap <- read.table("~/Desktop/RTN_domains/data/comparativeGenomics/queGenomes/gaps/mm10.hg19.que.indel", header = TRUE)
#mmGap.gr <- GRanges(mmGap)
#mcols(mmGap.gr)$gapWidth <- mcols(mmGap.gr)$queRange.width
#mcols(mmGap.gr[mcols(mmGap.gr)$inDel == "ins"])$gapWidth <- width(mmGap.gr[mcols(mmGap.gr)$inDel == "ins"])
# Same set of tracks for mm10
mmHot <- read.table("~/Desktop/RTN_domains/data/repeatHotspot/mm10/mm10_hg19_conDif.txt", header=TRUE)
#mmHot <- mmHot[mmHot$hotspotID %in% inter$domains[inter$genome == "mm10"],]
mmHot.gr <- GRanges(mmHot)
mmHot.gr <- mmHot.gr[mcols(mmHot.gr)$genome == "ref"]
mmIndel <- read.table("~/Desktop/RTN_domains/data/comparativeGenomics/supportedIndels/mm10.supportedIndel.que", header = TRUE)
mmIndel.gr <- GRanges(mmIndel)
mcols(mmIndel.gr)$queRange = GRanges(seqnames = Rle(mmIndel$queRange.seqnames),
                                     ranges = IRanges(start = mmIndel$queRange.start, end = mmIndel$queRange.end))
# BUG FIX: `head = TRUE` relied on partial argument matching; spell out
# `header = TRUE` as in every other read.table() call above.
mmBr <- read.table("~/Desktop/RTN_domains/data/comparativeGenomics/queGenomes/baseRates/mm10.base", header = TRUE)
mmBr.gr <- GRanges(mmBr)
#
# # 1 for 1 overlap conflicting
# ol <- findOverlaps(hgIndel.gr, mmIndel.gr, minoverlap = 1)
# smoothScatter(mcols(hgIndel.gr[queryHits(ol)])$supGenoNo, mcols(mmIndel.gr[subjectHits(ol)])$supGenoNo)
#
# # is there a bad species
# a1 <- table(unlist(strsplit(as.character(mcols(hgIndel.gr)$supGenoName), split = ",")))
# a2 <- table(unlist(strsplit(as.character(mcols(hgIndel.gr[queryHits(ol)])$supGenoName), split = ",")))
#
# plot(a1/sum(a1))
# points(a2/sum(a2), col = 2, type = "p")
#
# b1 <- table(unlist(strsplit(as.character(mcols(mmIndel.gr)$supGenoName), split = ",")))
# b2 <- table(unlist(strsplit(as.character(mcols(mmIndel.gr[subjectHits(ol)])$supGenoName), split = ",")))
#
# plot(b1/sum(b1))
# points(b2/sum(b2), col = 2, type = "p")
#
#
# echTel <- grep("echTel2",as.character(mcols(hgIndel.gr[queryHits(ol)])$supGenoName))
# hist(mcols(hgIndel.gr[queryHits(ol)])$supGenoNo, breaks = 20, freq = FALSE, ylim = c(0,2))
# hist(mcols(hgIndel.gr[queryHits(ol)][echTel])$supGenoNo, add = T, col = 2, density = 0, breaks = 20, freq = FALSE)
#
# hist(mcols(hgIndel.gr[queryHits(ol)][echTel])$supGenoNo, col = 2, density = 0, breaks = 20)
#
#
#
# olSamp <- ol[mcols(mmIndel.gr[subjectHits(ol)])$supGenoNo == mcols(hgIndel.gr[queryHits(ol)])$supGenoNo]
# a3 <- table(unlist(strsplit(as.character(mcols(hgIndel.gr[queryHits(olSamp)])$supGenoName), split = ",")))
# b3 <- table(unlist(strsplit(as.character(mcols(mmIndel.gr[subjectHits(olSamp)])$supGenoName), split = ",")))
#
# mmIndel.gr[subjectHits(olSamp)]
#
# plot(a1/sum(a1))
# points(a2/sum(a2), col = 2, type = "p")
# points(a3/sum(a3), col = 3, type = "p")
#
#
# plot(b1/sum(b1))
# points(b2/sum(b2), col = 2, type = "p")
# points(b3/sum(b3), col = 3, type = "p")
#
# smoothScatter(mcols(hgIndel.gr[queryHits(ol)])$supGenoNo,mcols(mmIndel.gr[subjectHits(ol)])$supGenoNo )
#
#
# # resolve by counting votes
#
# length(ol)
#hgHot <- hgHot[hgHot$hotspotID %in% inter$domains[inter$genome == "hg19"],]
olIndel <- (findOverlaps(hgHot.gr, hgIndel.gr))
dfIndel <- data.frame(queryHits = queryHits(olIndel), mcols(hgIndel.gr[subjectHits(olIndel)])[c("gapWidth","indel")] )
dfIndel <- summarise(group_by(dfIndel,queryHits, indel = indel), gapWidth = sum(gapWidth))
dfIndel$genome = "hg19"
olMindel <- findOverlaps(hgHot.gr, mcols(mmIndel.gr)$queRange)
dfMindel <- data.frame(queryHits = queryHits(olMindel), mcols(mmIndel.gr[subjectHits(olMindel)])[c("gapWidth","indel")])
dfMindel <- summarise(group_by(dfMindel, queryHits, indel = indel), gapWidth = sum(gapWidth))
dfMindel$genome = "mm10"
dfIndel <- rbind(dfIndel, dfMindel)
olBr <- findOverlaps(hgHot.gr, hgBr.gr)
dfBr <- data.frame(queryHits = queryHits(olBr), baseRate = width(hgBr.gr[subjectHits(olBr)]))
dfBr <- summarise(group_by(dfBr, queryHits), baseRate = sum(baseRate))
mer <- merge(dfIndel, dfBr)
#olGap <- findOverlaps(hgHot.gr, hgGap.gr)
#dfGap <- data.frame(queryHits = queryHits(olGap), mcols(hgGap.gr[subjectHits(olGap)])[c("inDel","gapWidth")])
#dfGap <- summarise(group_by(dfGap, queryHits, GapIndel = inDel), HMgapWidth = sum(gapWidth))
#mer <- merge(mer, dfGap)
dfAll <- data.frame(mcols(hgHot.gr[mer$queryHits]), mer)
s <- summarise(group_by(dfAll, repGroup, hotspotID, genome.1, conState, indel),
gapWidth = sum(gapWidth), baseRate = sum(baseRate), known = sum(known))
s <- s[s$conState!="mid",]
s$conState <- factor(s$conState, levels = c("con","dif"))
# s <- filter(s,(genome.1 == "hg19" & indel == "del" & GapIndel == "del") |
# (genome.1 == "hg19" & indel == "ins" & GapIndel == "ins") |
# (genome.1 == "mm10" & indel == "ins" & GapIndel == "ins") |
# (genome.1 == "mm10" & indel == "del" & GapIndel == "del"))
pdf(file = "~/Desktop/RTN_domains/plots/inDelIdentify/HG19indelRates.pdf", width = 12, height = 8, onefile = TRUE)
par(mar = c(10,5,4,4))
stripchart((s$gapWidth/s$baseRate) ~ + s$repGroup + s$indel + s$genome.1,
method= "jitter",jitter = .3, pch = 16, cex = .3, vert = TRUE,
las = 2, main = "hg19 all repeat enriched regions", xaxs = "i",
col = c("darkblue", "purple", "aquamarine3", "red"), log = "y")
boxplot((s$gapWidth/s$baseRate) ~ s$repGroup + s$indel + s$genome.1,
las = 2, main = "", xaxs = "i", outline = FALSE, add = TRUE, col = NA,
border = c("darkblue", "purple", "aquamarine3", "red"), log = "y")
for(i in 0:16){abline(v = (i)+ .5, lty = 2, lwd = 1)};for(i in 0:4){abline(v = (i * 4) + .5, lty = 1, lwd = 2)};abline(v = 8.5, lty = 1, lwd = 3, col= 2)
stripchart(log10(s$gapWidth/s$baseRate) ~ s$conState + s$repGroup + s$indel + s$genome.1,
method= "jitter",jitter = .3, pch = 16, cex = .3, vert = TRUE,
las = 2, main = "hg19 high/low in query", xaxs = "i",
col = c("darkblue", "darkblue","purple", "purple","aquamarine3", "aquamarine3","red", "red"))
boxplot(log10(s$gapWidth/s$baseRate) ~ s$conState + s$repGroup + s$indel + s$genome.1,
las = 2, main = "", xaxs = "i", outline = FALSE, add = TRUE, col = NA,
border = c("darkblue", "darkblue","purple", "purple","aquamarine3", "aquamarine3","red", "red"))
for(i in 0:16){abline(v = (i * 2) + .5, lty = 2)};for(i in 0:4){abline(v = (i * 8) + .5, lty = 1, lwd = 2)};abline(v = 16.5, lty = 1, lwd = 3, col= 2)
r1 <- s[s$genome.1 == "hg19" & s$hotspotID %in% inter$domains[inter$genome == "hg19"],]
r2 <- s[s$genome.1 == "mm10" & s$hotspotID %in% inter$domains[inter$genome == "mm10"],]
r <- rbind(r1,r2)
stripchart(log10(r$gapWidth/r$baseRate) ~ r$conState + r$repGroup + r$indel + r$genome.1,
method= "jitter",jitter = .3, pch = 16, cex = .3, vert = TRUE,
las = 2, main = "hg19 shared/lineage specific", xaxs = "i",
col= c("darkblue", "darkblue","purple", "purple","aquamarine3", "aquamarine3","red", "red"))
boxplot(log10(r$gapWidth/r$baseRate) ~ r$conState + r$repGroup + r$indel + r$genome.1,
las = 2, main = "", xaxs = "i", outline = FALSE, add = TRUE, col = NA,
border = c("darkblue", "darkblue","purple", "purple","aquamarine3", "aquamarine3","red", "red"))
for(i in 0:16){abline(v = (i * 2) + .5, lty = 2)};for(i in 0:4){abline(v = (i * 8) + .5, lty = 1, lwd = 2)};abline(v = 16.5, lty = 1, lwd = 3, col= 2)
dev.off()
head(inter)
# relative contribution of insertions and deleations is similar
# we are trying to look at associated factors of what the gaps could be due to
# proportion of human sided gaps and proportion of mouse sided gaps
# so this will help us identify particular classes of TE assocaited evolution
# so really there is little variation between regions
# it might mean TEs ahve little impact
# if we consider it as a proportion of gapped sequence.
## --- mm10 hotspots: same pipeline as the hg19 section above ------------
# Mouse-lineage indels summed per mm10 hotspot
olIndel <- (findOverlaps(mmHot.gr, mmIndel.gr))
dfIndel <- data.frame(queryHits = queryHits(olIndel), mcols(mmIndel.gr[subjectHits(olIndel)])[c("gapWidth","indel")] )
dfIndel <- summarise(group_by(dfIndel,queryHits, indel = indel), gapWidth = sum(gapWidth))
dfIndel$genome = "mm10"
# Human-lineage indels mapped onto mm10 via their query coordinates
olMindel <- findOverlaps(mmHot.gr, mcols(hgIndel.gr)$queRange)
dfMindel <- data.frame(queryHits = queryHits(olMindel), mcols(hgIndel.gr[subjectHits(olMindel)])[c("gapWidth","indel")])
dfMindel <- summarise(group_by(dfMindel, queryHits, indel = indel), gapWidth = sum(gapWidth))
dfMindel$genome = "hg19"
dfIndel <- rbind(dfIndel, dfMindel)
# Alignable-base denominator per hotspot
olBr <- findOverlaps(mmHot.gr, mmBr.gr)
dfBr <- data.frame(queryHits = queryHits(olBr), baseRate = width(mmBr.gr[subjectHits(olBr)]))
dfBr <- summarise(group_by(dfBr, queryHits), baseRate = sum(baseRate))
mer <- merge(dfIndel, dfBr)
#olGap <- findOverlaps(mmHot.gr, mmGap.gr)
#dfGap <- data.frame(queryHits = queryHits(olGap), mcols(mmGap.gr[subjectHits(olGap)])[c("inDel","gapWidth")])
#dfGap <- summarise(group_by(dfGap, queryHits, GapIndel = inDel), HMgapWidth = sum(gapWidth))
#mer <- merge(mer, dfGap)
dfAll <- data.frame(mcols(mmHot.gr[mer$queryHits]), mer)
s <- summarise(group_by(dfAll, repGroup, hotspotID, genome.1, conState, indel),
gapWidth = sum(gapWidth), baseRate = sum(baseRate), known = sum(known))
s <- s[s$conState!="mid",]
s$conState <- factor(s$conState, levels = c("con","dif"))
# s <- filter(s,(genome.1 == "hg19" & indel == "del" & GapIndel == "del") |
#               (genome.1 == "hg19" & indel == "ins" & GapIndel == "ins") |
#               (genome.1 == "mm10" & indel == "ins" & GapIndel == "ins") |
#               (genome.1 == "mm10" & indel == "del" & GapIndel == "del"))
# Same three-page layout as the hg19 PDF
pdf(file = "~/Desktop/RTN_domains/plots/inDelIdentify/MM10indelRate.pdf", width = 12, height = 8, onefile = TRUE)
par(mar = c(10,4,4,4))
stripchart((s$gapWidth/s$baseRate) ~  s$repGroup + s$indel + s$genome.1,
method= "jitter",jitter = .3, pch = 16, cex = .3, vert = TRUE,
las = 2, main = "mm10 all repeat enriched regions", xaxs = "i",
col = c("darkblue", "purple", "aquamarine3", "red"), ylim = c(0,.1))
boxplot((s$gapWidth/s$baseRate) ~ s$repGroup + s$indel + s$genome.1,
las = 2, main = "", xaxs = "i", outline = FALSE, add = TRUE, col = NA,
border = c("darkblue", "purple", "aquamarine3", "red"))
for(i in 0:16){abline(v = (i)+ .5, lty = 2, lwd = 1)};for(i in 0:4){abline(v = (i * 4) + .5, lty = 1, lwd = 2)};abline(v = 8.5, lty = 1, lwd = 3, col= 2)
stripchart(log10(s$gapWidth/s$baseRate) ~ s$conState + s$repGroup + s$indel + s$genome.1,
method= "jitter",jitter = .3, pch = 16, cex = .3, vert = TRUE,
las = 2, main = "mm10 high/low in query", xaxs = "i",
col = c("darkblue","darkblue", "purple", "purple", "aquamarine3", "aquamarine3", "red", "red"))
boxplot(log10(s$gapWidth/s$baseRate) ~ s$conState + s$repGroup + s$indel + s$genome.1,
las = 2, main = "", xaxs = "i", outline = FALSE, add = TRUE, col = NA,
border = c("darkblue","darkblue", "purple", "purple", "aquamarine3", "aquamarine3", "red", "red"))
for(i in 0:16){abline(v = (i * 2) + .5, lty = 2)};for(i in 0:4){abline(v = (i * 8) + .5, lty = 1, lwd = 2)};abline(v = 16.5, lty = 1, lwd = 3, col= 2)
# Shared/lineage-specific hotspots only
r1 <- s[s$genome.1 == "hg19" & s$hotspotID %in% inter$domains[inter$genome == "hg19"],]
r2 <- s[s$genome.1 == "mm10" & s$hotspotID %in% inter$domains[inter$genome == "mm10"],]
r <- rbind(r1,r2)
stripchart(log10(r$gapWidth/r$baseRate) ~ r$conState + r$repGroup + r$indel + r$genome.1,
method= "jitter",jitter = .3, pch = 16, cex = .3, vert = TRUE,
las = 2, main = "mm10 shared/lineage specific", xaxs = "i", add = FALSE,
col = c("darkblue","darkblue", "purple", "purple", "aquamarine3", "aquamarine3", "red", "red"))
boxplot(log10(r$gapWidth/r$baseRate) ~ r$conState + r$repGroup + r$indel + r$genome.1,
las = 2, main = "", xaxs = "i", outline = FALSE, add = TRUE, col = NA, border = c("darkblue","darkblue", "purple", "purple", "aquamarine3", "aquamarine3", "red", "red"))
for(i in 0:16){abline(v = (i * 2) + .5, lty = 2)};for(i in 0:4){abline(v = (i * 8) + .5, lty = 1, lwd = 2)};abline(v = 16.5, lty = 1, lwd = 3, col= 2)
dev.off()
# so now i have the full details of hat happned in human and mouse in human regions.
# that might mean no more awkward matching up
### overlap of human delations in mouse returned nothing
### I wonder why
# NOTE(review): this section references hgGap.gr, but the code that
# creates hgGap.gr is commented out near the top of the script (the
# "hgGap <- read.table(...)" block).  As written, these hist() calls
# error with "object 'hgGap.gr' not found" -- re-enable the gap loading
# before running this section.
layout(matrix(c(1,2), nrow = 1))
par(mar = c(5,4,4,2))
# Human-side gaps: all gaps vs human insertions vs mouse deletions
hist(log10(width(hgGap.gr[mcols(hgGap.gr)$inDel == "ins"])), breaks = 100, main = "Human side gaps",
xlab = "width (log10 bp)", ylim = c(0,6e5), xlim = c(1,4.5))
hist(log10(mcols(hgIndel.gr[mcols(hgIndel.gr)$indel == "ins"])$gapWidth), breaks = 100, add = TRUE, col = 2, density = 0)
hist(log10(mcols(mmIndel.gr[mcols(mmIndel.gr)$indel == "del"])$gapWidth), breaks = 50, add = TRUE, col = 3, density = 0)
legend("topright",legend = c("all", "human insertion", "mouse deletion"), fill = c(1,2,3))
# Mouse-side gaps: all gaps vs mouse insertions vs human deletions
hist(log10(mcols(hgGap.gr[mcols(hgGap.gr)$inDel == "del"])$gapWidth), breaks = 100, main = "Mouse side gaps",
xlab = "width (log10 bp)", ylim = c(0,6e5),xlim = c(1,4.5))
hist(log10(mcols(mmIndel.gr[mcols(mmIndel.gr)$indel == "ins"])$gapWidth), breaks = 50, add = TRUE, col = 2, density = 0)
hist(log10(mcols(hgIndel.gr[mcols(hgIndel.gr)$indel == "del"])$gapWidth), breaks = 50, add = TRUE, col = 3, density = 0)
legend("topright",legend = c("all", "mouse insertion", "human deletion"), fill = c(1,2,3))
|
fb8b6e1376b1a93ca75abda1364a90b87b6b32f6
|
a6bb9d8d9c9c7be92ef7a5e6da6a0c426b66134b
|
/ui.R
|
6a4f5fd47653c7e735064f36f276519d7526d7ed
|
[] |
no_license
|
nahfa911/shinyweather
|
616ed878d7abff5c810a5b0665dc5c5f73435244
|
302016621e8e734a0c8dd6253aee7922fa2a4789
|
refs/heads/master
| 2020-03-30T08:03:45.995237
| 2018-10-27T13:51:41
| 2018-10-27T13:51:41
| 150,971,885
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 476
|
r
|
ui.R
|
# Shiny UI: a weather-forecast page with a styled title bar, a city-name
# text input in the sidebar, and two plotly outputs (location map and
# forecast chart) in the main panel.
shiny::fluidPage(
  shiny::titlePanel(div("Weather forecast",style="background-color:aliceblue;text-align:center;
                                      font-family:Calibri;height:35px;border-radius:6px;"
                        )
                    )
  # Sidebar holds the only user input; default city is Linkoping
  , shiny::sidebarLayout(
    shiny::sidebarPanel(shiny::textInput(inputId = "cityName",label = "City Name",value = "Linkoping"))
    # Outputs are rendered server-side under the ids "map" and "forecast"
    , shiny::mainPanel(plotly::plotlyOutput("map")
                       ,br()
                       ,plotly::plotlyOutput("forecast"))
  )
)
|
a8510ee9325b05b41e0d761c27c2aa3f097dd43c
|
8b5e1c11d8a66b943ed5b4d0f7c905bb4398f709
|
/caminata raul zacatelco.R
|
ced8d87f645da1eeed482ff2238ba3968f13d641
|
[] |
no_license
|
sherlockcubing/Programacion_Actuarial_III_OT2016
|
e9d0107e30dd9d89aa5f8a18e444ca553b426b9b
|
be74969a73a1908fe2fa21be15fb080e08b16ba6
|
refs/heads/master
| 2020-04-18T12:43:04.018262
| 2016-11-09T11:21:11
| 2016-11-09T11:21:11
| 65,914,634
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 460
|
r
|
caminata raul zacatelco.R
|
# Simulate 100 bounded random walks that start at 5 and take +/-0.5 steps,
# stopping as soon as the walk leaves the interval [3, 10].
n <- 0                        # number of walks that exit below 3
y <- 0                        # number of walks that exit above 10
z <- 5
salida <- vector("numeric")   # exit value of each walk
caminata <- vector("numeric") # path of the current walk
for (i in 1:100) {
  caminata <- vector("numeric")
  z <- 5
  while (z >= 3 && z <= 10) {
    caminata <- c(caminata, z)
    print(z)
    # Fair coin: 1 -> step up 0.5, 0 -> step down 0.5
    moneda <- rbinom(1, 1, 0.5)
    z <- z + 0.5 * (2 * moneda - 1)
  }
  salida <- c(salida, z)
  if (z < 3) {
    n <- n + 1
  } else {
    y <- y + 1
  }
}
# Number of walks that exited below
n
# Number of walks that exited above
y
|
a39abef37947208d98785b3f5c6c897de427cc38
|
c4885558962de723db5a1331cdc0158bffef73fd
|
/R/geom_pushpin.R
|
b1814dcfa72b5069167c01a287c5e4962c9b7393
|
[
"MIT"
] |
permissive
|
R-CoderDotCom/chinchet
|
3d1042ce51af8beb831f3f12da4244bc6563c66c
|
76428786bd75f5ae7b3f9aeb77522fb05db0ced9
|
refs/heads/main
| 2023-02-19T02:07:44.426882
| 2021-01-17T19:08:55
| 2021-01-17T19:08:55
| 330,424,890
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,434
|
r
|
geom_pushpin.R
|
# pushpinGrob
# Build a grid rasterGrob showing the pushpin PNG bundled with the
# chinchet package.  `geom_key` maps each pushpin colour name to the
# corresponding image file shipped in the package.
pushpinGrob <- function(x, y, size, pushpin, geom_key = list(red = "red.png",
                                                            blue = "blue.png",
                                                            green = "green.png",
                                                            yellow = "yellow.png",
                                                            orange = "orange.png",
                                                            gray = "gray.png",
                                                            white = "white.png",
                                                            transparent = "transparent.png")) {
  # Locate the installed PNG for the (single) requested pushpin colour
  png_path <- system.file(geom_key[[unique(pushpin)]], package = "chinchet", mustWork = TRUE)
  pin_image <- as.raster(png::readPNG(png_path))
  # Fix only the height (in mm) so the icon scales proportionally in
  # width and keeps a constant on-page size whatever the plot dimensions.
  grid::rasterGrob(x = x,
                   y = y,
                   image = pin_image,
                   height = size * ggplot2::unit(20, "mm"))
}
# GeomPushpin
# ggproto Geom that renders each data point as a pushpin image.
# `pushpin` (colour name) and `size` are forwarded to pushpinGrob();
# note the use of the ggplot2 internal :::ggname to label the grob.
GeomPushpin <- ggplot2::ggproto(`_class` = "GeomPushpin",
`_inherit` = ggplot2::Geom,
required_aes = c("x", "y"),
non_missing_aes = c("size", "pushpin"),
default_aes = ggplot2::aes(size = 1, pushpin = "red", shape = 19,
colour = "black"),
draw_panel = function(data, panel_scales, coord, na.rm = FALSE) {
# Transform data coordinates into panel space before drawing
coords <- coord$transform(data, panel_scales)
ggplot2:::ggname(prefix = "geom_pushpin",
grob = pushpinGrob(x = coords$x,
y = coords$y,
size = coords$size,
pushpin = coords$pushpin))
})
#' @title Pushpin layer
#' @description The geom is used to add pushpins to plots. See ?ggplot2::geom_points for more info. The pushpin indicates the exact point on the pin point. Use the argument `pushpin` to select the color. Available options are `"red"` (default), `"blue"`, `"green"`, `"yellow"`, `"orange"`, `"gray"`, `"white"` and `"transparent"`. Change the size of the puspin with `size` as in `ggplot2::geom_point`
#' @inheritParams ggplot2::geom_point
#' @examples
#'
#' # install.packages("ggplot2")
#'library(ggplot2)
#'
#'
#'df <- data.frame(x = state.center$x, y = state.center$y,
#'                 state = state.name)
#'
#'ggplot(df, aes(x = x, y = y)) +
#'  geom_polygon(data = map_data("state"),
#'               color = "black",
#'               fill = "white",
#'               aes(x = long, y = lat,
#'                   group = group)) +
#'  geom_pushpin(pushpin = "blue", size = 1) +
#'  guides(fill = FALSE) +
#'  theme_void()
#'
#' @importFrom grDevices as.raster
#' @export
geom_pushpin <- function(mapping = NULL,
                         data = NULL,
                         stat = "identity",
                         position = "identity",
                         ...,
                         na.rm = FALSE,
                         show.legend = NA,
                         inherit.aes = TRUE) {
  # Everything funnels into a standard ggplot2 layer using GeomPushpin;
  # extra arguments in `...` become geom parameters alongside na.rm.
  layer_params <- list(na.rm = na.rm, ...)
  ggplot2::layer(
    geom = GeomPushpin,
    data = data,
    mapping = mapping,
    stat = stat,
    position = position,
    show.legend = show.legend,
    inherit.aes = inherit.aes,
    params = layer_params
  )
}
# Demo: plot visited (red) and to-visit (green) cities on a world map.
library(ggplot2)
library(maps)
world <- map_data("world")
head(world)
# Fixed seed so the demo always samples the same 10 + 10 cities
set.seed(2)
cities_visited <- world.cities[sample(1:nrow(world.cities), 10), ]
cities_to_visit <- world.cities[sample(1:nrow(world.cities), 10), ]
ggplot(data = world) +
geom_polygon(aes(x = long, y = lat, group = group), fill = "white", color = 1, size = 0.1) +
geom_pushpin(cities_visited, mapping = aes(x = long, y = lat, pushpin = "red")) +
geom_pushpin(cities_to_visit, mapping = aes(x = long, y = lat, pushpin = "green")) +
theme_void()
|
3368bf1e166c45b2796dcc114c03e22fa777ddf1
|
c593a8dfa39cf6f137e86242fcb2218d0b68ab1f
|
/NBR/02-Run_NBR/find_noncoding_drivers_precomp_correctoverlaps_EEWmod_noindels.R
|
b1eb05f71386f7b4e7c1d1f13ba095c71016659c
|
[
"LicenseRef-scancode-warranty-disclaimer"
] |
no_license
|
BioAmelie/2020-hrPC-landscape
|
3f188c95a480b098ea9394ae8de75a1654d98e72
|
f3fd52f8dd7af980fcf562bceb689fd2db54433f
|
refs/heads/master
| 2022-11-26T21:12:50.820683
| 2020-08-13T02:09:45
| 2020-08-13T02:09:45
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,890
|
r
|
find_noncoding_drivers_precomp_correctoverlaps_EEWmod_noindels.R
|
# Inigo Martincorena - 2014
# Non-coding driver detection with precomputed trinucleotide composition and using the
# local density of putatively neutral mutations as covariates ("t") in the framework of a
# negative binomial regression.
#
#
##########################################################################################
# Instructions for Mark Cowley et al:
# Required R packages: "GenomicRanges", "Rsamtools", "MASS"
# Mutations file must be a tab-separated matrix, with columns: "sampleID","chr","pos","ref","mut"
# Example:
# sampleID chr pos ref mut
# 0009b464-b376-4fbc-8a56-da538269a02f 1 1230448 G A
# 0009b464-b376-4fbc-8a56-da538269a02f 1 1609723 C T
# 0009b464-b376-4fbc-8a56-da538269a02f 1 1903276 C T
# 0009b464-b376-4fbc-8a56-da538269a02f 1 2574999 C T
# ...
#
# Use chromosome names without "chr", and use build 37 (hg19) coordinates
#
# Modify PATH_TO_DATA_AND_GENOME and genomeFile to match your paths and file names
# PATH_TO_DATA_AND_GENOME: path to GRanges_driver_regions.RData,
# Trinucfreqs_within_100kb_bins.txt
# Neutral_regions_within_100kb_bins.txt
# Regions/
#
# Examples:
# miRNA promoters:
# Rscript find_noncoding_drivers_precomp_correctoverlaps.R "mutations.txt" "Regions/mirna.prom.bed"
# miRNA precursors:
# Rscript find_noncoding_drivers_precomp_correctoverlaps.R "mutations.txt" Regions/mirna.pre.bed
# miRNA mature:
# Rscript find_noncoding_drivers_precomp_correctoverlaps.R "mutations.txt" Regions/mirna.mat.bed
# Protein-coding genes promoters:
# Rscript find_noncoding_drivers_precomp_correctoverlaps.R "mutations.txt" "Regions/gc19_pc.promCore.bed"
#
# Input files:
# - mutations.txt: Table of mutations in the 5 column format (sampleID\tchr\tpos\tref\tmut)
# - trinucfreqs_prefix: Region_name\tIntervals_string\tTrinucleotide composition
#
# Test:
# Rscript find_noncoding_drivers_precomp_correctoverlaps.R example_Thy-AdenoCa.muts.5cols Regions/gc19_pc.promCore.bed
# Main output:
# Regions/gc19_pc.promCore.bed-Selection_output.txt
# region chr start end exp_subs exp_indels obs_subs obs_indels local_t local_t_indels pval_subs pval_indels pval_both qval_subs qval_indels qval_both exclude_forfit cv_predicted_subs cv_predicted_indels pval_subs_CV pval_indels_CV pval_both_CV qval_subs_CV qval_indels_CV qval_both_CV qval_both_CV_tiered obsexp_subs_mle obsexp_subs_low obsexp_subs_high obsexp_indels_mle obsexp_indels_low obsexp_indels_high
# gc19_pc.promCore::gencode::TERT::ENSG00000164362.14 5 1295105 1295362 0.009328714 0.000690961 12 0 1.360598307 0.838612166 7.35E-21 1 0 1.48E-16 1 0 TRUE 0.008465574 0.000666505 6.23E-26 1 0 1.26E-21 1 0 0 1417.505812 721.8452856 2562.228503 0 0 2935.603388
# gc19_pc.promCore::gencode::PLEKHS1::ENSG00000148735.10 10 115511013 115534874 0.043607871 0.004911716 4 0 0.932850568 1.141215427 2.48E-05 1 0.000287769 0.249859112 1 1 FALSE 0.042695237 0.005474887 2.45E-06 1 3.41E-05 0.024713939 1 0.343976004 0.680680582 93.68726473 28.34270653 229.041039 0 0 357.5185204
# gc19_pc.promCore::gencode::OR4F5::ENSG00000186092.4 1 68891 69090 0.005602044 0.000535629 0 0 0.721785491 0.552476592 1 1 1 1 1 1 FALSE 0.005694223 0.00045065 1 1 1 1 1 1 1 0 0 343.7822825 0 0 4341.780179
# gc19_pc.promCore::gencode::AL627309.1::ENSG00000237683.5 1 139310 139579 0.008023746 0.000723099 0 0 0.703778928 0.537202648 1 1 1 1 1 1 FALSE 0.008181887 0.000603953 1 1 1 1 1 1 1 0 0 239.1610734 0 0 3239.682298
# gc19_pc.promCore::gencode::OR4F29::ENSG00000235249.1 1 367440 367658 0.004864302 0.000586513 0 0 0.727080947 0.509529225 1 1 1 1 1 1 FALSE 0.004939695 0.000483438 1 1 1 1 1 1 1 0 0 396.0928871 0 0 4047.297819
#
# Columns of interest: mainly "qval_subs_CV","qval_indels_CV","qval_both_CV","qval_both_CV_tiered"
# The corresponding pvalues can be of interest if you want to do restricted hypotheses testing on a group of
# genes or regions.
#
# If the script is going to be run for different sets of mutations and the same regions,
# consider moving the output files to avoid overwritting. Or modify the "write.table" lines
# to choose your own output naming choices.
#
##########################################################################################
# OPTIONS
PATH_TO_DATA_AND_GENOME = "/g/data/tx70/private/ew8662/PCAWC/NBR_EEW2" # Set this path accordingly.
#genomeFile = paste(PATH_TO_DATA_AND_GENOME,"/genome.fa",sep="") # Set genome file accordingly
genomeFile = "/g/data/tx70/genomes/hs37d5/genome.fa" # Set genome file accordingly
# Cap on mutations per sample per region (guards against hypermutated samples / clustered artefacts)
max_num_muts_perRegion_perSample = 2
# 1 = count only unique indel sites (duplicate indels at the same position are masked out below)
unique_indelsites_FLAG = 1
load(paste(PATH_TO_DATA_AND_GENOME,"/Background/GRanges_driver_regions.RData",sep="")) # Regions to be excluded for the background model fit (GRanges object called "gr_drivers")
trinucfreq_neutralbins_file = paste(PATH_TO_DATA_AND_GENOME,"/Background/Trinucfreqs_within_100kb_bins.txt",sep="")
regions_neutralbins_file = paste(PATH_TO_DATA_AND_GENOME,"/Background/Neutral_regions_within_100kb_bins.txt",sep="")
# Environment
library("GenomicRanges")
library("Rsamtools")
library("MASS")
# Command-line arguments: [1] mutation table, [2] prefix of precomputed trinucleotide-frequency files
args = commandArgs(TRUE)
mutations_file = args[1]
trinucfreqs_prefix = args[2]
## put results in separate folder
on1 <- gsub(".*/", "", trinucfreqs_prefix)
on2 <- gsub(".*/", "", mutations_file)
on3 <- gsub("\\.txt", "", on2)
# Output files are written under Results/ named <trinuc prefix>-<mutation file stem>
output_prefix = paste(PATH_TO_DATA_AND_GENOME, "/Results/", on1,"-",on3, sep="")
## 1. Loading the mutations and extracting the trinucleotide context
cat("Loading mutations...\n")
chr_list = c(1:22,"X","Y")
# Expected columns (no header): sampleID, chr, pos, ref, mut
mutations = read.table(mutations_file, header=0, sep = "\t", stringsAsFactors=F) # Loading the file
mutations = mutations[,1:5]
colnames(mutations) = c("sampleID","chr","pos","ref","mut")
mutations = mutations[as.vector(mutations$chr) %in% chr_list,]
mutations$pos = as.numeric(as.vector(mutations$pos))
# Extracting the trinucleotide context of each substitution
cat("Indels...\n")
# A mutation is classified as an indel if either allele is "-" or longer than 1 bp
indels_pos = as.vector(mutations$ref)=="-" | as.vector(mutations$mut)=="-" | nchar(as.vector(mutations$ref))!=1 | nchar(as.vector(mutations$mut))!=1
indels = mutations[indels_pos,]
subs = mutations[!indels_pos,]
cat("Trinucleotides...\n")
# All 64 trinucleotides in lexicographic order; base2 is the central (mutated) base
nt = c("A","C","G","T"); base1 = rep(nt,each=16,times=1); base2 = rep(nt,each=4,times=4); base3 = rep(nt,each=1,times=16)
trinuc_list = paste(base1,base2,base3, sep="")
# Fetch the reference trinucleotide centered on each substitution from the genome FASTA
seqs = scanFa(genomeFile, GRanges(as.vector(subs$chr), IRanges(subs$pos-1, subs$pos+1)))
muts_trinuc = as.vector(seqs)
subs$trinuc_ref = muts_trinuc
if (nrow(indels)>0) { indels$trinuc_ref = NA }
# Annotating unique indels: indels with the same coordinates and same ref>mut event are flagged as duplicates
indels$unique_indels = rownames(indels) %in% rownames(unique(indels[,2:5]))
subs$unique_indels = TRUE
mutations = rbind(subs,indels)
mutations = mutations[order(mutations$sampleID, mutations$chr, mutations$pos),]
# Loading the precomputed region files
cat("Loading target regions...\n")
target_regions = read.table(sprintf("%s.regions",trinucfreqs_prefix), header=1, sep="\t")
neutral_regions = read.table(regions_neutralbins_file, header=1, sep="\t")
trinucfreq_table = read.table(sprintf("%s.txt",trinucfreqs_prefix), header=1, sep="\t")
trinucfreq_table_neutral = read.table(trinucfreq_neutralbins_file, header=1, sep="\t")
# Creating the 2 global L_matrix
cat("Defining L matrix...\n")
# L matrix: 64 trinucleotides x 4 mutant bases; entries hold the number of available sites.
# NA marks the impossible "mutation" of the central base to itself.
L_matrix_ref = array(0,c(64,4)); rownames(L_matrix_ref) = trinuc_list; colnames(L_matrix_ref) = nt
for (j in 1:64) { L_matrix_ref[j,base2[j]] = NA }
# Total trinucleotide counts over all target regions (each site offers 3 possible substitutions)
trin_freqs = colSums(trinucfreq_table[,trinuc_list])
L_matrix_global = L_matrix_ref
L_matrix_global[names(trin_freqs),] = L_matrix_global[names(trin_freqs),] + array(rep(trin_freqs,4), dim=c(length(trin_freqs),4))
# Same for the neutral (background) 100kb bins
trin_freqs = colSums(trinucfreq_table_neutral[,trinuc_list])
L_matrix_global_neutral = L_matrix_ref
L_matrix_global_neutral[names(trin_freqs),] = L_matrix_global_neutral[names(trin_freqs),] + array(rep(trin_freqs,4), dim=c(length(trin_freqs),4))
## 2. Mapping the mutations to the regions and to neutral bins
# Subfunction intersecting the mutations with the regions of interest
cat("Defining functions...\n")
map_mutations = function(mutations, intervals) {
    # Genomic ranges for the regions of interest (columns 2-4 = chr, start, end)
    region_ranges = GRanges(intervals[, 2],
                            IRanges(as.numeric(intervals[, 3]), as.numeric(intervals[, 4])))
    # Each mutation spans its reference allele (length 1 for subs, >1 for deletions)
    first_base = as.numeric(as.vector(mutations$pos))
    last_base = first_base + nchar(as.vector(mutations$ref)) - 1
    mut_ranges = GRanges(as.vector(mutations$chr), IRanges(first_base, last_base))
    # Two-column matrix: (mutation index, region index) for every overlap
    as.matrix(findOverlaps(mut_ranges, region_ranges, type = "any", select = "all"))
}
# Subfunction that subsamples mutations to a maximum of nmax mutations per sample per region of interest
# m:    two-column character matrix (sampleID, regionID); regionID "0" means the mutation is unmapped
# nmax: maximum number of mutations allowed per sample per region
# Returns a 0/1 vector of length nrow(m): 1 = mutation randomly flagged for removal (masking)
cap_mutations = function(m = sample_and_region, nmax = max_num_muts_perRegion_perSample) {
    nmut = dim(m)[1]
    mapped_muts = which(m[,2]!="0")
    # drop=FALSE keeps the matrix structure even when only one mutation is mapped
    # (the original m[mapped_muts,] collapsed to a vector, breaking m[,1]/m[,2] below)
    m = m[mapped_muts, , drop=FALSE]
    rows = paste(m[,1],m[,2],sep=",")
    freqs = table(rows)
    freqs = freqs[freqs>nmax] # Sample/region combinations exceeding the cap
    duplrows = strsplit(names(freqs),split=",")
    rmrows = array(0,dim(m)[1])
    if (length(freqs)>0) {
        for (j in 1:length(freqs)) {
            vals = duplrows[[j]]
            if (vals[2]!="0") { # Only for mapped mutations we apply the capping
                pos = which(m[,1]==vals[1] & m[,2]==vals[2])
                # Randomly keep nmax mutations; flag the excess for removal
                rmrows[sample(pos, length(pos)-nmax)] = 1
            }
        }
    }
    # Map the removal flags back onto the full (mapped + unmapped) mutation vector
    rmrows_allmuts = array(0,nmut)
    rmrows_allmuts[mapped_muts[rmrows==1]] = 1
    return(rmrows_allmuts)
}
# a. Mapping the mutations to the regions of interest
# target_region / neutral_region = 0 means "not in any region"
cat("Mapping mutations...\n")
olm = map_mutations(mutations, target_regions)
mutations$target_region = 0
m1 = mutations[olm[,1],] # Duplicating subs if they hit more than one region
m1$target_region = target_regions[olm[,2],1] # Annotating hit region
m2 = mutations[-unique(olm[,1]),] # Mutations not mapping to any element
mutations = rbind(m1,m2)
# b. Mapping the remaining mutations to the neutral regions
cat("Mapping mutations (2)...\n")
olm = map_mutations(mutations, neutral_regions)
mutations$neutral_region = 0
m1 = mutations[olm[,1],] # Duplicating subs if they hit more than one region
m1$neutral_region = neutral_regions[olm[,2],1] # Annotating hit region
m2 = mutations[-unique(olm[,1]),] # Mutations not mapping to any element
mutations = rbind(m1,m2)
# c. Masking out duplicate (non-unique) indels (if desired)
if (unique_indelsites_FLAG==1) { # We mask out duplicate indels
mutations$target_region[mutations$unique_indels==F] = 0
mutations$neutral_region[mutations$unique_indels==F] = 0
}
# d. Subsampling mutations to a maximum of nmax mutations per sample per region of interest
sample_and_region = cbind(as.vector(mutations$sampleID), as.character(mutations$target_region))
maskmuts = cap_mutations(sample_and_region, max_num_muts_perRegion_perSample)
mutations$masked_muts_bynmax = maskmuts
# Masked mutations are treated as unmapped from here on
mutations$target_region[maskmuts==1] = 0
### 3. Calculating the global trinucleotide rates and the local density of mutations
# Discard trinucleotide contexts that are not valid ACGT triplets (e.g. containing N)
mutations$trinuc_ref[!(as.vector(mutations$trinuc_ref) %in% trinuc_list)] = NA
# Observed mutation counts per (trinucleotide, mutant base), target vs neutral regions
n_matrix_global = L_matrix_ref
n_matrix_global_neutral = L_matrix_ref
subsin = (!is.na(mutations$trinuc_ref)) & (mutations$target_region!=0)
aux = cbind(as.vector(mutations$trinuc_ref[subsin]), as.vector(mutations$mut[subsin]))
for (j in 1:dim(aux)[1]) {
n_matrix_global[aux[j,1],aux[j,2]] = n_matrix_global[aux[j,1],aux[j,2]] + 1
}
# Indels have trinuc_ref == NA by construction (see section 1)
numindels_target = sum((is.na(mutations$trinuc_ref)) & (mutations$target_region!=0))
# Neutral counts exclude mutations already assigned to a target region
subsin = (!is.na(mutations$trinuc_ref)) & (mutations$neutral_region!=0) & (mutations$target_region==0)
aux = cbind(as.vector(mutations$trinuc_ref[subsin]), as.vector(mutations$mut[subsin]))
for (j in 1:dim(aux)[1]) {
n_matrix_global_neutral[aux[j,1],aux[j,2]] = n_matrix_global_neutral[aux[j,1],aux[j,2]] + 1
}
numindels_neutral = sum((is.na(mutations$trinuc_ref)) & (mutations$neutral_region!=0))
# Human-readable labels of the 192 substitution classes, e.g. "ACA>AGA"
rate_names = array(NA, dim=c(64,4))
for (j in 1:dim(L_matrix_ref)[1]) {
for (h in 1:dim(L_matrix_ref)[2]) {
rate_names[j,h] = sprintf("%s>%s%s%s", trinuc_list[j], substr(trinuc_list[j],1,1), nt[h], substr(trinuc_list[j],3,3))
}
}
## a. Rates
# Per-site substitution rates = observed counts / available sites
rates_target = c(n_matrix_global/L_matrix_global); names(rates_target) = c(rate_names)
rates_neutral = c(n_matrix_global_neutral/L_matrix_global_neutral); names(rates_neutral) = c(rate_names)
# Indel rate per bp (the L matrix counts each site 3 times, hence the *3)
indelsrate_target = numindels_target/sum(L_matrix_global,na.rm=T)*3
indelsrate_neutral = numindels_neutral/sum(L_matrix_global_neutral,na.rm=T)*3
## b. Local density of mutations
# Expected number of subs and indels
targetregions_df = data.frame(region=as.vector(trinucfreq_table[,1]))
# Region coordinates are encoded as "chr:start1-end1:start2-end2:..."; take overall min/max
aux = strsplit(as.vector(trinucfreq_table[,2]), split=":")
targetregions_df$chr = sapply(aux, function(x) x[1])
#aux2 = sapply(aux, function(x) min(suppressWarnings(as.numeric(unlist(strsplit(x[-1], split="-")))), na.rm=T))
targetregions_df$start = sapply(aux, function(x) min(suppressWarnings(as.numeric(unlist(strsplit(x[-1], split="-")))), na.rm=T))
targetregions_df$end = sapply(aux, function(x) max(suppressWarnings(as.numeric(unlist(strsplit(x[-1], split="-")))), na.rm=T))
#targetregions_df$start = sapply(aux2, function(x) min(x,na.rm=T))
#targetregions_df$end = sapply(aux2, function(x) max(x,na.rm=T))
# Shouldn't the previous two lines be replaced by the following two instead? At least on two occasions I have needed to run it like this...
#targetregions_df$start = apply(aux2,2,min)
#targetregions_df$end = apply(aux2,2,max)
# Expected counts = sum over trinucleotides of (site count x global rate)
tf = as.matrix(trinucfreq_table[,trinuc_list])
targetregions_df$exp_subs = apply(tf, 1, function(x) sum(rep(x,4)*rates_target, na.rm=T) )
targetregions_df$exp_indels = apply(tf, 1, function(x) sum(x)*indelsrate_target )
neutralregions_df = data.frame(region=as.vector(trinucfreq_table_neutral[,1:3]))
tf = as.matrix(trinucfreq_table_neutral[,trinuc_list])
neutralregions_df$exp_subs = apply(tf, 1, function(x) sum(rep(x,4)*rates_neutral, na.rm=T) )
neutralregions_df$exp_indels = apply(tf, 1, function(x) sum(x)*indelsrate_neutral )
# Observed number of subs and indels
targetregions_df$obs_subs = 0
targetregions_df$obs_indels = 0
indel_pos = is.na(mutations$trinuc_ref)
numsubs = table( mutations$target_region[mutations$target_region > 0 & !indel_pos] )
targetregions_df$obs_subs[as.numeric(names(numsubs))] = numsubs
numinds = table( mutations$target_region[mutations$target_region > 0 & indel_pos] )
targetregions_df$obs_indels[as.numeric(names(numinds))] = numinds
neutralregions_df$obs_subs = 0
neutralregions_df$obs_indels = 0
indel_pos = is.na(mutations$trinuc_ref)
numsubs = table( mutations$neutral_region[mutations$neutral_region > 0 & !indel_pos] )
neutralregions_df$obs_subs[as.numeric(names(numsubs))] = numsubs
numinds = table( mutations$neutral_region[mutations$neutral_region > 0 & indel_pos] )
neutralregions_df$obs_indels[as.numeric(names(numinds))] = numinds
# Estimating the neighbourhood "t" for every target region
# We choose as neighbouring neutral regions of a given target region all those that are
# within "neighbourhood_localrate" distance of the target region. We consider any overlap
# between the segments as valid, which means that the neighbourhood of a target region
# will always be contained within the interval "neighbourhood_localrate + 100kb" around
# the target region (for a 100kb binning of the genome in the neutral reference)
# For example, if neighbourhood_localrate=1e5, only regions less or equal to 200kb away
# from the ends of the target region are considered in the calculation of the local rate.
# Neighbourhood size scales inversely with the total mutation burden (sparser data -> wider window)
neighbourhood_localrate = ceiling(0.001/dim(mutations)[1]*3e9)*100000
rangesTARGET = GRanges(target_regions[,2], IRanges(as.numeric(target_regions[,3])-neighbourhood_localrate, as.numeric(target_regions[,4])+neighbourhood_localrate))
rangesNEUTRAL = GRanges(neutralregions_df[,1], IRanges(as.numeric(neutralregions_df[,2]), as.numeric(neutralregions_df[,3])))
ol = findOverlaps(rangesTARGET, rangesNEUTRAL, type="any", select="all")
olmatrix = as.matrix(ol)
# (target region id, neutral bin index) pairs
neighbours = unique(cbind(target_regions[olmatrix[,1],1],olmatrix[,2]))
targetregions_df$local_t = NA
targetregions_df$local_t_indels = NA
# local_t = obs/exp ratio over the neighbouring neutral bins (local mutation-rate multiplier)
for (j in 1:dim(targetregions_df)[1]) {
neutral_bins = neighbours[neighbours[,1]==j,2]
targetregions_df$local_t[j] = sum(neutralregions_df$obs_subs[neutral_bins])/sum(neutralregions_df$exp_subs[neutral_bins])
targetregions_df$local_t_indels[j] = sum(neutralregions_df$obs_indels[neutral_bins])/sum(neutralregions_df$exp_indels[neutral_bins])
if ((j/1000)==round(j/1000)) { print(j/dim(targetregions_df)[1]) } # Progress indicator
}
### 4. Negative binomial regression with local density of mutations and covariates
# Masking out regions with ZERO expected values
targetregions_df$exp_subs[targetregions_df$exp_subs==0] = NA
targetregions_df$exp_indels[targetregions_df$exp_indels==0] = NA
## 4a. MODEL 1: No use of local mutation rates or covariates
# Negative binomial regression
# Intercept-free model with the expected count as offset: theta captures the
# overdispersion of observed counts around the trinucleotide-based expectation
model_subs = glm.nb(formula = targetregions_df$obs_subs ~ offset(log(targetregions_df$exp_subs)) -1 )
nb_size_subs = model_subs$theta
if (numindels_target>0) {
model_indels = glm.nb(formula = targetregions_df$obs_indels ~ offset(log(targetregions_df$exp_indels)) -1 )
nb_size_indels = model_indels$theta
}
# P-values (Neg Binom)
targetregions_df$pval_subs = NA
targetregions_df$pval_indels = NA
targetregions_df$pval_both = NA
for (j in 1:dim(targetregions_df)[1]) {
# The -0.1 makes the test one-sided inclusive: P(X >= obs)
targetregions_df$pval_subs[j] = pnbinom(q=targetregions_df$obs_subs[j]-0.1, mu=targetregions_df$exp_subs[j], size=nb_size_subs, lower.tail=F)
if (numindels_target>0) {
targetregions_df$pval_indels[j] = pnbinom(q=targetregions_df$obs_indels[j]-0.1, mu=targetregions_df$exp_indels[j], size=nb_size_indels, lower.tail=F)
# Fisher combined p-value
p_vec = c(targetregions_df$pval_subs[j], targetregions_df$pval_indels[j])
targetregions_df$pval_both[j] = 1-pchisq(-2*sum(log(p_vec)),length(p_vec)*2)
} else {
# We use only subs
targetregions_df$pval_both[j] = targetregions_df$pval_subs[j]
}
if (round(j/1000)==(j/1000)) { print(j/dim(targetregions_df)[1]) } # Progress indicator
}
targetregions_df$qval_subs = p.adjust(targetregions_df$pval_subs, method="BH") # Adjusted q-value
targetregions_df$qval_indels = p.adjust(targetregions_df$pval_indels, method="BH") # Adjusted q-value
targetregions_df$qval_both = p.adjust(targetregions_df$pval_both, method="BH") # Adjusted q-value
## 4b. MODEL 2: Using local mutation rates and covariates
# Excluding elements overlapping "driver" genomic regions from the negbin background fit
gr_elements = GRanges(targetregions_df$chr, IRanges(targetregions_df$start, targetregions_df$end))
olmatrix = as.matrix(findOverlaps(gr_elements, gr_drivers, type="any", select="all"))
exclude_forfit = (1:nrow(targetregions_df)) %in% unique(olmatrix[,1])
targetregions_df$exclude_forfit = exclude_forfit
# Negative binomial regression: the local mutation-rate multiplier (local_t) enters
# as a covariate, and known driver regions are excluded so they do not inflate the background
model_subs = glm.nb(formula = obs_subs ~ offset(log(exp_subs)) + local_t , data=targetregions_df[!exclude_forfit,])
nb_size_subs_cv = model_subs$theta
targetregions_df$cv_predicted_subs = exp(predict(model_subs, newdata=targetregions_df))
if (numindels_target>0) {
    model_indels = glm.nb(formula = obs_indels ~ offset(log(exp_indels)) + local_t_indels , data=targetregions_df[!exclude_forfit,]) ## FOR INDELS THE LOCAL RATE IS A COVARIATE
    nb_size_indels_cv = model_indels$theta
    targetregions_df$cv_predicted_indels = exp(predict(model_indels, newdata=targetregions_df))
}
# P-values (Neg Binom), covariate-adjusted
targetregions_df$pval_subs_CV = NA
targetregions_df$pval_indels_CV = NA
targetregions_df$pval_both_CV = NA
for (j in 1:dim(targetregions_df)[1]) {
    # One-sided inclusive test: P(X >= obs) under the covariate-adjusted expectation
    targetregions_df$pval_subs_CV[j] = pnbinom(q=targetregions_df$obs_subs[j]-0.1, mu=targetregions_df$cv_predicted_subs[j], size=nb_size_subs_cv, lower.tail=F)
    if (numindels_target>0) {
        targetregions_df$pval_indels_CV[j] = pnbinom(q=targetregions_df$obs_indels[j]-0.1, mu=targetregions_df$cv_predicted_indels[j], size=nb_size_indels_cv, lower.tail=F)
        # Fisher combined p-value
        p_vec = c(targetregions_df$pval_subs_CV[j], targetregions_df$pval_indels_CV[j])
        targetregions_df$pval_both_CV[j] = 1-pchisq(-2*sum(log(p_vec)),length(p_vec)*2)
    } else {
        # We use only subs.
        # BUGFIX: this previously wrote pval_both[j] = pval_subs[j] (the Model 1 columns),
        # leaving pval_both_CV as NA for every region whenever there were no indels.
        targetregions_df$pval_both_CV[j] = targetregions_df$pval_subs_CV[j]
    }
    if (round(j/1000)==(j/1000)) { print(j/dim(targetregions_df)[1]) } # Progress indicator
}
targetregions_df$qval_subs_CV = p.adjust(targetregions_df$pval_subs_CV, method="BH") # Adjusted q-value
targetregions_df$qval_indels_CV = p.adjust(targetregions_df$pval_indels_CV, method="BH") # Adjusted q-value
targetregions_df$qval_both_CV = p.adjust(targetregions_df$pval_both_CV, method="BH") # Adjusted q-value
# Tiered FDR correction: driver-overlapping (restricted hypothesis set) and
# non-driver regions are FDR-corrected separately
targetregions_df$qval_both_CV_tiered = NA
inds = targetregions_df$exclude_forfit
targetregions_df$qval_both_CV_tiered[inds] = p.adjust(targetregions_df$pval_both_CV[inds], method="BH") # Adjusted q-value
targetregions_df$qval_both_CV_tiered[!inds] = p.adjust(targetregions_df$pval_both_CV[!inds], method="BH") # Adjusted q-value
targetregions_df = targetregions_df[order(targetregions_df$qval_both_CV_tiered),]
write.table(targetregions_df, file=paste(output_prefix,"-Selection_output.txt",sep=""), sep = "\t", row.names = FALSE, col.names = TRUE, append=FALSE, quote=FALSE)
# Plot of the underlying gamma distributions of the 4 models (subs and indels with and without covariates)
pdf(paste(output_prefix,"-Underlying_gamma_distributions_landscape.pdf",sep=""),height=4,width=4)
xvec = seq(0,4,by=0.0001)
plot(xvec,0.0001*dgamma(x=xvec,shape=nb_size_subs_cv,rate=nb_size_subs_cv),type="l",lty=1,xlab="Relative mutation rate",ylab="",col="cadetblue",las=1,main="PDFs of the underlying Gamma distributions",cex.axis=0.8,cex.main=0.85)
lines(xvec,0.0001*dgamma(x=xvec,shape=nb_size_subs,rate=nb_size_subs),lty=2,col="cadetblue")
# Indel curves disabled here (indel model may not have been fitted)
#lines(xvec,0.0001*dgamma(x=xvec,shape=nb_size_indels,rate=nb_size_subs),lty=2,col="chocolate")
#lines(xvec,0.0001*dgamma(x=xvec,shape=nb_size_indels_cv,rate=nb_size_subs_cv),lty=1,col="chocolate")
#legend("topright", lty=c(2,1,2,1), col=c("cadetblue","cadetblue","chocolate","chocolate"), title="Size parameters", legend=c(sprintf("subs=%0.3g",nb_size_subs),sprintf("subs_cv=%0.3g",nb_size_subs_cv),sprintf("indels=%0.3g",nb_size_indels),sprintf("indels_cv=%0.3g",nb_size_indels_cv)), box.col=NA, cex=0.8)
legend("topright", lty=c(2,1), col=c("cadetblue","cadetblue"), title="Size parameters", legend=c(sprintf("subs=%0.3g",nb_size_subs),sprintf("subs_cv=%0.3g",nb_size_subs_cv)), box.col=NA, cex=0.8)
dev.off()
## Confidence intervals for the obs/exp ratios in NBR (23.12.2016)
calculate_CI95_flag = 1
# Subfunction to calculate CI95% for the obs/exp ratios of subs and indels in NBR
# Profile-likelihood CI for the obs/exp ratio w under a negative binomial model:
#   n_obs ~ NB(mu = n_exp * w, size = nb_size)
# The CI bounds are found by an iterative grid search (iter refinement rounds of
# grid_size points each) for the points where the likelihood-ratio statistic
# 2*(logL(wmle) - logL(w)) crosses the chi-squared(1) 95% cutoff.
# Returns c(wmle, w_low, w_high); all wmax if wmle is out of bounds, all NA if n_exp is NA.
# NOTE(review): if the true lower bound lies below the search floor (1e-9), the
# which() below can be empty and max() errors — callers wrap this in tryCatch.
ci95nbr = function(n_obs,n_exp,nb_size) {
wmax = 100000; iter = 6; grid_size = 10; cutoff = qchisq(p=0.95,df=1) # Default params
wmle = n_obs/n_exp # MLE for w
ml = dnbinom(x=n_obs, mu=n_exp*wmle, size=nb_size, log=T) # LogLik under MLE
if (!is.na(n_exp)) {
if (wmle<wmax) {
# 1. Iterative search of lower bound CI95%
if (wmle>0) {
search_range = c(1e-9, wmle)
for (it in 1:iter) {
wvec = seq(search_range[1], search_range[2], length.out=grid_size)
ll = dnbinom(x=n_obs, mu=n_exp*wvec, size=nb_size, log=T)
lr = 2*(ml-ll) > cutoff
ind = max(which(wvec<=wmle & lr))
search_range = c(wvec[ind], wvec[ind+1])
}
w_low = wvec[ind]
} else {
w_low = 0
}
# 2. Iterative search of higher bound CI95%
search_range = c(wmle, wmax)
llhighbound = dnbinom(x=n_obs, mu=n_exp*wmax, size=nb_size, log=T)
outofboundaries = !(2*(ml-llhighbound) > cutoff)
if (!outofboundaries) {
for (it in 1:iter) {
wvec = seq(search_range[1], search_range[2],length.out=grid_size)
ll = dnbinom(x=n_obs, mu=n_exp*wvec, size=nb_size, log=T)
lr = 2*(ml-ll) > cutoff
ind = min(which(wvec>=wmle & lr))
search_range = c(wvec[ind-1], wvec[ind])
}
w_high = wvec[ind]
} else {
w_high = wmax # CI upper bound not reached within [wmle, wmax]
}
} else {
wmle = w_low = w_high = wmax # Out of bounds
}
} else {
wmle = w_low = w_high = NA # invalid
}
return(c(wmle,w_low,w_high))
}
if (calculate_CI95_flag == 1) {
    # Obs/exp CI95% per region for substitutions; errors (e.g. NA expectations) yield NA,NA,NA.
    # BUGFIX: the NB size parameter was nb_size_indels_cv (copy-paste from the commented-out
    # indels block below). It must be nb_size_subs_cv for substitutions; nb_size_indels_cv is
    # also undefined whenever numindels_target==0, which silently turned every CI into NAs
    # via the tryCatch handler.
    ci95_subs = t(apply(as.matrix(targetregions_df[,c("obs_subs","cv_predicted_subs")]), 1, function(x) tryCatch(ci95nbr(x[1], x[2], nb_size_subs_cv), error=function(err) rep(NA,3))))
    targetregions_df$obsexp_subs_mle = ci95_subs[,1]
    targetregions_df$obsexp_subs_low = ci95_subs[,2]
    targetregions_df$obsexp_subs_high = ci95_subs[,3]
    # Indel CIs disabled (enable only when the indel model has been fitted)
    #ci95_indels = t(apply(as.matrix(targetregions_df[,c("obs_indels","cv_predicted_indels")]), 1, function(x) tryCatch(ci95nbr(x[1], x[2], nb_size_indels_cv), error=function(err) rep(NA,3))))
    #targetregions_df$obsexp_indels_mle = ci95_indels[,1]
    #targetregions_df$obsexp_indels_low = ci95_indels[,2]
    #targetregions_df$obsexp_indels_high = ci95_indels[,3]
    # Final outputs: per-region selection results plus the annotated mutation table
    write.table(targetregions_df, file=paste(output_prefix,"-Selection_output_landscape.txt",sep=""), sep = "\t", row.names = FALSE, col.names = TRUE, append=FALSE, quote=FALSE)
    write.table(mutations, file=paste(output_prefix,"-Intermediate_mutations.txt", sep=""), sep = "\t", row.names = FALSE, col.names = TRUE, append=FALSE, quote=FALSE)
}
|
80138efbf2dbb60bc0302daa486089c58717c125
|
093dacede7c431ab1cbef672830f76920942b801
|
/man/IRB.Rd
|
7d70fbd820c02eb1ffc07e1df2acb258c1cb0c1b
|
[
"Apache-2.0"
] |
permissive
|
bhklab/MetaGxBreast
|
a30cee29007ededf0fbeb64524f18b3a3b8128b8
|
3ba8f39928a20dffb799c338622a1461d2e9ef98
|
refs/heads/master
| 2021-06-03T09:54:44.555453
| 2021-04-23T18:54:53
| 2021-04-23T18:54:53
| 100,535,452
| 4
| 2
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,653
|
rd
|
IRB.Rd
|
\name{ IRB }
\alias{ IRB }
\docType{data}
\title{ IRB }
\description{ ExpressionSet for the IRB Dataset}
\format{
\preformatted{
experimentData(eset):
Experiment data
Experimenter name:
Laboratory:
Contact information: http://www.ncbi.nlm.nih.gov/pubmed/18297396
Title:
URL: http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE5460
PMIDs: 18297396
No abstract available.
notes:
summary:
mapping.method:
maxRowVariance
mapping.group:
EntrezGene.ID
preprocessing:
As published by original author.
featureData(eset):
An object of class 'AnnotatedDataFrame'
featureNames: 1007_s_at 1053_at ... AFFX-HUMISGF3A/M97935_MB_at
(42447 total)
varLabels: probeset gene EntrezGene.ID best_probe
varMetadata: labelDescription
}}
\details{
\preformatted{
assayData: 42447 features, 129 samples
Platform type:
---------------------------
Available sample meta-data:
---------------------------
sample_name:
Length Class Mode
129 character character
alt_sample_name:
Length Class Mode
129 character character
sample_type:
tumor
129
er:
negative positive
53 76
her2:
negative positive
98 31
tumor_size:
Min. 1st Qu. Median Mean 3rd Qu. Max.
0.800 1.500 2.200 2.488 3.000 8.500
N:
0 1
64 65
grade:
1 2 3
27 32 70
treatment:
untreated
129
batch:
IRB
129
uncurated_author_metadata:
Length Class Mode
129 character character
}}
\source{ http://www.ncbi.nlm.nih.gov/geo/query/acc.cgi?acc=GSE5460 }
\keyword{datasets}
|
fca2bd4c784b99bb75920c8551f6f7fe6302f69b
|
2e8a0f83c5a27cfd1977eb7b94863d12bee7bc5f
|
/CorInteractVSneighbour.R
|
dac13259e9ee5c833fcb0074b32328077b66d1e0
|
[] |
no_license
|
ABorrel/saltbridges
|
611036cfa101da4c0e390de3c12b9cae04e3b5b0
|
5b8a0bb15ab6876082891f2afc2d3ce0b4c03c7a
|
refs/heads/master
| 2020-06-17T21:51:16.843534
| 2016-11-28T10:54:37
| 2016-11-28T10:54:37
| 74,966,556
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,547
|
r
|
CorInteractVSneighbour.R
|
#!/usr/bin/env Rscript
# Plots the percentage of charged interaction partners against the mean number of
# neighbours per substructure (with SD error bars). Requires GetPercent() from tool.R.
source("tool.R")
############
# MAIN #
############
# Command-line arguments: [1] interaction-count table, [2] neighbour-count table
# (mean in col 1, SD in col 2), [3] output directory/prefix for the SVG figures
args <- commandArgs(TRUE)
p_interact = args[1]
p_nb_neighbor = args[2]
pr_result = args[3]
d_interact = read.table(p_interact, header = TRUE)
d_neighbor = read.table(p_nb_neighbor, header = TRUE)
print (d_interact)
print (d_neighbor)
# Row-wise percentages of each interaction type (GetPercent defined in tool.R)
d_percentage = GetPercent (d_interact, 0)
rownames(d_percentage) = rownames(d_interact)
colnames(d_percentage) = colnames(d_interact)
# x-axis values: % of oppositely charged partners (N for COO substructures, COO otherwise)
yplot = NULL
for (sub in rownames(d_neighbor)){
if (sub == "COO"){
yplot = append (yplot, d_percentage[sub,"N"])
}else{
yplot = append (yplot, d_percentage[sub,"COO"])
}
}
names(yplot) = rownames(d_percentage)
svg (paste(pr_result, "NbVSCI.svg", sep = ""), bg = "transparent", 12, 10)
par (mar=c(4,5,1,2))
plot ( yplot, d_neighbor[,1], pch = 8, ylim = c(min(d_neighbor[,1] - d_neighbor[,2]), max (d_neighbor[,1] + d_neighbor[,2])), xlim = c(0,100), cex = 3, cex.lab = 2, xlab = "% of charged groups", ylab = "Number of neighbours", cex.axis = 1.5)
text (yplot + 1,d_neighbor[,1] + 1, labels = rownames(d_neighbor), cex = 1.6)
#error bar
arrows(yplot, d_neighbor[,1] - d_neighbor[,2], yplot, d_neighbor[,1] + d_neighbor[,2], angle=90, lwd = 2)
arrows(yplot, d_neighbor[,1] + d_neighbor[,2], yplot, d_neighbor[,1] - d_neighbor[,2], angle=90, lwd = 2)
#arrows(d[,3], d[,1], d[,4], d[,1], angle=90, lwd = 2)
#arrows(d[,4], d[,1], d[,3], d[,1], angle=90, lwd = 2)
dev.off()
# Second figure: same layout, but x-axis sums ALL ionizable partner percentages
# (charged group + HOH + NH/OH) instead of only the oppositely charged group
yplot = NULL
for (sub in rownames(d_neighbor)){
if (sub == "COO"){
yplot = append (yplot, d_percentage[sub,"N"] + d_percentage[sub,"HOH"] + d_percentage[sub,"NH"])
}else{
yplot = append (yplot, d_percentage[sub,"COO"] + d_percentage[sub,"HOH"] + d_percentage[sub,"OH"])
}
}
names(yplot) = rownames(d_percentage)
svg (paste(pr_result, "NbVSCIALL.svg", sep = ""), bg = "transparent", 12, 10)
par (mar=c(4,5,1,2))
plot ( yplot, d_neighbor[,1], pch = 8, ylim = c(min(d_neighbor[,1] - d_neighbor[,2]), max (d_neighbor[,1] + d_neighbor[,2])), xlim = c(0,100), cex = 3, cex.lab = 2, xlab = "% of all ionizable groups", ylab = "Number of neighbours", cex.axis = 1.5)
text (yplot + 1,d_neighbor[,1] + 1, labels = rownames(d_neighbor), cex = 1.6)
#error bar
arrows(yplot, d_neighbor[,1] - d_neighbor[,2], yplot, d_neighbor[,1] + d_neighbor[,2], angle=90, lwd = 2)
arrows(yplot, d_neighbor[,1] + d_neighbor[,2], yplot, d_neighbor[,1] - d_neighbor[,2], angle=90, lwd = 2)
#arrows(d[,3], d[,1], d[,4], d[,1], angle=90, lwd = 2)
#arrows(d[,4], d[,1], d[,3], d[,1], angle=90, lwd = 2)
dev.off()
|
a71060856bb9ed84735d6bece2bf1b6a1fd855f4
|
ed2530b7c73ad80b86a0e60db075aaacdf53dbea
|
/QTL_genes.R
|
3c58a6c55ed4eb8f9bd9de2745616e6b046d9a2b
|
[] |
no_license
|
BrianRitchey/qtl
|
7444a409d4d07abda3f13b5cfa2794113f93c0e9
|
9792fef3dfa7ecdd62857d58ca3f9966456ae6b8
|
refs/heads/master
| 2023-07-08T01:38:25.485945
| 2017-09-11T18:44:29
| 2017-09-11T18:44:29
| 98,877,595
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,303
|
r
|
QTL_genes.R
|
# QTL_genes function
#
# written by Brian Ritchey.
# Used in conjunction with R/qtl package.
# Returns genes in a QTL interval based on "Mouse_Genome", which is Mouse Genome Build 37 available from BioMart.
# Utilizes R/qtl's "find.flanking" function.
#
# Arguments: Can either supply a bayesint object as the only argument (bayes)
# or supply the chromosome (chr) and starting (pos1) + ending positions (pos2) for the interval of interest
QTL_genes <-
  function(bayes, chr, pos1, pos2){
    if (!missing(bayes)){
      # bayes$pos[1] and bayes$pos[3] bound the credible interval (Mb); bayes$pos[2] is the LOD peak
      chr_subset <- subset(Mouse_Genome,
                           subset = Chromosome == as.integer(bayes$chr[1]))
      final <- subset(chr_subset,
                      subset = Position > (bayes$pos[1] * 10^6) & Position < (bayes$pos[3] * 10^6))
      # Distance (Mb, 2 decimals) from each gene to the LOD peak
      distance_to_peak <- abs(((bayes$pos[2] * 10^6) - final$Position) / 10^6)
      distance_to_peak <- round(distance_to_peak, digits = 2)
      output <- data.frame(final, distance_to_peak)
      # BUGFIX: reset row names on the returned data frame; the original reset
      # rownames(final) AFTER building output, which had no effect on the result
      rownames(output) <- NULL
      output
    }
    else{
      chr_subset <- subset(Mouse_Genome,
                           subset = Chromosome == chr)
      final <- subset(chr_subset,
                      subset = Position > (pos1 * 10^6) & Position < (pos2*10^6))
      output <- data.frame(final)
      output
    }
    output
  }
|
37ff9f39d7ed8b5387550bbf870906c011d09c3b
|
80b3a24b6fece4d67c654a1c897ad479d5447a64
|
/Analysis.R
|
deaaadc63f98c57d00775b364c3e66fcaa1f4ecc
|
[] |
no_license
|
rholdo/Holdo_et_al_2021_EcolMon
|
22463945ed282523aef9b540ded89ccab6c3e17b
|
786b684a3f1c332a8670dea048811ceb0b711907
|
refs/heads/main
| 2023-04-11T11:47:11.028423
| 2022-01-10T15:19:38
| 2022-01-10T15:19:38
| 366,747,777
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 68,532
|
r
|
Analysis.R
|
# Seedling growth gradients interact with homogeneous disturbance regimes
# to explain tree cover discontinuities in savannas
# Code author: R. Holdo (rholdo@uga.edu)
# Nov 11, 2021
library(dplyr)
library(ggplot2)
library(nlme)
library(lme4)
library(reshape2)
library(grid)
library(gridExtra)
library(cowplot)
library(sp)
library(spdep)
library(spatialEco)
library(quantreg)
# The following assumes you are using RStudio
setwd(dirname(rstudioapi::getActiveDocumentContext()$path))
df <- read.csv("Seedling_final.csv")
# Note: 'Subplot' refers to an individual seedling
# Extract species info to reuse later
df.species <- subset(df, Per == 'Jun_2019')
df.species <- df.species[, c(1,3)]
# What proportion of individual stems were not found at some point in the study?
100 - mean(df$Found, na.rm = TRUE) * 100
# Only 0.507 %
# How many were lost for good at some point?
# This does not count those missing only in the very last survey (N = 2)
lost <- aggregate((1 - Found) ~ Subplot, df, function(x){sum(x, na.rm = TRUE)})
names(lost)[2] <- 'Lost'
lost2plus <- subset(lost, Lost > 1)
# These were 'lost' for more than 2 surveys. How many of these were
# still missing by the last survey?
lostfinal <- subset(df, Per == "Jun_2019" & Found == 0)
lost2plus$Subplot
lostfinal$Subplot
# There is no overlap in the two groups
# There were therefore no seedlings that were never again found
# Include only cases where seedling was found
df <- subset(df, Found == 1)
df$Found <- NULL
# Order by seedling ID, then chronologically within seedling
df <- df[order(df$Subplot, df$Day), ]
# Obtain mean grass biomass across subplots
grass.ag <- aggregate(Grass ~ Subplot, df, function(x){mean(x, na.rm = TRUE)})
# For seedlings that appeared dead but came back to life,
# Recategorize as 'topkilled' instead of dead, and fill
# corresponding zeros in 'Alive' column with 1s.
# Rewritten with lapply + a single do.call(rbind, ...) instead of growing df.cl
# with rbind inside a loop (quadratic copying); output row order is unchanged.
subplots <- unique(df$Subplot)
N <- length(subplots)
df.cl <- do.call(rbind, lapply(subplots, function(sp) {
  sub <- subset(df, Subplot == sp)
  # Index of the last survey in which the seedling was recorded alive
  idx <- max(which(sub$Alive == 1))
  # "Dead" records that precede a later live record were misclassified topkills
  misclass.death <- which(sub$Alive[1:idx] == 0)
  if (length(misclass.death) > 0) {
    sub$Alive[misclass.death] <- 1
    sub$Topkilled[misclass.death] <- 1
  }
  sub
}))
# Proportion of seedlings with mortality and topkill
# Make subset without Initial period
df.cl2 <- subset(df.cl, Per != "Initial") # Exclude setup survey
df.cl2$Dead <- 1 - df.cl2$Alive
# Did an individual die over the course of the survey (3 years)?
df.ag.mort <- aggregate(Dead ~ Subplot,
df.cl2, function(x){max(x, na.rm = TRUE)})
# Did an individual sustain damage in a given year?
df.ag.dam <- aggregate(cbind(Topkilled, Damage) ~ Subplot + Year,
df.cl2, function(x){max(x, na.rm = TRUE)})
# Calculate mean fire and herbivory damage in year 3
df.ag.fireherb <- aggregate(cbind(Fire, Herbivory) ~ Subplot, subset(df.cl2, Year == 2019),
function(x){mean(x, na.rm = TRUE)})
names(df.ag.fireherb)[2:3] <- c('F2019', 'H2019')
# Mean annual damage and topkill
df.ag <- aggregate(cbind(Topkilled, Damage) ~ Subplot, df.ag.dam, mean)
# Assemble the per-seedling summary table (outer joins keep all seedlings)
df.ag <- merge(df.ag, df.ag.fireherb, by = 'Subplot', all = TRUE)
df.ag <- merge(df.ag, df.ag.mort, by = 'Subplot', all = TRUE)
# Find maximum vertical growth across 2017, 2018, and 2019 seasons
# First, make initial height = 0 for individuals with size NA in Initial period
# These were recently topkilled at the very outset
df$Basal_cm <- ifelse(is.na(df$Basal_cm) & df$Per == 'Initial', 0, df$Basal_cm)
df$Ht_m <- ifelse(is.na(df$Ht_m) & df$Per == 'Initial', 0, df$Ht_m)
# Year 1: growth = max height during 2017 minus height at the Initial survey
year1 <- subset(df, Year == 2017)
# Keep only seedlings that survived year 1
year1IDs <- subset(year1, Per == "May_2017" & Alive == 1)
year1 <- subset(year1, Subplot %in% year1IDs$Subplot)
year1H0 <- subset(year1, Per == "Initial")
year1H0 <- year1H0[, c(1,11)] # Columns: Subplot, Ht_m
year1H0$Ht_m <- ifelse(is.na(year1H0$Ht_m), 0, year1H0$Ht_m)
year1Hmax <- aggregate(Ht_m ~ Subplot, year1[year1$Per != "Initial", ],
function(x){max(x, na.rm = TRUE)})
year1H <- merge(year1H0, year1Hmax, by = 'Subplot')
names(year1H)[2:3] <- c("H0", "Hmax")
# Year 2: baseline is the May 2017 survey
year2 <- subset(df, Per == "May_2017" | Year == 2018)
# Keep only seedlings that survived year 2
year2IDs <- subset(year2, Per == "May_2018" & Alive == 1)
year2 <- subset(year2, Subplot %in% year2IDs$Subplot)
year2H0 <- subset(year2, Per == "May_2017")
year2H0 <- year2H0[, c(1,11)]
year2Hmax <- aggregate(Ht_m ~ Subplot, year2[year2$Per != "May_2017", ],
function(x){max(x, na.rm = TRUE)})
year2H <- merge(year2H0, year2Hmax, by = 'Subplot')
names(year2H)[2:3] <- c("H0", "Hmax")
# Year 3: baseline is the May 2018 survey
year3 <- subset(df, Per == "May_2018" | Year == 2019)
# Keep only seedlings that survived year 3
year3IDs <- subset(year3, Per == "Jun_2019" & Alive == 1)
year3 <- subset(year3, Subplot %in% year3IDs$Subplot)
year3H0 <- subset(year3, Per == "May_2018")
year3H0 <- year3H0[, c(1,11)]
year3Hmax <- aggregate(Ht_m ~ Subplot, year3[year3$Per != "May_2018", ],
function(x){max(x, na.rm = TRUE)})
year3H <- merge(year3H0, year3Hmax, by = 'Subplot')
names(year3H)[2:3] <- c("H0", "Hmax")
# Merge together
# After merging, suffixes .x/.y mark year-1/year-2 columns; year 3 keeps bare names
growth <- merge(year1H, year2H, by = 'Subplot', all = TRUE)
growth <- merge(growth, year3H, by = 'Subplot', all = TRUE)
# Annual height increments: year 1 relative to initial height,
# years 2 and 3 relative to the previous year's maximum
growth$deltaH1 <- growth$Hmax.x - growth$H0.x
growth$deltaH2 <- growth$Hmax.y - growth$Hmax.x
growth$deltaH3 <- growth$Hmax - growth$Hmax.y
# Drop the duplicated baseline columns (positions 4 and 6 after the merges)
growth <- growth[, -c(4,6)]
names(growth)[2:5] <- c('H0', 'H1', 'H2', 'H3')
# Long format: one row per subplot x year with its height increment
growth.l <- melt(growth, id.vars = "Subplot", measure.vars = 6:8,
                 variable.name = "Year", value.name = "deltaH")
growth.l$Year <- ifelse(growth.l$Year == "deltaH1", 2017,
                        ifelse(growth.l$Year == "deltaH2", 2018, 2019))
# Combine growth data with damage data by year
grdam <- merge(growth.l, df.ag.dam, by = c('Subplot', 'Year'), all = TRUE)
# Mean growth restricted to undamaged, non-topkilled subplot-years
grundam <- subset(grdam, Damage == 0 & Topkilled == 0)
grundam.ag <- aggregate(deltaH ~ Subplot, grundam,
                        function(x){mean(x, na.rm = TRUE)})
names(grundam.ag)[2] <- 'deltaH.undam'
# Combine aggregated disturbance dataset with aggregated grass biomass dataset
df.ag <- merge(df.ag, grass.ag, by = 'Subplot', all = TRUE)
# Now combine with growth data
growth$deltaH <- rowMeans(growth[,6:8], na.rm = TRUE)
growth.all <- growth[, c(1,9)]
df.ag <- merge(df.ag, growth.all, by = 'Subplot', all = TRUE)
# Now mean growth across 3 years has been added
# Add mean growth for periods without damage
df.ag <- merge(df.ag, grundam.ag, by = 'Subplot', all = TRUE)
# Reinsert species variable
df.ag <- merge(df.ag, df.species)
# Figure for calibration of light sensor data
# Log-log regression of PAR against LDR resistance defines the calibration curve
ldrcal <- read.csv("Light_cal.csv")
ldrreg <- lm(log(PAR) ~ log(R), ldrcal)
# Scatter of log resistance vs log PAR with the fitted calibration line added below
figS1_app_S1 <- ggplot(ldrcal) + geom_point(aes(x = log(R), y = log(PAR)),
                                            shape = 21, size = 6, stroke = 2) +
  labs(x = expression(paste('log R (', Omega, ')')),
       y = expression(paste('log PAR (', mu, 'mol m'^'-2', ' s'^'-1',')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(2)),
        axis.title.x = element_text(vjust = 0, size = rel(2)),
        axis.text = element_text(size = rel(2)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(2)),
        legend.text = element_text(size = rel(2)))
# Overlay the regression line and its R-squared (value hard-coded in the label)
figS1_app_S1 <- figS1_app_S1 + geom_abline(intercept = coef(ldrreg)[1], slope = coef(ldrreg)[2], lwd = 1.2) +
  annotate("text", x = 6.5, y = 6, label = "R ^ 2 == 0.99", parse = TRUE, size = 8)
# Obtain TC data, recalculate distances, combine with transect data
tc <- read.csv("Transect_TC_data.csv")
tc <- subset(tc, Type == "Seedling" | Type == "Sensor")
# Estimate basal area, tree density and seedling density along transects from belt transect data
# Calculate distances along transect using PCA
# Recover distance along and from transect for ground tree survey
tree <- read.csv("Transect_Tree_Density_Data.csv")
# Obtain mean crown area (ellipse from the two canopy diameters)
tree$Crown <- pi * tree$Can1 * tree$Can2 / 4
Cr.mean <- mean(tree$Crown, na.rm = TRUE)
# Calculate basal and canopy area
# Columns 7:10 presumably hold the raw canopy fields -- TODO confirm against CSV layout
tree <- tree[, -(7:10)]
# Missing second/third stems contribute zero basal area
tree$Basal2 <- ifelse(is.na(tree$Basal2), 0, tree$Basal2)
tree$Basal3 <- ifelse(is.na(tree$Basal3), 0, tree$Basal3)
# Total basal area per tree summed over up to three stem diameters
tree$Bas <- (tree$Basal1 ^ 2 + tree$Basal2 ^ 2 + tree$Basal3 ^ 2) * pi / 4
tree <- tree[, -c(1, 7:11)]
names(tree)[2] <- "ID"
# Import seedling data from Deus' survey as well
seed <- read.csv("Transect_Seedling_Density_Data.csv")
seed <- seed[, c(2:5,11)]
# Join the seedling location and tree/seedling survey datasets
# Reorder columns so the three sources can be stacked with rbind
tc2 <- tc[, c(2,1,3,4,5)]
tree2 <- tree[, 1:4]
tree2$Type <- "Tree.tran" # Belt transect tree
tree2 <- tree2[, c(1,2,5,3,4)]
seed2 <- seed[, 1:4]
seed2$Type <- "Seedl.tran" # Belt transect seedling
seed2 <- seed2[, c(1,2,5,3,4)]
joint <- rbind(tc2, tree2)
joint <- rbind(joint, seed2)
tran <- unique(joint$Site)
# Map every point (seedlings/sensors plus belt-transect trees/seedlings) onto a
# common 1-D transect axis, one site at a time.
# For each site, a PCA on the seedling/sensor XY coordinates defines the
# best-fit transect line: PC1 (shifted to start at 0) is distance along the
# transect, PC2 is lateral distance from it. Belt-transect records are then
# projected onto the same axes via predict() so both surveys share coordinates.
# Per-site results are collected in a preallocated list and bound once at the
# end, avoiding the O(n^2) cost of growing a data frame with rbind() in a loop.
site_list <- vector("list", length(tran))
for (i in seq_along(tran)){
  sub <- subset(joint, Site == tran[i])
  # Use seedling/sensor locations as reference for transect line
  # The transect line is found via PCA on XY data
  # Most parsimonious way to join survey seedlings and belt transect data
  sub.seedl <- subset(sub, Type == "Seedling" | Type == "Sensor")
  Xmin <- min(sub.seedl$X)
  Ymin <- min(sub.seedl$Y)
  sub.seedl$Xadj <- sub.seedl$X - Xmin
  sub.seedl$Yadj <- sub.seedl$Y - Ymin
  # Calculate best-fit transect line with PCA on the adjusted coordinates
  # (columns 6:7 are Xadj/Yadj appended above)
  pca <- princomp(sub.seedl[, 6:7])
  pc1min <- min(pca$scores[,1])
  # Adjusted PC1 is distance along transect
  sub.seedl$Distpca <- pca$scores[,1] - pc1min
  # PC2 is distance from the transect (Lateral)
  sub.seedl$Lateral <- pca$scores[,2]
  # Apply model to tree locations to map trees onto transect
  sub.tran <- subset(sub, Type != "Seedling" & Type != "Sensor")
  sub.tran$Xadj <- sub.tran$X - Xmin
  sub.tran$Yadj <- sub.tran$Y - Ymin
  pred <- predict(pca, sub.tran[, 6:7])
  sub.tran$Distpca <- pred[,1] - pc1min
  sub.tran$Lateral <- pred[,2]
  # Reassemble seedling/sensor and belt-transect rows for this site
  site_list[[i]] <- rbind(sub.seedl, sub.tran)
}
newjoint <- do.call(rbind, site_list)
# Problems with Mbuzi Mawe - remove anything that is more than 100 m
# from original transect start - start of belt transect was apparently misplaced
newjoint <- subset(newjoint, Distpca > -100)
# Use segmented regression to identify tree cover breakpoint
newjoint2 <- subset(newjoint, Type == "Seedling" | Type == "Sensor")
# Attach TC30 (tree cover) to the projected seedling/sensor locations
tcid <- tc[,c(1,6)]
tcid <- merge(tcid, newjoint2, by = "ID")
# Flip distance direction in Makoma, MM, Simiyu and Togoro
# so that grassland-woodland transition happens as distance increases
distmax <- aggregate(Distpca ~ Site, newjoint2, max)
names(distmax)[2] <- "Dmax"
tcid <- merge(tcid, distmax)
tcid$Distpca <- ifelse(tcid$Site == "Makoma" | tcid$Site == "Mbuzi Mawe" | tcid$Site == "Simiyu" | tcid$Site == "Togoro",
                       tcid$Dmax - tcid$Distpca, tcid$Distpca)
tcid$Dmax <- NULL
# Find breakpoints (best-fit distance giving a transition from grassland to woodland)
# Exhaustive search: each observed distance is tried as the split point and the
# one minimizing the two-segment sum of squares of TC30 is kept per site
brkdf <- data.frame(tran)
names(brkdf) <- "Site"
brkdf$Brkdist <- 0
for (i in 1:length(tran)){
  sub <- subset(tcid, Site == tran[i])
  sub <- sub[order(sub$Distpca), ]
  sub$ssq <- numeric(nrow(sub))
  for (j in 1:nrow(sub)){
    brk <- sub$Distpca[j]
    # Split at candidate j: mean TC30 left of the break vs right of it.
    # NOTE(review): when j == nrow(sub) the index (j + 1):nrow(sub) runs
    # backwards and its mean is NA, but that value is never selected because
    # every Distpca <= brk at the maximum -- confirm if the data change.
    val <- ifelse(sub$Distpca <= brk, mean(sub$TC30[1:j]), mean(sub$TC30[(j + 1):nrow(sub)]))
    sub$ssq[j] <- sum((val - sub$TC30) ^ 2)
  }
  brkdf$Brkdist[i] <- sub$Distpca[which.min(sub$ssq)]
}
# Add Brkdist to tc
tcid <- merge(tcid, brkdf)
# Classify each point as open grassland or woody habitat relative to the breakpoint
tcid$Habitat <- ifelse(tcid$Distpca <= tcid$Brkdist, "Open", "Woody")
# Transect plot for appendix
# Two-letter site codes, ordered to match the alphabetical factor levels of Site
sitecode <- c("FS", "IK", "MA", "MM", "SI", "SO", "TA", "TO")
tcid$Transect <- factor(sitecode[as.numeric(as.factor(tcid$Site))])
# TC30 against distance along each transect, colored by assigned habitat
figS1_app_S2 <- ggplot(tcid) + geom_point(aes(x = Distpca, y = TC30, fill = Habitat),
                                          shape = 21, size = 4) +
  facet_wrap(~ Transect) +
  labs(x = 'Distance (m)',
       y = expression(paste('TC'[30]))) +
  scale_fill_hue(l=40) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        strip.text = element_text(size = rel(1.5)),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(1.2)),
        axis.title.x = element_text(vjust = 0, size = rel(1.2)),
        axis.text = element_text(size = rel(1.2)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(1.5)),
        legend.text = element_text(size = rel(1.2)))
tc <- tcid
# Drop working columns no longer needed downstream -- TODO confirm positions if tc changes
tc <- tc[, -c(7,8,10,11,13)]
tc <- tc[, -c(7,8,10,11,13)]
# Now apply breakpoint to transect data
transect <- subset(newjoint, Type == "Tree.tran" | Type == "Seedl.tran")
transect <- merge(transect, brkdf)
transect <- merge(transect, distmax)
transect$Distpca <- ifelse(transect$Site == "Makoma" | transect$Site == "Mbuzi Mawe" | transect$Site == "Simiyu" | transect$Site == "Togoro",
transect$Dmax - transect$Distpca, transect$Distpca)
transect$Habitat <- ifelse(transect$Distpca <= transect$Brkdist, "Open", "Woody")
transect <- transect[, -c(4,5,6,7,11)]
tree <- merge(tree, subset(transect, Type == "Tree.tran"), by = c("Site", "ID"))
seed <- merge(seed, subset(transect, Type == "Seedl.tran"), by = c("Site", "ID"))
# Remove temporary dataframes
rm(joint, newjoint, newjoint2, brkdf, tree2, seed2, sub, sub.tran, tc2)
# Calculate tree basal area and seedling density in woody and open habitats
# Get tree basal area data along transects
basdist <- aggregate(cbind(Distpca, Brkdist) ~ Site, tree, max)
bassum <- aggregate(Bas ~ Site + Habitat, tree, sum)
bassum <- merge(bassum, basdist)
# Get max tree height by habitat
basht <- aggregate(Ht ~ Site + Habitat, tree, max)
bassum <- merge(bassum, basht)
# Belt transect is 20 m wide: habitat area = segment length x 20
bassum$Area <- ifelse(bassum$Habitat == 'Open', bassum$Brkdist * 20,
                      (bassum$Distpca - bassum$Brkdist) * 20)
bassum$BA <- bassum$Bas / bassum$Area # Basal area in m2/ha
bassum <- bassum[, -c(3,4,5,7)]
# Compare tree cover with basal area
tchab <- aggregate(TC30 ~ Site + Habitat, tc, mean)
tchab <- merge(tchab, bassum)
# Seedling density per habitat type
seeddist <- aggregate(Distpca ~ Site, seed, max)
# Maximum transect length is max distance from tree or seedling dataset
seeddist <- merge(seeddist, basdist, by = 'Site')
seeddist$Distpca <- ifelse(seeddist$Distpca.x > seeddist$Distpca.y, seeddist$Distpca.x, seeddist$Distpca.y)
seeddist <- seeddist[, -(2:3)]
# Count true seedlings (Seedl_Respr == 'S') per site and habitat
seedsum <- aggregate(ID ~ Site + Habitat, subset(seed, Seedl_Respr == 'S'), function(x){length(x)})
seedsum <- merge(seedsum, seeddist)
# Seedling belt is 3 m wide
seedsum$Area <- ifelse(seedsum$Habitat == 'Open', seedsum$Brkdist * 3,
                       (seedsum$Distpca - seedsum$Brkdist) * 3)
names(seedsum)[3] <- 'N'
seedsum$Density <- seedsum$N / seedsum$Area * 1e4 # Density in seedlings / ha
# Tree basal area and seedling density across habitats
# Fill in 0's for seedsum - integrate with bassum
seedsum2 <- seedsum[, c(1,2,7)]
bassum <- merge(bassum, seedsum2, all = TRUE)
# Site-habitat combinations missing from either survey contribute zeros
bassum$Density <- ifelse(is.na(bassum$Density), 0, bassum$Density)
bassum$BA <- ifelse(is.na(bassum$BA), 0, bassum$BA)
# Fig 2A: paired basal area per site across habitats; P value from the
# paired t-test computed below (hard-coded in the annotation)
fig2a <- ggplot(bassum, aes(x = Habitat, y = BA)) +
  geom_line(aes(group = Site), lwd=1.2) +
  labs(x = 'Habitat',
       y = expression(paste('Basal area (m'^2,' ha'^-1,')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(1.2)),
        axis.title.x = element_text(size = rel(1.2)),
        axis.text = element_text(size = rel(1.2)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm")) +
  annotate("text", x = 1.2, y = 6, label = "P = 0.008", parse = FALSE, size = 4)
# Fig 2B: sapling density per site across habitats
fig2b <- ggplot(bassum, aes(x = Habitat, y = Density)) +
  geom_line(aes(group = Site), lwd=1.2) +
  labs(x = 'Habitat',
       y = expression(paste('Saplings ha'^-1))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(1.2)),
        axis.title.x = element_text(size = rel(1.2)),
        axis.text = element_text(size = rel(1.2)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm")) +
  annotate("text", x = 2, y = 55, label = "P = 0.81", parse = FALSE, size = 4)
# Fig 2C: maximum tree height per site across habitats
fig2c <- ggplot(bassum, aes(x = Habitat, y = Ht)) +
  geom_line(aes(group = Site), lwd = 1.2) +
  ylim(0, 12) +
  labs(x = 'Habitat',
       y = 'Max. height (m)') +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(1.2)),
        axis.title.x = element_text(size = rel(1.2)),
        axis.text = element_text(size = rel(1.2)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm")) +
  annotate("text", x = 1, y = 11.5, label = "P = 0.09", parse = FALSE, size = 4)
# Assemble the three panels in one row
fig2 <- plot_grid(fig2a, fig2b, fig2c, labels = "AUTO", align = "v", label_size = 20,
                  nrow = 1, hjust = 0, vjust = 1, scale = 0.9)
# Stats
# Paired one-sided t-tests across habitats; ordering by Habitat then Site
# makes rows 1:8 (Open) pair with rows 9:16 (Woody) site-by-site
bassum <- bassum[order(bassum$Habitat, bassum$Site), ]
BA.test <- t.test(bassum$BA[1:8], bassum$BA[9:16], alternative = "less",
                  paired = TRUE)
Density.test <- t.test(bassum$Density[1:8], bassum$Density[9:16], alternative = "less",
                       paired = TRUE)
Height.test <- t.test(bassum$Ht[1:8], bassum$Ht[9:16], alternative = "less",
                      paired = TRUE)
# Rough calculation of crown area to be expected from current juvenile trees
# moving into the adult class
Seed.mean <- mean(seedsum$Density)
Cr.area.proj <- Seed.mean * Cr.mean # Mean crown projected area (no overlap)
Cr.area.proj / 1e4
# Mean projected crown area for all escaped seedlings is about 0.034
# TC30 as a function of basal area
figS2 <- ggplot(tchab) + geom_point(aes(x = BA, y = TC30, fill = Habitat),
                                    shape = 21, size = 6) +
  labs(x = expression(paste('Basal area (m'^2,' ha'^-1,')')),
       y = expression(paste('TC'[30]))) +
  scale_fill_hue(l=40) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(2)),
        axis.title.x = element_text(vjust = 0, size = rel(2)),
        axis.text = element_text(size = rel(2)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(2)),
        legend.text = element_text(size = rel(2)))
# Simple OLS line for display (the mixed model below is used for inference)
lpred <- summary(lm(TC30 ~ BA, tchab))
figS2 <- figS2 + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1], lwd = 1.2) +
  annotate("text", x = 4, y = 0.2, label = "P = 0.0016", parse = FALSE, size = 8)
# Stats
# Mixed model with site random intercept; the lm on fitted values below
# recovers an R-squared for reporting
TC30.BA.mod <- summary(lme(TC30 ~ BA, data = tchab, random = ~ 1 | Site))
anova(TC30.BA.mod)
summary(lm(predict(TC30.BA.mod) ~ tchab$TC30)) # R-squared value
summary(lm(TC30 ~ BA, data = tchab))
# Test also for a relationship between tree basal area and seedling density
tchab <- merge(tchab, seedsum, all = TRUE)
Den.BA.mod <- summary(lme(Density ~ BA, data = tchab,
                          na.action = 'na.exclude', random = ~ 1 | Site))
anova(Den.BA.mod)
#--------------------------------------------------------------------------
# Combine demography data with tc data
df.ag <- merge(df.ag, tc, by.x = 'Subplot', by.y = 'ID')
# Make binary version of fire/herbivory variables
df.ag$F2019.bin <- ifelse(df.ag$F2019 > 0, 1, 0)
df.ag$H2019.bin <- ifelse(df.ag$H2019 > 0, 1, 0)
# Test for relationship between tree cover and damage/mortality/growth
# Use distance as autoregressive covariate
# Problem with spatial data: two identical spatial coordinates - jitter
# NOTE(review): the duplicated() call below only prints to console; the two
# hard-coded row indices were evidently read off that output and are fragile --
# re-verify 398/694 if the input data ever change
duplicated(df.ag[, 14:15])
df.ag$X[398] <- df.ag$X[398] + 1
df.ag$X[694] <- df.ag$X[694] + 1
# Get summary data for growth, topkill etc.
# Per-transect means first, then mean/SE/min/max across the 8 transects
df.ag.tr <- aggregate(cbind(Topkilled, Damage, F2019.bin, H2019.bin, Dead, Grass, deltaH, deltaH.undam) ~
                        Site, df.ag, function(x){round(mean(x), 3)})
df.ag.mn <- aggregate(cbind(Topkilled, Damage, F2019.bin, H2019.bin, Dead, Grass, deltaH, deltaH.undam) ~
                        1, df.ag.tr, function(x){round(mean(x), 3)})
df.ag.se <- aggregate(cbind(Topkilled, Damage, F2019.bin, H2019.bin, Dead, Grass, deltaH, deltaH.undam) ~
                        1, df.ag.tr, function(x){round(sd(x) / sqrt(8), 3)})
df.ag.min <- aggregate(cbind(Topkilled, Damage, F2019.bin, H2019.bin, Dead, Grass, deltaH, deltaH.undam) ~
                         1, df.ag.tr, function(x){round(min(x), 3)})
df.ag.max <- aggregate(cbind(Topkilled, Damage, F2019.bin, H2019.bin, Dead, Grass, deltaH, deltaH.undam) ~
                         1, df.ag.tr, function(x){round(max(x), 3)})
df.ag.sum <- rbind(df.ag.mn, df.ag.se, df.ag.min, df.ag.max)
df.ag.sum$Metric <- c("Mean", "SE", "Min", "Max")
df.ag.sum <- df.ag.sum[, c(9,7,8,2,3,4,1,5,6)]
df.ag.sum$Grass <- round(df.ag.sum$Grass)
write.csv(df.ag.sum, "Table_S1.csv", row.names = FALSE)
# Obtain mean damage to fire or herbivore damaged seedlings
# (conditional on any damage, i.e. F2019 > 0 / H2019 > 0)
df.ag.sub.fi <- subset(df.ag, F2019 > 0)
mean(df.ag.sub.fi$F2019)
sd(df.ag.sub.fi$F2019)
min(df.ag.sub.fi$F2019)
max(df.ag.sub.fi$F2019)
df.ag.sub.he <- subset(df.ag, H2019 > 0)
mean(df.ag.sub.he$H2019)
sd(df.ag.sub.he$H2019)
min(df.ag.sub.he$H2019)
max(df.ag.sub.he$H2019)
rm(df.ag.sub.fi, df.ag.sub.he)
# Count the proportion of height reversals (surveys where a seedling's height
# decreased relative to the previous survey) for every seedling.
df <- df[order(df$Subplot, df$Day), ]
subplots <- unique(df$Subplot)
# Reversal proportion for one height series: drop NAs, take successive
# differences, and return the fraction of downward transitions.
# Series with fewer than 2 non-missing heights have no transitions and give NA
# (the original manual lag indexing also gave NA for a length-1 series via
# recycling, and errored on an all-NA series because 1:(0 - 1) mixes signs).
reversal_prop <- function(hts) {
  hts <- hts[!is.na(hts)]
  if (length(hts) < 2) return(NA_real_)
  mean(diff(hts) < 0)
}
# Preallocate a list instead of growing htsdf.all with rbind() in the loop
rev_list <- vector("list", length(subplots))
for (i in seq_along(subplots)){
  sub <- subset(df, Subplot == subplots[i])
  rev_list[[i]] <- data.frame(ID = subplots[i],
                              Reversal = reversal_prop(sub$Ht_m))
}
htsdf.all <- do.call(rbind, rev_list)
# Summary of reversal frequency across all seedlings
mean(htsdf.all$Reversal)
sd(htsdf.all$Reversal)
# Two-letter transect codes for plotting
df$Tran <- factor(sitecode[as.numeric(as.factor(df$Transect))])
# Get mean day per survey period
perday <- aggregate(Day ~ Per + Tran, df, mean)
df.ag.ht <- aggregate(Ht_m ~ Per + Tran, df, mean)
df.ag.ht <- merge(df.ag.ht, perday)
# Find expected height distribution without disturbance and compare with actual
# Columns 6, 1, 11 are presumably Per, Subplot, Ht_m -- TODO confirm against df layout
hts <- df[, c(6,1,11)]
hinit <- subset(hts, Per == 'Initial')
hinit$Per <- NULL
hfinal <- subset(hts, Per == 'Jun_2019')
hfinal$Per <- NULL
# Column 9 of df.ag is presumably deltaH.undam -- TODO confirm
htgr <- df.ag[, c(1,9)]
hinit <- merge(hinit, hfinal, by = 'Subplot')
names(hinit)[2:3] <- c('H0', 'Hf')
hinit <- merge(hinit, htgr)
# Predicted final height = initial height + 3 years of undamaged growth
hinit$Hf.pred <- hinit$H0 + hinit$deltaH.undam * 3
# Fig 3: individual height trajectories (black) with transect mean (red) per panel
fig3 <- ggplot(NULL, aes(x = Day, y = Ht_m)) +
  geom_line(data = df, aes(group = Subplot)) + facet_wrap(~ Tran) +
  geom_line(data = df.ag.ht, col = 'red', lwd = 1.2) +
  labs(x = 'Day',
       y = 'Height (m)') +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        strip.text = element_text(size = rel(1.2)),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(1.2)),
        axis.title.x = element_text(size = rel(1.2)),
        axis.text = element_text(size = rel(1)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"))
# For compositional analysis, identify species that are present in only
# open or woody habitats
sp.site <- table(df.ag$Species, df.ag$Site)
sp.count <- data.frame(rownames(sp.site))
names(sp.count) <- 'Species'
# Number of sites where each species occurs: vectorized row sum over the
# species-by-site presence matrix (replaces the element-wise loop)
sp.count$Nsites <- rowSums(sp.site > 0)
# Include only species occurring on 2 or more sites
sp.count <- subset(sp.count, Nsites > 1)
sp.hab <- table(df.ag$Species, df.ag$Habitat)
# All species occurring in two or more sites were present in both habitats
# For Gaussian models, fit spatial lme models with site random effects
# Each response is regressed on TC30 with a site random intercept and a
# spatial correlation structure on the XY coordinates; ML fitting so the
# exponential and spherical variants are AIC-comparable
# Exponential autocorrelation models
fit.deltaH.exp <- lme(deltaH ~ TC30, data = df.ag,
                      random = ~ 1 | Site,
                      corr = corSpatial(form = ~ X + Y, type ="exponential", nugget = F),
                      method = "ML", na.action = 'na.exclude')
fit.deltaH.undam.exp <- lme(deltaH.undam ~ TC30, data = df.ag,
                            random = ~ 1 | Site,
                            corr = corSpatial(form = ~ X + Y, type ="exponential", nugget = F),
                            method = "ML", na.action = 'na.exclude')
fit.Grass.exp <- lme(Grass ~ TC30, data = df.ag,
                     random = ~ 1 | Site,
                     corr = corSpatial(form = ~ X + Y, type ="exponential", nugget = F),
                     method = "ML", na.action = 'na.exclude')
fit.Topkill.exp <- lme(Topkilled ~ TC30, data = df.ag,
                       random = ~ 1 | Site,
                       corr = corSpatial(form = ~ X + Y, type ="exponential", nugget = F),
                       method = "ML", na.action = 'na.exclude')
fit.Damage.exp <- lme(Damage ~ TC30, data = df.ag,
                      random = ~ 1 | Site,
                      corr = corSpatial(form = ~ X + Y, type ="exponential", nugget = F),
                      method = "ML", na.action = 'na.exclude')
# Spherical autocorrelation models
fit.deltaH.sph <- lme(deltaH ~ TC30, data = df.ag,
                      random = ~ 1 | Site,
                      corr = corSpatial(form = ~ X + Y, type ="spherical", nugget = F),
                      method = "ML", na.action = 'na.exclude')
fit.deltaH.undam.sph <- lme(deltaH.undam ~ TC30, data = df.ag,
                            random = ~ 1 | Site,
                            corr = corSpatial(form = ~ X + Y, type ="spherical", nugget = F),
                            method = "ML", na.action = 'na.exclude')
fit.Grass.sph <- lme(Grass ~ TC30, data = df.ag,
                     random = ~ 1 | Site,
                     corr = corSpatial(form = ~ X + Y, type ="spherical", nugget = F),
                     method = "ML", na.action = 'na.exclude')
fit.Topkill.sph <- lme(Topkilled ~ TC30, data = df.ag,
                       random = ~ 1 | Site,
                       corr = corSpatial(form = ~ X + Y, type ="spherical", nugget = F),
                       method = "ML", na.action = 'na.exclude')
fit.Damage.sph <- lme(Damage ~ TC30, data = df.ag,
                      random = ~ 1 | Site,
                      corr = corSpatial(form = ~ X + Y, type ="spherical", nugget = F),
                      method = "ML", na.action = 'na.exclude')
# Compare correlation structures by AIC for each response
AIC(fit.deltaH.exp, fit.deltaH.sph)
AIC(fit.deltaH.undam.exp, fit.deltaH.undam.sph)
AIC(fit.Grass.exp, fit.Grass.sph)
AIC(fit.Topkill.exp, fit.Topkill.sph)
AIC(fit.Damage.exp, fit.Damage.sph)
# Little difference - exponential models generally fit better
summary(fit.deltaH.exp)
summary(fit.deltaH.undam.exp)
summary(fit.Grass.exp)
summary(fit.Topkill.exp)
summary(fit.Damage.exp)
# Test whether the addition of a quadratic term improves growth model
# Transform from proportion to percentage for convergence
fit.deltaH.undam.exp.lin <- lme(deltaH.undam ~ I(TC30 * 100),
                                data = df.ag,
                                random = ~ 1 | Site,
                                corr = corSpatial(form = ~ X + Y, type ="exponential", nugget = F),
                                method = "ML", na.action = 'na.exclude')
fit.deltaH.undam.exp.quad <- lme(deltaH.undam ~ I(TC30 * 100) + I((TC30 * 100)^2),
                                 data = df.ag,
                                 random = ~ 1 | Site,
                                 corr = corSpatial(form = ~ X + Y, type ="exponential", nugget = F),
                                 method = "ML", na.action = 'na.exclude')
# Likelihood-ratio comparison of linear vs quadratic TC30 effect
anova(fit.deltaH.undam.exp.lin, fit.deltaH.undam.exp.quad)
summary(fit.deltaH.undam.exp.quad)
# No support for a quadratic term
# Re-examine growth patterns for more common species
# (ACATOR and COMAFR species codes; fitted separately with the same structure)
df.acator <- subset(df.ag, Species == 'ACATOR')
df.comafr <- subset(df.ag, Species == 'COMAFR')
fit.deltaH.undam.acator <- lme(deltaH.undam ~ TC30, data = df.acator,
                               random = ~ 1 | Site,
                               corr = corSpatial(form = ~ X + Y, type ="exponential", nugget = F),
                               method = "ML", na.action = 'na.exclude')
fit.deltaH.undam.comafr <- lme(deltaH.undam ~ TC30, data = df.comafr,
                               random = ~ 1 | Site,
                               corr = corSpatial(form = ~ X + Y, type ="exponential", nugget = F),
                               method = "ML", na.action = 'na.exclude')
# Results hold for the two most common species
# Point shapes for the eight transects
shapes <- c(0, 1, 2, 3, 4, 15, 16, 17)
df.ag$Transect <- factor(sitecode[as.numeric(as.factor(df.ag$Site))])
# Plots
# Fig 4A: undamaged growth vs tree cover, fitted line from the spatial lme
fig4a <- ggplot(df.ag) + geom_point(aes(x = TC30, y = deltaH.undam, shape = Transect),
                                    size = 2) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('Growth (m y'^-1,')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(shape = guide_legend(override.aes = list(size=2)), size = "none")
# Display line: OLS through the lme fitted values (projects the mixed-model
# fit onto a single straight line for plotting)
df.ag$Pred <- fitted(fit.deltaH.undam.exp)
lpred <- summary(lm(Pred ~ TC30, df.ag))
fig4a <- fig4a + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1],
                             col = 'red', size = 1.5) +
  annotate("text", x = 0.4, y = -0.25, label = "P = 0.0002", parse = FALSE, size = 4)
# Fig 4B: grass biomass vs tree cover (same layout as Fig 4A)
fig4b <- ggplot(df.ag) + geom_point(aes(x = TC30, y = Grass, shape = Transect),
                                    size = 2) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('Grass biomass (g m'^'-2',')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(shape = guide_legend(override.aes = list(size=2)), size = "none")
# Display line from the grass spatial lme fitted values
df.ag$Pred <- fitted(fit.Grass.exp)
lpred <- summary(lm(Pred ~ TC30, df.ag))
fig4b <- fig4b + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1],
                             col = 'red', size = 1.5) +
  annotate("text", x = 0.45, y = 1000, label = "P = 0.22", parse = FALSE, size = 4)
# Fig 4C: topkill rate vs tree cover (same layout as Fig 4A)
fig4c <- ggplot(df.ag) + geom_point(aes(x = TC30, y = Topkilled, shape = Transect),
                                    size = 2) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('Topkill (y'^'-1',')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(shape = guide_legend(override.aes = list(size=2)), size = "none")
# Display line from the topkill spatial lme fitted values
df.ag$Pred <- fitted(fit.Topkill.exp)
lpred <- summary(lm(Pred ~ TC30, df.ag))
fig4c <- fig4c + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1],
                             col = 'red', size = 1.5) +
  annotate("text", x = 0.4, y = 0.9, label = "P = 0.60", parse = FALSE, size = 4)
# Fig 4D: damage rate vs tree cover (same layout as Fig 4A)
fig4d <- ggplot(df.ag) + geom_point(aes(x = TC30, y = Damage, shape = Transect),
                                    size = 2) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('Damage (y'^'-1',')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(shape = guide_legend(override.aes = list(size=2)), size = "none")
# Display line from the damage spatial lme fitted values
df.ag$Pred <- fitted(fit.Damage.exp)
lpred <- summary(lm(Pred ~ TC30, df.ag))
fig4d <- fig4d + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1],
                             col = 'red', size = 1.5) +
  annotate("text", x = 0.45, y = 0.9, label = "P = 0.76", parse = FALSE, size = 4)
# Assemble the four panels without individual legends, then attach one shared legend
prow <- plot_grid(
  fig4a + theme(legend.position="none"),
  fig4b + theme(legend.position="none"),
  fig4c + theme(legend.position="none"),
  fig4d + theme(legend.position="none"),
  align = 'vh',
  labels = c("A", "B", "C", "D"),
  hjust = -1,
  nrow = 2
)
legend <- get_legend(
  # Create some space to the left of the legend
  fig4a + theme(legend.box.margin = margin(0, 0, 0, 12))
)
fig4 <- plot_grid(prow, legend, rel_widths = c(3, .4), scale = 0.9)
# For logistic models, fit glms and autologistic models and obtain slopes by site
# Result container: one row per site, and for each response (Fi = fire,
# He = herbivory, Mo = mortality) the plain GLM slope and SE plus the
# autologistic slope, SE, and p-value, all filled in by the loop below.
Site <- unique(df.ag$Site)
regs <- data.frame(Site)
Sites <- Site
# Build the 15 column names (Fi.slope, Fi.slope.se, ..., Mo.slope.pval.auto)
# in the same prefix-major order the downstream code expects.
slope_cols <- as.vector(t(outer(
  c("Fi", "He", "Mo"),
  c("slope", "slope.se", "slope.auto", "slope.se.auto", "slope.pval.auto"),
  paste, sep = ".")))
for (cl in slope_cols) {
  regs[[cl]] <- NA
}
# Jittered copy of transect distance (single rnorm draw, preserving RNG state)
df.ag$Dist2 <- rnorm(nrow(df.ag), df.ag$Distpca, 0.01)
# Per-site regressions: for each transect, fit an ordinary logistic GLM and a
# spatial autologistic regression of each binary response on TC30, storing
# slopes/SEs/p-values in regs and per-seedling fitted values in df.ag2.
# A response is skipped (fitted values NA) when no events occurred at a site.
# NOTE(review): Dist2 (jittered distance) is created just above and removed
# just below but does not appear inside this loop -- possibly a remnant of an
# earlier autoregressive formulation; confirm before deleting.
for (i in 1:length(Site)){
  sub <- subset(df.ag, Site == Sites[i])
  sub <- subset(sub, !is.na(F2019))
  # Conduct both logistic and autologistic regressions
  # Promote to a SpatialPointsDataFrame so coordinates() works below
  coordinates(sub) <- ~ X + Y
  if (sum(sub$F2019.bin) > 0){
    # Fire logistic
    mod.Fi <- glm(F2019.bin ~ TC30, sub, family = binomial)
    regs$Fi.slope[i] <- coef(summary(mod.Fi))["TC30", "Estimate"]
    regs$Fi.slope.se[i] <- coef(summary(mod.Fi))["TC30", "Std. Error"]
    sub$Pred.Fi <- fitted(mod.Fi)
    # Fire autologistic
    mod.Fi.auto <- logistic.regression(sub, y = 'F2019.bin', x = 'TC30', autologistic=TRUE,
                                       coords = coordinates(sub))
    regs$Fi.slope.auto[i] <- mod.Fi.auto$coefTable[2, "Coef"]
    regs$Fi.slope.se.auto[i] <- mod.Fi.auto$coefTable[2, "StdError"]
    regs$Fi.slope.pval.auto[i] <- mod.Fi.auto$coefTable[2, "Prob"]
  } else {
    sub$Pred.Fi <- NA
  }
  if (sum(sub$H2019.bin) > 0){
    # Herbivory logistic
    mod.He <- glm(H2019.bin ~ TC30, sub, family = binomial)
    regs$He.slope[i] <- coef(summary(mod.He))["TC30", "Estimate"]
    regs$He.slope.se[i] <- coef(summary(mod.He))["TC30", "Std. Error"]
    sub$Pred.He <- fitted(mod.He)
    # Herbivory autologistic
    mod.He.auto <- logistic.regression(sub, y = 'H2019.bin', x = 'TC30', autologistic=TRUE,
                                       coords = coordinates(sub))
    regs$He.slope.auto[i] <- mod.He.auto$coefTable[2, "Coef"]
    regs$He.slope.se.auto[i] <- mod.He.auto$coefTable[2, "StdError"]
    regs$He.slope.pval.auto[i] <- mod.He.auto$coefTable[2, "Prob"]
  } else {
    sub$Pred.He <- NA
  }
  if (sum(sub$Dead) > 0){
    # Mortality logistic
    mod.Mo <- glm(Dead ~ TC30, sub, family = binomial)
    regs$Mo.slope[i] <- coef(summary(mod.Mo))["TC30", "Estimate"]
    regs$Mo.slope.se[i] <- coef(summary(mod.Mo))["TC30", "Std. Error"]
    sub$Pred.Mo <- fitted(mod.Mo)
    # Mortality autologistic
    mod.Mo.auto <- logistic.regression(sub, y = 'Dead', x = 'TC30', autologistic=TRUE,
                                       coords = coordinates(sub))
    regs$Mo.slope.auto[i] <- mod.Mo.auto$coefTable[2, "Coef"]
    regs$Mo.slope.se.auto[i] <- mod.Mo.auto$coefTable[2, "StdError"]
    regs$Mo.slope.pval.auto[i] <- mod.Mo.auto$coefTable[2, "Prob"]
  } else {
    sub$Pred.Mo <- NA
  }
  # Accumulate the per-site spatial data frames
  if (i == 1) df.ag2 <- sub
  else df.ag2 <- rbind(df.ag2, sub)
}
summary(lm(Fi.slope ~ 1, regs))
summary(lm(Fi.slope.auto ~ 1, regs))
summary(lm(He.slope ~ 1, regs))
summary(lm(He.slope.auto ~ 1, regs))
summary(lm(Mo.slope ~ 1, regs))
summary(lm(Mo.slope.auto ~ 1, regs))
tableS2 <- regs
tableS2[, 2:16] <- round(tableS2[, 2:16], 3)
write.csv(tableS2, "Table_S2.csv", row.names = FALSE)
# Mortality declines with tree cover
# Is mortality related to damage?
# Simplest analysis: break down dead and alive seedlings by transect
# and damage proportion - use paired t-test
df.mort <- aggregate(Damage ~ Dead + Site, df.ag, mean)
# Ordering by Dead then Site pairs rows 1:8 (alive) with 9:16 (dead) site-by-site
df.mort <- df.mort[order(df.mort$Dead, df.mort$Site), ]
Mort.test <- t.test(df.mort$Damage[1:8], df.mort$Damage[9:16], alternative = "less",
                    paired = TRUE)
# Plot
df.mort$Dead <- ifelse(df.mort$Dead == 0, 'Alive', 'Dead')
# Fig 6: per-site mean damage for surviving vs dead seedlings;
# P value from Mort.test above (hard-coded in the annotation)
fig6 <- ggplot(df.mort, aes(x = Dead, y = Damage)) +
  geom_line(aes(group = Site), lwd = 2) +
  labs(x = '',
       y = expression(paste('Damage (y'^'-1',')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(2)),
        axis.title.x = element_text(size = rel(2)),
        axis.text = element_text(size = rel(2)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm")) +
  annotate("text", x = 1.5, y = 0.9, label = "P = 0.0002", parse = FALSE, size = 8)
# Fit GLMM models for fire, herbivory and mortality
# (plain pooled logistic GLMs of each binary response on TC30)
fire.glm <- glm(F2019.bin ~ TC30, df.ag2, na.action = "na.exclude", family = binomial)
herb.glm <- glm(H2019.bin ~ TC30, df.ag2, na.action = "na.exclude", family = binomial)
mort.glm <- glm(Dead ~ TC30, df.ag2, na.action = "na.exclude", family = binomial)
# These models ignore fine-scale spatial autocorrelation and site
# random effects and are therefore approximations
# They serve to show aggregate trends across transects
# Plots
df.ag2$Transect <- factor(sitecode[as.numeric(as.factor(df.ag2$Site))])
# Back from SpatialPointsDataFrame to a plain data frame for ggplot
df.ag2 <- as.data.frame(df.ag2)
# Add overall predictions to dataframe
df.ag2$Pred.Fi.all <- fitted(fire.glm)
df.ag2$Pred.He.all <- fitted(herb.glm)
df.ag2$Pred.Mo.all <- fitted(mort.glm)
# Fig 5A: per-site fitted fire-damage curves (colored) with the pooled
# GLM prediction overlaid in black
fig5a <- ggplot(data = df.ag2) + geom_line(aes(x = TC30, y = Pred.Fi, color = Transect),
                                           size = 1.5) +
  geom_line(aes(x = TC30, y = Pred.Fi.all), size = 1.5) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('2019 fire damage (y'^'-1',')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(color = guide_legend(override.aes = list(size = 1.5)), size = "none") +
  annotate("text", x = 0.15, y = 0.95, label = "P = 0.37", parse = FALSE, size = 4)
fig5b <- ggplot(data = df.ag2) + geom_line(aes(x = TC30, y = Pred.He, color = Transect),
size = 1.5) +
geom_line(aes(x = TC30, y = Pred.He.all), size = 1.5) +
labs(x = expression(paste('TC'[30])),
y = expression(paste('2019 herbivore damage (y'^'-1',')'))) +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
axis.title.x = element_text(vjust = 0, size = rel(0.8)),
axis.text = element_text(size = rel(0.8)),
axis.line = element_line(colour="black"),
axis.ticks = element_line(colour="black"),
axis.ticks.length = unit(.25, "cm"),
legend.title = element_text(size = rel(0.8)),
legend.text = element_text(size = rel(0.8))) +
scale_shape_manual(labels = sitecode, values = shapes) +
guides(shape = guide_legend(override.aes = list(size = 1.5)), size = "none") +
annotate("text", x = 0.35, y = 0.95, label = "P = 0.32", parse = FALSE, size = 4)
fig5c <- ggplot(data = df.ag2) + geom_line(aes(x = TC30, y = Pred.Mo, color = Transect),
size = 1.5) +
geom_line(aes(x = TC30, y = Pred.Mo.all), size = 1.5) +
labs(x = expression(paste('TC'[30])),
y = expression(paste('2017-19 mortality'))) +
theme_bw() +
theme(panel.grid.major = element_blank(),
panel.grid.minor = element_blank(),
strip.background = element_blank(),
axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
axis.title.x = element_text(vjust = 0, size = rel(0.8)),
axis.text = element_text(size = rel(0.8)),
axis.line = element_line(colour="black"),
axis.ticks = element_line(colour="black"),
axis.ticks.length = unit(.25, "cm"),
legend.title = element_text(size = rel(0.8)),
legend.text = element_text(size = rel(0.8))) +
scale_shape_manual(labels = sitecode, values = shapes) +
guides(shape = guide_legend(override.aes = list(size = 1.5)), size = "none") +
annotate("text", x = 0.35, y = 0.4, label = "P = 0.017", parse = FALSE, size = 4)
prow <- plot_grid(
fig5a + theme(legend.position="none"),
fig5b + theme(legend.position="none"),
fig5c + theme(legend.position="none"),
align = 'vh',
labels = c("A", "B", "C"),
hjust = -1,
nrow = 1
)
legend <- get_legend(
# create some space to the left of the legend
fig5a + theme(legend.box.margin = margin(0, 0, 0, 12))
)
fig5 <- plot_grid(prow, legend, rel_widths = c(3, .4))
# Examine microclimate data in relation to tree cover
micro <- read.csv('Daily_microclimate_data.csv')
# Per-logger (ID) means over all days, then attach tree cover by logger ID
# (tc is assumed to be a data frame with ID and TC30 - defined earlier).
micro.ag <- aggregate(cbind(VWCmean, Tmean, Tmax, Tmin, PARmean, PARmax) ~ ID, micro, mean)
micro.ag <- merge(micro.ag, tc, by = 'ID')
# Site random effects - intercept only; each microclimate summary is modelled
# against TC30 with the "optim" optimizer (presumably to avoid convergence
# failures with the default - TODO confirm).
mod.PARmean <- lme(PARmean ~ TC30, data = micro.ag, control = list(opt = "optim"),
                   random = ~ 1 | Site)
mod.PARmax <- lme(PARmax ~ TC30, data = micro.ag, control = list(opt = "optim"),
                  random = ~ 1 | Site)
mod.VWCmean <- lme(VWCmean ~ TC30, data = micro.ag, control = list(opt = "optim"),
                   random = ~ 1 | Site)
mod.Tmean <- lme(Tmean ~ TC30, data = micro.ag, control = list(opt = "optim"),
                 random = ~ 1 | Site)
mod.Tmax <- lme(Tmax ~ TC30, data = micro.ag, control = list(opt = "optim"),
                random = ~ 1 | Site)
mod.Tmin <- lme(Tmin ~ TC30, data = micro.ag, control = list(opt = "optim"),
                random = ~ 1 | Site)
# F-tests for the TC30 fixed effect in each model (printed to console;
# the P values are transcribed into the fig7 annotations below).
anova(mod.PARmean)
anova(mod.PARmax)
anova(mod.VWCmean)
anova(mod.Tmean)
anova(mod.Tmax)
anova(mod.Tmin)
# Figs
# Figure 7, panel A: mean PAR per logger vs tree cover, with the mixed-model
# trend overlaid as a red straight line.
micro.ag$Transect <- factor(sitecode[as.numeric(as.factor(micro.ag$Site))])
fig7a <- ggplot(micro.ag) + geom_point(aes(x = TC30, y = PARmean, shape = Transect), size = 2) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('Mean PAR (', mu, 'mol m'^-2,' s'^-1,')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(shape = guide_legend(override.aes = list(size=2)), size = "none")
# Summarize the lme fitted values with a simple straight line for display.
micro.ag$Pred <- fitted(mod.PARmean)
lpred <- summary(lm(Pred ~ TC30, micro.ag))
# coef() on a summary.lm returns the coefficient matrix; column-major linear
# indexing makes [1] the intercept estimate and [2] the slope estimate, which
# is what geom_abline needs. Works, but coef(lm(...)) would be clearer.
fig7a <- fig7a + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1],
                             col = 'red', size = 1.5) +
  annotate("text", x = 0.3, y = 900, label = "P = 0.039", parse = FALSE, size = 4)
# Figure 7, panel B: maximum PAR vs tree cover (layout identical to panel A).
fig7b <- ggplot(micro.ag) + geom_point(aes(x = TC30, y = PARmax, shape = Transect), size = 2) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('Max. PAR (', mu, 'mol m'^-2,' s'^-1,')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(shape = guide_legend(override.aes = list(size=2)), size = "none")
# Overlay the lme trend, summarized as a straight line (see fig7a note).
micro.ag$Pred <- fitted(mod.PARmax)
lpred <- summary(lm(Pred ~ TC30, micro.ag))
fig7b <- fig7b + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1],
                             col = 'red', size = 1.5) +
  annotate("text", x = 0.3, y = 1800, label = "P = 0.071", parse = FALSE, size = 4)
# Figure 7, panel C: mean volumetric water content vs tree cover.
fig7c <- ggplot(micro.ag) + geom_point(aes(x = TC30, y = VWCmean, shape = Transect), size = 2) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('Mean VWC (cm'^3,' cm'^-3,')'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(shape = guide_legend(override.aes = list(size=2)), size = "none")
# Overlay the lme trend, summarized as a straight line (see fig7a note).
micro.ag$Pred <- fitted(mod.VWCmean)
lpred <- summary(lm(Pred ~ TC30, micro.ag))
fig7c <- fig7c + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1],
                             col = 'red', size = 1.5) +
  annotate("text", x = 0.3, y = 40, label = "P = 0.0069", parse = FALSE, size = 4)
# Figure 7, panel D: mean temperature vs tree cover.
fig7d <- ggplot(micro.ag) + geom_point(aes(x = TC30, y = Tmean, shape = Transect), size = 2) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('Mean T (', degree, 'C)'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(shape = guide_legend(override.aes = list(size=2)), size = "none")
# Overlay the lme trend, summarized as a straight line (see fig7a note).
micro.ag$Pred <- fitted(mod.Tmean)
lpred <- summary(lm(Pred ~ TC30, micro.ag))
fig7d <- fig7d + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1],
                             col = 'red', size = 1.5) +
  annotate("text", x = 0.3, y = 45, label = "P = 0.58", parse = FALSE, size = 4)
# Figure 7, panel E: minimum temperature vs tree cover.
fig7e <- ggplot(micro.ag) + geom_point(aes(x = TC30, y = Tmin, shape = Transect), size = 2) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('Min. T (', degree, 'C)'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(shape = guide_legend(override.aes = list(size=2)), size = "none")
# Overlay the lme trend, summarized as a straight line (see fig7a note).
micro.ag$Pred <- fitted(mod.Tmin)
lpred <- summary(lm(Pred ~ TC30, micro.ag))
fig7e <- fig7e + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1],
                             col = 'red', size = 1.5) +
  annotate("text", x = 0.3, y = 35, label = "P = 0.25", parse = FALSE, size = 4)
# Figure 7, panel F: maximum temperature vs tree cover.
fig7f <- ggplot(micro.ag) + geom_point(aes(x = TC30, y = Tmax, shape = Transect), size = 2) +
  labs(x = expression(paste('TC'[30])),
       y = expression(paste('Max. T (', degree, 'C)'))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(0.8)),
        axis.title.x = element_text(vjust = 0, size = rel(0.8)),
        axis.text = element_text(size = rel(0.8)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"),
        legend.title = element_text(size = rel(0.8)),
        legend.text = element_text(size = rel(0.8))) +
  scale_shape_manual(labels = sitecode, values = shapes) +
  guides(shape = guide_legend(override.aes = list(size=2)), size = "none")
# Overlay the lme trend, summarized as a straight line (see fig7a note).
micro.ag$Pred <- fitted(mod.Tmax)
lpred <- summary(lm(Pred ~ TC30, micro.ag))
fig7f <- fig7f + geom_abline(slope = coef(lpred)[2], intercept = coef(lpred)[1],
                             col = 'red', size = 1.5) +
  annotate("text", x = 0.3, y = 62, label = "P = 0.86", parse = FALSE, size = 4)
# Arrange the six microclimate panels in a 2 x 3 grid, stripping the
# per-panel legends and labelling panels A-F automatically.
prow <- plot_grid(
  fig7a + theme(legend.position="none"),
  fig7b + theme(legend.position="none"),
  fig7c + theme(legend.position="none"),
  fig7d + theme(legend.position="none"),
  fig7e + theme(legend.position="none"),
  fig7f + theme(legend.position="none"),
  align = 'vh',
  labels = 'AUTO',
  hjust = -1,
  nrow = 2
)
legend <- get_legend(
  # create some space to the left of the legend
  fig7a + theme(legend.box.margin = margin(0, 0, 0, 12))
)
# Append the shared legend as a narrow right-hand column.
fig7 <- plot_grid(prow, legend, rel_widths = c(3, .4))
# Simulation model
# First use the original dataset
# Deterministic growth and stochastic disturbance
# Recalculate parameters using linear models
# Growth and mortality are related to TC
# Bootstrapped parameter estimates
# NOTE(review): magic column indices - selecting columns by name would be
# safer and self-documenting. The loop below requires TC30, deltaH,
# deltaH.undam, Damage, Dead, H2019.bin, F2019.bin and Topkilled.
df.sim <- df.ag[, c(2,3,6,8,9,12,18,19)]
ITER <- 100 # Bootstrapped iterations
# Bootstrapped coefficients
# Growth and mortality
gr <- matrix(nrow = ITER, ncol = 2)        # deltaH ~ TC30 (intercept, slope)
gr.u <- matrix(nrow = ITER, ncol = 2)      # undamaged growth ~ TC30
gr.max <- matrix(nrow = ITER, ncol = 2)    # 95th-quantile growth ~ TC30
gr.u.max <- matrix(nrow = ITER, ncol = 2)  # 95th-quantile undamaged growth ~ TC30
mo <- matrix(nrow = ITER, ncol = 3)        # logit(Dead) ~ TC30 + Damage
gr.alt <- numeric(ITER) # Alternative parameterization where growth is unrelated to TC
gr.u.alt <- numeric(ITER)
mo.alt <- numeric(ITER)
# Damage and topkill
he <- numeric(ITER)  # mean herbivory probability (TC-independent)
fi <- numeric(ITER)  # mean fire probability (TC-independent)
tk <- numeric(ITER)  # mean topkill probability (TC-independent)
he.alt <- matrix(nrow = ITER, ncol = 2)  # logit(herbivory) ~ TC30
fi.alt <- matrix(nrow = ITER, ncol = 2)  # logit(fire) ~ TC30
tk.alt <- matrix(nrow = ITER, ncol = 2)  # Topkilled ~ TC30 (linear)
for (i in 1:ITER){
  # Resample seedlings with replacement for this bootstrap replicate.
  df.boot <- df.sim[sample(nrow(df.sim), replace = TRUE), ]
  # Growth and mortality vary with TC30
  sim.gr <- lm(deltaH ~ TC30, df.boot, na.action = "na.omit")
  sim.gr.undam <- lm(deltaH.undam ~ TC30, df.boot, na.action = "na.omit")
  sim.mreg <- glm(Dead ~ TC30 + Damage, df.boot, na.action = "na.omit", family = binomial)
  # Quantile regressions (tau = 0.95) approximate maximum growth rates.
  sim.gr.max <- rq(deltaH ~ TC30, tau = 0.95, data = df.boot)
  sim.gr.u.max <- rq(deltaH.undam ~ TC30, tau = 0.95, data = df.boot)
  gr[i, ] <- coef(sim.gr)
  gr.u[i, ] <- coef(sim.gr.undam)
  mo[i, ] <- coef(sim.mreg)
  gr.max[i, ] <- coef(sim.gr.max)
  gr.u.max[i, ] <- coef(sim.gr.u.max)
  # Disturbance does not
  he[i] <- mean(df.boot$H2019.bin, na.rm = TRUE)
  fi[i] <- mean(df.boot$F2019.bin, na.rm = TRUE)
  tk[i] <- mean(df.boot$Topkilled)
  # Now allow disturbance/topkill to vary with TC30, but not growth
  # Growth and mortality
  gr.alt[i] <- mean(df.boot$deltaH, na.rm = TRUE)
  gr.u.alt[i] <- mean(df.boot$deltaH.undam, na.rm = TRUE)
  mo.alt[i] <- mean(df.boot$Dead, na.rm = TRUE)
  # Disturbance
  hreg <- glm(H2019.bin ~ TC30, df.boot, family = binomial)
  he.alt[i, ] <- coef(hreg)
  freg <- glm(F2019.bin ~ TC30, df.boot, family = binomial)
  fi.alt[i, ] <- coef(freg)
  treg <- lm(Topkilled ~ TC30, df.boot)
  tk.alt[i, ] <- coef(treg)
}
N <- 1e4 # 1000 individuals per 10 tree cover bins
f <- c(0.1, 0.5, 1, 1.5) # Proportional disturbance reduction multipliers
# Calculate bootstrapped escape proportions
# Scenario 1: Growth varies across habitats, disturbance does not
# (mortality here still depends on TC and damage via the mo coefficients).
# This is analogous to the default scenario supported by the analysis
for (s in 1:4){       # one pass per disturbance multiplier f[s]
  for (i in 1:ITER){  # one pass per bootstrap parameter set
    H <- numeric(N)   # all individuals start at height 0
    sim <- data.frame(H)
    # 10 tree-cover bins (0 to 0.45), 1000 individuals per bin.
    sim$TC <- rep(seq(from = 0, to = 0.45, by = 0.05), each = 1000)
    sim$Dam <- 0
    sim$Esc <- 0
    # Cycle through 1000 time steps (years)
    for (t in 1:1000){
      # Annual growth: undamaged vs damaged regression for this bootstrap draw.
      sim$H <- ifelse(sim$Dam == 0, sim$H + gr.u[i,1] + gr.u[i,2] * sim$TC,
                      sim$H + gr[i,1] + gr[i,2] * sim$TC)
      # Draw herbivory and fire damage independently; collapse "both" to "damaged".
      sim$Dam <- rbinom(N, size = 1, prob = he[i] * f[s]) +
        rbinom(N, size = 1, prob = fi[i] * f[s])
      sim$Dam <- ifelse(sim$Dam == 2, 1, sim$Dam)
      # Topkill resets height to 0 unless the individual has already escaped.
      topkilled <- rbinom(N, size = 1, prob = tk[i] * f[s])
      sim$H <- ifelse(topkilled == 1 & sim$Esc == 0, 0, sim$H)
      # Probability of mortality (depends on TC and damage)
      lp.m <- mo[i,1] + mo[i,2] * sim$TC + mo[i,3] * sim$Dam # Linear predictor for inverse logit
      prob.m.3y <- exp(lp.m) / (exp(lp.m) + 1) # Three-year probability of mortality - annualize
      prob.m.1y <- 1 - (1 - prob.m.3y) ^ (1/3)
      Dead <- rbinom(N, size = 1, prob = prob.m.1y)
      # Assume dead trees are removed and replaced with new seedlings on height 0
      sim$H <- ifelse(Dead == 1 & sim$Esc == 0, 0, sim$H)
      # Escape = taller than 2 m. Once escaped, height resets are disabled
      # above, so escape persists as long as growth remains positive.
      sim$Esc <- ifelse(sim$H > 2, 1, 0)
    }
    # Calculate proportion escaped for each TC class
    sim.ag <- aggregate(Esc ~ TC, sim, mean)
    sim.ag$f <- f[s]
    sim.ag$ITER <- i
    if (s == 1 & i == 1) sim1 <- sim.ag
    else sim1 <- rbind(sim1, sim.ag)
    cat(s, i, '\n')  # progress indicator
  }
}
sim1$Scen <- "Scenario 1"
# Scenario 2: Growth is spatial, all else nonspatial
# Same as Scenario 1 except mortality is a fixed bootstrap mean (mo.alt)
# rather than a function of TC and damage.
for (s in 1:4){
  for (i in 1:ITER){
    H <- numeric(N)
    sim <- data.frame(H)
    sim$TC <- rep(seq(from = 0, to = 0.45, by = 0.05), each = 1000)
    sim$Dam <- 0
    sim$Esc <- 0
    # Cycle through 1000 time steps
    for (t in 1:1000){
      # TC-dependent growth, as in Scenario 1.
      sim$H <- ifelse(sim$Dam == 0, sim$H + gr.u[i,1] + gr.u[i,2] * sim$TC,
                      sim$H + gr[i,1] + gr[i,2] * sim$TC)
      sim$Dam <- rbinom(N, size = 1, prob = he[i] * f[s]) +
        rbinom(N, size = 1, prob = fi[i] * f[s])
      sim$Dam <- ifelse(sim$Dam == 2, 1, sim$Dam)
      topkilled <- rbinom(N, size = 1, prob = tk[i] * f[s])
      sim$H <- ifelse(topkilled == 1 & sim$Esc == 0, 0, sim$H)
      # Probability of mortality is fixed
      prob.m.3y <- mo.alt[i] # Three-year probability of mortality - annualize
      prob.m.1y <- 1 - (1 - prob.m.3y) ^ (1/3)
      Dead <- rbinom(N, size = 1, prob = prob.m.1y)
      # Assume dead trees are removed and replaced with new seedlings on height 0
      sim$H <- ifelse(Dead == 1 & sim$Esc == 0, 0, sim$H)
      sim$Esc <- ifelse(sim$H > 2, 1, 0)
    }
    # Calculate proportion escaped for each TC class
    sim.ag <- aggregate(Esc ~ TC, sim, mean)
    sim.ag$f <- f[s]
    sim.ag$ITER <- i
    if (s == 1 & i == 1) sim2 <- sim.ag
    else sim2 <- rbind(sim2, sim.ag)
    cat(s, i, '\n')  # progress indicator
  }
}
sim2$Scen <- "Scenario 2"
# Scenario 3: Growth is non-spatial, all else is spatial
# Growth uses bootstrap means (gr.alt / gr.u.alt); herbivory, fire, topkill
# and mortality are all modelled as functions of TC30.
for (s in 1:4){
  for (i in 1:ITER){
    H <- numeric(N)
    sim <- data.frame(H)
    sim$TC <- rep(seq(from = 0, to = 0.45, by = 0.05), each = 1000)
    sim$Dam <- 0
    sim$Esc <- 0
    # Cycle through 1000 time steps
    for (t in 1:1000){
      # Fixed (TC-independent) growth, split by damage status.
      sim$H <- ifelse(sim$Dam == 0, sim$H + gr.u.alt[i],
                      sim$H + gr.alt[i])
      # Probability of herbivory
      lp.h <- he.alt[i,1] + he.alt[i,2] * sim$TC # Linear predictor for inverse logit
      prob.h <- exp(lp.h) / (exp(lp.h) + 1) * f[s]
      # Probability of fire
      lp.f <- fi.alt[i,1] + fi.alt[i,2] * sim$TC # Linear predictor for inverse logit
      prob.f <- exp(lp.f) / (exp(lp.f) + 1) * f[s]
      # Probability of damage
      sim$Dam <- rbinom(N, size = 1, prob.h) + rbinom(N, size = 1, prob.f)
      sim$Dam <- ifelse(sim$Dam == 2, 1, sim$Dam)
      # Probability of topkill.
      # NOTE(review): prob.t comes from a *linear* model and is then scaled by
      # f[s], so it is not guaranteed to lie in [0, 1]; rbinom returns NA with
      # a warning for out-of-range probabilities - confirm it stays in range.
      prob.t <- tk.alt[i,1] + tk.alt[i,2] * sim$TC
      topkilled <- rbinom(N, size = 1, prob = prob.t * f[s])
      sim$H <- ifelse(topkilled == 1 & sim$Esc == 0, 0, sim$H)
      # Probability of mortality (depends on TC and damage)
      lp.m <- mo[i,1] + mo[i,2] * sim$TC + mo[i,3] * sim$Dam # Linear predictor for inverse logit
      prob.m.3y <- exp(lp.m) / (exp(lp.m) + 1) # Three-year probability of mortality - annualize
      prob.m.1y <- 1 - (1 - prob.m.3y) ^ (1/3)
      Dead <- rbinom(N, size = 1, prob = prob.m.1y)
      # Assume dead trees are removed and replaced with new seedlings on height 0
      sim$H <- ifelse(Dead == 1 & sim$Esc == 0, 0, sim$H)
      sim$Esc <- ifelse(sim$H > 2, 1, 0)
    }
    # Calculate proportion escaped for each TC class
    sim.ag <- aggregate(Esc ~ TC, sim, mean)
    sim.ag$f <- f[s]
    sim.ag$ITER <- i
    if (s == 1 & i == 1) sim3 <- sim.ag
    else sim3 <- rbind(sim3, sim.ag)
    cat(s, i, '\n')  # progress indicator
  }
}
sim3$Scen <- "Scenario 3"
# Stack the three scenarios and prepend the across-iteration means as a
# pseudo-iteration with ITER == 0 (drawn in red in fig8).
sim.all <- rbind(sim1, sim2)
sim.all <- rbind(sim.all, sim3)
sim.all.ag <- aggregate(Esc ~ Scen + f + TC, sim.all, mean)
sim.all.ag$ITER <- 0
# Reorder columns so the mean rows and the per-iteration rows line up for rbind.
sim.all.ag <- sim.all.ag[ ,c(1:3,5,4)]
sim.all <- sim.all[, c(5,3,1,4,2)]
sim.all <- rbind(sim.all.ag, sim.all)
# Save simulation results
# NOTE(review): results are written to "..._finalb.csv" but read back from
# "..._final.csv" - confirm this is intentional (e.g. re-plotting a previous
# run without overwriting it) and not a typo.
write.csv(sim.all, "Simulation_results_finalb.csv", row.names = FALSE)
sim.all <- read.csv("Simulation_results_final.csv")
# Pretty facet labels for the disturbance multiplier.
sim.all$f <- ifelse(sim.all$f == 0.1, 'f = 0.1',
                    ifelse(sim.all$f == 1, 'f = 1.0',
                           ifelse(sim.all$f == 1.5, 'f = 1.5', 'f = 0.5')))
sim.all$f <- as.factor(sim.all$f)
# Fig. 8: escape probability vs tree cover; thin black lines are the bootstrap
# iterations, the red line the mean (ITER == 0); faceted multiplier x scenario.
fig8 <- ggplot(NULL, aes(x = TC, y = Esc)) +
  geom_line(data = subset(sim.all, ITER != "0"), aes(group = ITER)) +
  geom_line(data = subset(sim.all, ITER == "0"), col = 'red', lwd = 1.2) +
  facet_grid(rows = vars(f), cols = vars(Scen)) +
  labs(x = expression(paste('TC'[30])),
       y = 'Escape probability') +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        strip.text = element_text(size = rel(1.2)),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(1.5)),
        axis.title.x = element_text(vjust = 0, size = rel(1.5)),
        axis.text = element_text(size = rel(1.2)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"))
# Redo simulations (with f = 1 only) for two additional cases for comparison:
# Escape height of 3 m instead of 2 (sim4), and taking maximum growth instead of mean (sim5)
# Keep the f = 1 slice of Scenario 1 as the baseline for figS3.
sim.all2 <- subset(sim.all, f == 'f = 1.0' & Scen == 'Scenario 1')
sim.all2$f <- NULL
# s is held at 3 so that f[s] == 1 inside the loop.
for (s in 3:3){
  for (i in 1:ITER){
    H <- numeric(N)
    sim <- data.frame(H)
    sim$TC <- rep(seq(from = 0, to = 0.45, by = 0.05), each = 1000)
    sim$Dam <- 0
    sim$Esc <- 0
    # Cycle through 1000 time steps (the original comment said 500)
    for (t in 1:1000){
      sim$H <- ifelse(sim$Dam == 0, sim$H + gr.u[i,1] + gr.u[i,2] * sim$TC,
                      sim$H + gr[i,1] + gr[i,2] * sim$TC)
      sim$Dam <- rbinom(N, size = 1, prob = he[i] * f[s]) +
        rbinom(N, size = 1, prob = fi[i] * f[s])
      sim$Dam <- ifelse(sim$Dam == 2, 1, sim$Dam)
      topkilled <- rbinom(N, size = 1, prob = tk[i] * f[s])
      sim$H <- ifelse(topkilled == 1 & sim$Esc == 0, 0, sim$H)
      # Probability of mortality (depends on TC and damage)
      lp.m <- mo[i,1] + mo[i,2] * sim$TC + mo[i,3] * sim$Dam # Linear predictor for inverse logit
      prob.m.3y <- exp(lp.m) / (exp(lp.m) + 1) # Three-year probability of mortality - annualize
      prob.m.1y <- 1 - (1 - prob.m.3y) ^ (1/3)
      Dead <- rbinom(N, size = 1, prob = prob.m.1y)
      # Assume dead trees are removed and replaced with new seedlings on height 0
      sim$H <- ifelse(Dead == 1 & sim$Esc == 0, 0, sim$H)
      # Escape threshold raised to 3 m - the only change from Scenario 1.
      sim$Esc <- ifelse(sim$H > 3, 1, 0)
    }
    # Calculate proportion escaped for each TC class
    sim.ag <- aggregate(Esc ~ TC, sim, mean)
    sim.ag$ITER <- i
    if (i == 1) sim4 <- sim.ag
    else sim4 <- rbind(sim4, sim.ag)
    cat(i, '\n')  # progress indicator
  }
}
sim4$Scen <- "Scenario 4"
# Recalculate growth parameters based on maximum rather than mean growth:
# same as Scenario 1 (escape at 2 m, f = 1) but using the 95th-quantile
# growth coefficients (gr.max / gr.u.max) from the bootstrap.
for (s in 3:3){  # s = 3 so that f[s] == 1
  for (i in 1:ITER){
    H <- numeric(N)
    sim <- data.frame(H)
    sim$TC <- rep(seq(from = 0, to = 0.45, by = 0.05), each = 1000)
    sim$Dam <- 0
    sim$Esc <- 0
    # Cycle through 1000 time steps (the original comment said 500)
    for (t in 1:1000){
      # Maximum (95th-quantile) growth instead of mean growth.
      sim$H <- ifelse(sim$Dam == 0, sim$H + gr.u.max[i,1] + gr.u.max[i,2] * sim$TC,
                      sim$H + gr.max[i,1] + gr.max[i,2] * sim$TC)
      sim$Dam <- rbinom(N, size = 1, prob = he[i] * f[s]) +
        rbinom(N, size = 1, prob = fi[i] * f[s])
      sim$Dam <- ifelse(sim$Dam == 2, 1, sim$Dam)
      topkilled <- rbinom(N, size = 1, prob = tk[i] * f[s])
      sim$H <- ifelse(topkilled == 1 & sim$Esc == 0, 0, sim$H)
      # Probability of mortality (depends on TC and damage)
      lp.m <- mo[i,1] + mo[i,2] * sim$TC + mo[i,3] * sim$Dam # Linear predictor for inverse logit
      prob.m.3y <- exp(lp.m) / (exp(lp.m) + 1) # Three-year probability of mortality - annualize
      prob.m.1y <- 1 - (1 - prob.m.3y) ^ (1/3)
      Dead <- rbinom(N, size = 1, prob = prob.m.1y)
      # Assume dead trees are removed and replaced with new seedlings on height 0
      sim$H <- ifelse(Dead == 1 & sim$Esc == 0, 0, sim$H)
      sim$Esc <- ifelse(sim$H > 2, 1, 0)
    }
    # Calculate proportion escaped for each TC class
    sim.ag <- aggregate(Esc ~ TC, sim, mean)
    sim.ag$ITER <- i
    if (i == 1) sim5 <- sim.ag
    else sim5 <- rbind(sim5, sim.ag)
    cat(i, '\n')  # progress indicator
  }
}
sim5$Scen <- "Scenario 5"
# Combine Scenarios 4 and 5, append their across-iteration means as
# pseudo-iteration ITER == 0, and stack them with the Scenario 1 baseline.
sim45 <- rbind(sim4, sim5)
sim45.ag <- aggregate(Esc ~ Scen + TC, sim45, mean)
sim45.ag$ITER <- 0
# Reorder columns so the mean rows and per-iteration rows line up for rbind.
sim45.ag <- sim45.ag[ ,c(1,2,4,3)]
sim45 <- sim45[ ,c(4,1:3)]
sim45 <- rbind(sim45.ag, sim45)
sim.all2 <- rbind(sim.all2, sim45)
# Fig. S3: as fig8 but comparing Scenario 1 with the 3 m escape (Scenario 4)
# and maximum-growth (Scenario 5) sensitivity runs; red line = mean.
figS3 <- ggplot(NULL, aes(x = TC, y = Esc)) +
  geom_line(data = subset(sim.all2, ITER != "0"), aes(group = ITER)) +
  geom_line(data = subset(sim.all2, ITER == "0"), col = 'red', lwd = 1.2) +
  facet_wrap(~ Scen, nrow = 1) +
  labs(x = expression(paste('TC'[30])),
       y = 'Escape probability') +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.background = element_blank(),
        strip.text = element_text(size = rel(1.2)),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(1.5)),
        axis.title.x = element_text(vjust = 0, size = rel(1.5)),
        axis.text = element_text(size = rel(1.2)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"))
# Plot the discontinuity in tree cover across the breakpoint
# Obtain the mean tree cover plus/minus 150 m from each breakpoint
# (the original comment said 100 m; the code below uses 150 m).
# For each transect, recalculate distance in relation to the breakpoint
tcid$Dist <- tcid$Distpca - tcid$Brkdist
# NOTE(review): magic column indices - selecting by name would be safer.
tcid <- tcid[, c(1,3,13,14)]
tcid <- subset(tcid, Dist >= -150 & Dist <= 150)
# Obtain a mean TC30 across all transects in 30 m distance bins,
# labelled by bin midpoint.
tcid$Dbin30 <- cut(tcid$Dist, breaks = seq(from = -150, to = 150, by = 30),
                   labels = seq(from = -135, to = 135, by = 30))
tcid.ag <- aggregate(TC30 ~ Dbin30, tcid, mean)
tcid.ag.se <- aggregate(TC30 ~ Dbin30, tcid, sd)
names(tcid.ag.se)[2] <- 'TC30.se'
# Standard error assuming n = 8 transects per bin - TODO confirm.
tcid.ag.se$TC30.se <- tcid.ag.se$TC30.se / sqrt(8)
tcid.ag <- merge(tcid.ag, tcid.ag.se, by = 'Dbin30')
# Plot actual TC along transition (mean +/- 1 SE per distance bin).
fig9 <- ggplot(tcid.ag, aes(x = as.numeric(as.character(Dbin30)), y = TC30)) +
  geom_line(lwd = 2) +
  geom_errorbar(aes(ymin = TC30 - TC30.se,
                    ymax = TC30 + TC30.se),
                width = 5, lwd = 1) +
  labs(x = 'Distance (m)',
       y = expression(paste('TC'[30]))) +
  theme_bw() +
  theme(panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        axis.title.y = element_text(angle=90, vjust = 2, size = rel(2)),
        axis.title.x = element_text(size = rel(2)),
        axis.text = element_text(size = rel(1.5)),
        axis.line = element_line(colour="black"),
        axis.ticks = element_line(colour="black"),
        axis.ticks.length = unit(.25, "cm"))
|
cd352b74ae8c209c5c6781efc5b3ee0ec2823981
|
3ca04a6bce47b2bed1942e79d35ef47a2460a3c0
|
/man/packs.go.Rd
|
e1a1e66cc7aadd8b9d84b85ddf6004c05d8fc3c2
|
[] |
no_license
|
Rterial/Rtbase
|
d863c743f7ed5ac63afb2188b08643c2cd17e504
|
cdb784fb75a04718973852c4803067bbdc92dd2f
|
refs/heads/master
| 2021-01-10T15:32:10.379629
| 2016-05-13T07:04:38
| 2016-05-13T07:04:38
| 53,450,341
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 202
|
rd
|
packs.go.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/startup_utils.R
\name{packs.go}
\alias{packs.go}
\title{Startup Load}
\usage{
packs.go()
}
\description{
\code{packs.go}
}
|
7b791a693ed903b5480d5ae004f6bc4cccbcecc2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/apdesign/examples/apdesign_i.Rd.R
|
e586c192563f15e50d82d2e8b68a6c9df413d561
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 464
|
r
|
apdesign_i.Rd.R
|
library(apdesign)
# Example for apdesign_i: AP coding for a single subject.

# Ten observations spread over three cycles of within-cycle times.
response <- c(10, 15, 21, 20, 23, 25, 27, 25, 28, 29)
obs_time <- c(0.2, 0.5, 0.7, 0.3, 0.6, 0.75, 0.89, 0.1, 0.3, 0.8)
cycle_id <- rep(1:3, times = c(3, 4, 3))
dat <- data.frame(y = response, time = obs_time, cycle = cycle_id)

# Build the AP design, centering at cycle 1 / time 0 with polynomial
# degrees 2 (amplitude) and 1 (phase).
apdesign_i(data = dat, time_var = "time", cycle_var = "cycle",
           center_cycle = 1, center_time = 0, max_degree = c(2, 1))
|
334676fefaf7dcc0634f2ef7f07f073241092392
|
93edbaaae57c83fb12869e46b645c31075af94aa
|
/wikidata2R2postGIS.R
|
c857740d71ff4d2cf79f3554c4fb10345869d542
|
[] |
no_license
|
cbittner/wikidatanalysis
|
c1aa273b843c8268376909da43b9c17fbd7d50cb
|
5dc2f3e980f3dd7d12449cc64ad1d5abce5dcde8
|
refs/heads/master
| 2021-09-16T01:45:50.643448
| 2018-06-14T16:57:30
| 2018-06-14T16:57:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,887
|
r
|
wikidata2R2postGIS.R
|
# Required packages and PostgreSQL connection setup.
# NOTE(review): replace the [PLACEHOLDER] values with real credentials before
# sourcing this file; as distributed, the placeholders are not valid R syntax.
{
  pg_dbname <- "[DBNAME]"
  pg_host <- "[HOST]"
  pg_port <- [PORT]
  pg_user <- "[USERNAME]"
  pg_password <- "[PASSWORD]"
}

# Open the database connection used by df_to_postgis() below.
# install.packages("RPostgreSQL")
library(RPostgreSQL)  # library() errors loudly if missing; require() only warns
drv <- dbDriver("PostgreSQL")
con <- dbConnect(drv,
                 dbname = pg_dbname,
                 host = pg_host,
                 port = pg_port,
                 user = pg_user,
                 password = pg_password)

# sqldf is used below to issue raw SQL (COMMENT / ALTER / UPDATE) against the
# same database; point it at the PostgreSQL backend.
# install.packages("sqldf")
library(sqldf)
options(sqldf.RPostgreSQL.user = pg_user,
        sqldf.RPostgreSQL.password = pg_password,
        sqldf.RPostgreSQL.dbname = pg_dbname,
        sqldf.RPostgreSQL.host = pg_host,
        sqldf.RPostgreSQL.port = pg_port)
# (Removed a stray unmatched "}" that followed the options() call and made
# the file unparseable.)
# Create a data frame from a Wikidata SPARQL query with a large result set.
#
# The built-in parser of the SPARQL package cannot handle very large query
# results, so this function writes the query to a file in the R working
# directory and runs it through an external curl command instead (curl must
# be installed on the computer).
#
# query: the SPARQL query (string)
# endpoint: the URL of the SPARQL endpoint (string)
# geoCol: if TRUE, the result is expected to contain a 'coord' column with
# geographic coordinates (https://www.wikidata.org/wiki/Property:P625)
# outfile: base name (without extension) for the intermediate CSV of results;
# defaults to the previously hard-coded "outputfile"
# Returns the query result as a data frame (with numeric lng/lat columns
# added when geoCol is TRUE).
sparql_to_df <- function (query, endpoint, geoCol=TRUE, outfile="outputfile"){
  # Write the query to a file so curl can URL-encode it via query@file.
  write(query, file = "query.sparql")
  queryResultFileName <- paste(outfile, '.csv', sep='')
  # Assemble the curl command: request CSV output, POST the query, save the
  # response to the local CSV file (-k skips TLS certificate verification).
  curlString <- 'curl -H \"Accept:text/csv\" --data-urlencode query@query.sparql '
  curlCommand <- paste(curlString, endpoint, ' -o ', queryResultFileName,' -k', sep="")
  # Execute the curl command.
  # NOTE(review): cmd.exe makes this Windows-only; system2("curl", ...) would
  # be portable across platforms.
  system("cmd.exe", input=curlCommand)
  # Import the CSV into an R data frame.
  df = read.csv(queryResultFileName, header = TRUE, stringsAsFactors = FALSE)
  if (geoCol==TRUE){
    # Extract the first decimal number in the coordinate string as longitude
    # and the last as latitude (assumes WKT-style "Point(lng lat)" values -
    # TODO confirm against the endpoint's output).
    df$lng <- as.numeric(gsub(".*?([-]*[0-9]+[.][0-9]+).*", "\\1", df$coord))
    df$lat <- as.numeric(gsub(".* ([-]*[0-9]+[.][0-9]+).*", "\\1", df$coord))
  }
  return(df)
}
# Transfer a data frame to a PostGIS table.
#
# df: the data frame you want to bring to PostGIS
# query: the SPARQL query that produced df (string; stored as a table comment)
# endpoint: the URL of the SPARQL endpoint (string; stored in the comment)
# pgtable: a name for the PostGIS table
# geoCol: if TRUE, df contains lng/lat columns from which point geometries
# (https://www.wikidata.org/wiki/Property:P625) are built
#
# Relies on the global connection `con` and the sqldf options set at the top
# of this file.
# NOTE(review): all SQL below is built by pasting strings; a pgtable name or
# query text containing a single quote will break the statements (and is an
# injection vector) - consider DBI::dbQuoteIdentifier/dbQuoteString.
df_to_postgis <- function(df, query, endpoint, pgtable, geoCol=TRUE){
  # Create the table from the data frame.
  dbWriteTable(con, pgtable, df)
  # Record the query provenance (query text, endpoint, timestamp) as a
  # COMMENT on the new table.
  time_now <- Sys.time()
  date_now <- Sys.Date()
  pg_comment <- paste("COMMENT ON TABLE ", pgtable, " IS '",
                      "sparql query: \n", query,
                      "\n \n endpoint: ", endpoint,
                      "\n date: ", date_now, " ", time_now,"';", sep="")
  sqldf(pg_comment)
  # If input contains coordinates, create point geometries in PG (SRID 4326).
  if (geoCol==TRUE){
    pg_alter <- paste("alter table ",pgtable, " add column geom geometry;", sep="")
    pg_makepoint <- paste("update ",pgtable, " set geom = st_MakePoint(lng, lat);", sep="")
    pg_setsrid <- paste("update ",pgtable, " SET geom = ST_SetSRID(geom, 4326);", sep="")
    sqldf(pg_alter)
    sqldf(pg_makepoint)
    sqldf(pg_setsrid)
    # Refresh geometry_columns metadata so GIS clients see the new column.
    sqldf("select populate_geometry_columns();")
  }
}
|
108e1d1399aa343ad313a4b46ced072a287c0d73
|
72d9009d19e92b721d5cc0e8f8045e1145921130
|
/LongMemoryTS/man/fdiff.Rd
|
b198148fc344516d4c5585df1ac7c4e8c4c35049
|
[] |
no_license
|
akhikolla/TestedPackages-NoIssues
|
be46c49c0836b3f0cf60e247087089868adf7a62
|
eb8d498cc132def615c090941bc172e17fdce267
|
refs/heads/master
| 2023-03-01T09:10:17.227119
| 2021-01-25T19:44:44
| 2021-01-25T19:44:44
| 332,027,727
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 860
|
rd
|
fdiff.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/fdiff.R
\name{fdiff}
\alias{fdiff}
\title{Fast fractional differencing procedure of Jensen and Nielsen (2014).}
\usage{
fdiff(x, d)
}
\arguments{
\item{x}{series to be differenced}
\item{d}{memory parameter indicating order of the fractional difference.}
}
\description{
Takes the d-th difference of the series.
}
\details{
This code was first published on the \href{http://www.econ.queensu.ca/faculty/mon/software/}{university webpage of Morten Nielsen} and is
redistributed here with the author's permission.
}
\examples{
acf(fdiff(x=rnorm(500), d=0.4))
}
\references{
Jensen, A. N. and Nielsen, M. O. (2014):
A fast fractional difference algorithm, Journal of Time Series Analysis 35(5), pp. 428-436.
}
\author{
Jensen, A. N. and Nielsen, M. O.
}
|
613e8e7a74d1cc115d7235d8f044ac843c2ce885
|
90c8e41885c3259949f63b3aa9fe16d7763e02d9
|
/Scripts/create_blood_date.R
|
660931dbf33233de744ae687aae1024c18ea3721
|
[] |
no_license
|
CPRyan/Cebu_reproductive_histories
|
67a95f979cb3c78f65b395913b490190d70cd110
|
c2beee496bb297a383f3a3275681ade6d1bd9a89
|
refs/heads/master
| 2020-03-27T10:06:56.494721
| 2019-06-19T02:44:17
| 2019-06-19T02:44:17
| 146,392,851
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 688
|
r
|
create_blood_date.R
|
# Create Blood date
# Builds a per-subject blood-draw date table and writes it to
# Data/blood.draw.dates.csv.
# NOTE(review): relies on haven::read_dta, dplyr (%>%, select) and
# lubridate::ymd being attached by the caller; the input path is an
# absolute, machine-specific location.
# Load the full data
date <- read_dta("~/Academic/PhD/Projects/Cebu/Methylation/Fall 2015/Full_cohort_for_Meaghan.dta")
# Inspect the Stata variable labels carried along by read_dta.
labels <- lapply(date, attr, "label")
str(labels, list.len=ncol(date))
# Keep only the subject id, sex, and blood-draw day/month/year components.
blood.date <-date %>%
select(uncchdid, icsex, dayblood, monthblood, yearblood)
blood.date$yearblood
# Assemble a Date from the components.
# NOTE(review): yearblood+2000 assumes two-digit years all fall in the
# 2000s -- confirm against the codebook.
blood.draw.date <-ymd(with(blood.date, paste(yearblood+2000, monthblood, dayblood, sep = "/")))
# Sanity checks: row count should match the number of parsed dates.
dim(blood.date)
length(blood.draw.date)
blood.date.new <-cbind(blood.date, blood.draw.date)
blood.date.new$icsex <-as.factor(blood.date.new$icsex)
blood.date.new$uncchdid <-as.character(blood.date.new$uncchdid)
write.csv(blood.date.new, "Data/blood.draw.dates.csv")
|
f4b3a30ffcdc4cd40ed1e735decd6ad92d0eafc3
|
eb53a9516cdc7f846e4c07ac4c0cdffcaa561a2c
|
/plo1.R
|
e8bb03bcf8e13c1544404e6af44dc6e0ce257cd5
|
[] |
no_license
|
hwyan0220/ExData_Plotting1
|
1961f253440dcd042165d34bfafaccf5892f31e4
|
43dc05db44d6e75fc7c413ddf073fcad00c442fa
|
refs/heads/master
| 2021-05-03T22:53:32.258636
| 2016-10-23T14:35:55
| 2016-10-23T14:35:55
| 71,701,681
| 0
| 0
| null | 2016-10-23T12:34:23
| 2016-10-23T12:34:22
| null |
UTF-8
|
R
| false
| false
| 590
|
r
|
plo1.R
|
# plot1.R -- histogram of Global Active Power for 2007-02-01/02.
# Produces plot1.png (480x480 px) in the working directory.
setwd('H:/exploratory/assignment')

# Read the full dataset; '?' marks missing values in this file.
elec <- read.table('household_power_consumption.txt', header = TRUE,
                   sep = ';', na.strings = '?')

# Keep only the two target days (dates are stored as d/m/Y strings);
# the redundant full-data copy from the original script is dropped.
elec1 <- subset(elec, Date == '1/2/2007' | Date == '2/2/2007')
elec1$Time <- strptime(paste(elec1$Date, elec1$Time), '%e/%m/%Y %H:%M:%S')
elec1$Date <- as.Date(elec1$Date, '%e/%m/%Y')

# Draw straight into the png device: dev.copy() can silently inherit the
# on-screen device's size, so opening the target device first is more
# reliable for a fixed 480x480 output.
png(file = 'plot1.png', width = 480, height = 480, units = 'px')
par(mfrow = c(1, 1), mar = c(5, 4, 2, 2))
with(elec1, hist(Global_active_power, main = 'Global Active Power',
                 xlab = 'Global Active Power (kilowatts)', col = 'red'))
dev.off()
|
d179814a20b26589d141c66b30b2b2e5e720538c
|
bf74cef0ed028929d29eae883d9008c258130f62
|
/man/cotan_analysis.Rd
|
10f3d89b9f64296c2dbbd1240bd4c4284e3cb761
|
[] |
no_license
|
seriph78/COTAN_stable
|
039ff813d7ec5c4f2c41e57b563deea0950ab211
|
480f76b881ae99e3605229524c1412933f3d4146
|
refs/heads/main
| 2023-05-10T05:01:05.837838
| 2021-06-06T17:01:29
| 2021-06-06T17:01:29
| 374,412,971
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 661
|
rd
|
cotan_analysis.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/COTAN_main.R
\name{cotan_analysis}
\alias{cotan_analysis}
\alias{cotan_analysis,scCOTAN-method}
\title{cotan_analysis}
\usage{
cotan_analysis(object, cores = 1)
\S4method{cotan_analysis}{scCOTAN}(object, cores = 1)
}
\arguments{
\item{object}{A COTAN object}
\item{cores}{number of cores to use. Default is 1.}
}
\value{
a COTAN object
}
\description{
This is the main function. It estimates a vector that stores all the
negative binomial
dispersion factors. It needs to be run after \code{\link{clean}}.
}
\examples{
data("ERCC.cotan")
ERCC.cotan = cotan_analysis(ERCC.cotan)
}
|
6d8de9079d992ff38ba0d5ec1673a48615d5d6ab
|
afb10b7c49c7e2f86ba319dbd55083ec6672ea20
|
/scripts/path_sampling_plotting.R
|
b8a843bbaa921a3dbabb4e92b72f2d27083fd300
|
[] |
no_license
|
ahmedmagds/TreePrior-ClockRate
|
ced3522da37f9b3ba50a7953e7028b4c2675bfd3
|
4d178e531848f91cbc9b55894bfad7a692f8d592
|
refs/heads/master
| 2021-09-09T17:55:27.710604
| 2018-03-18T19:34:52
| 2018-03-18T19:34:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,511
|
r
|
path_sampling_plotting.R
|
library(ggplot2)
library(xtable)
expand_model_names <- function(df) {
  # Replace the short model codes in the factor levels of df$Model with
  # human-readable display names; returns the modified data frame.
  label_for <- c(
    bd          = "Birth death",
    bdsky       = "Birth death skyline",
    const_coal  = "Constant coalescent",
    exp_coal    = "Exp. growth coalescent",
    sky_coal    = "Skyline coalescent",
    struct_coal = "Structured coalescent"
  )
  levels(df$Model) <- unname(label_for[levels(df$Model)])
  df
}
# Read the path-sampling likelihood summary for `data_name`, convert the
# likelihoods to log Bayes factors relative to the structured coalescent
# (within each step count), and save two bar-chart PDFs. Returns the
# transformed data frame.
# NOTE(review): paste(..., sep="/") combined with the trailing "/" already in
# "../results/" yields double slashes in the path; harmless on most systems
# but worth tidying.
# NOTE(review): the ggplot objects are built but never print()ed before
# ggsave(); ggsave saves last_plot(), so confirm the intended plot is the one
# written when this runs non-interactively.
create_plot = function(data_name) {
df <- read.table(paste("../results/", data_name, "/path_sampling/likelihood_summary.txt", sep="/"), quote="\"", comment.char="")
names(df) = c("Model", "Steps", "Likelihood")
df$Steps = as.factor(df$Steps)
# Subtract the structured-coalescent likelihood within each step count.
for (s in levels(df$Steps)) {
df[df$Steps==s,"Likelihood"] = df[df$Steps==s,"Likelihood"] - df[df$Model=="struct_coal"&df$Steps==s,"Likelihood"]
}
df = expand_model_names(df)
# Supplementary figure: one facet per step count.
ggplot(aes(Model, Likelihood), data = df) +
geom_bar(stat = "identity", position = "dodge") + ylab("log Bayes Factor") +
theme_bw(base_size = 10) +
theme(axis.text.x = element_text(angle = 45, hjust = 1), axis.title.x=element_blank())+
facet_grid(. ~ Steps)
ggsave(paste("../figures/suppl_", data_name, "_likelihoods.pdf", sep=""), height = 7, width = 17, units = "cm")
# Main figure: 16 steps only.
ggplot(aes(Model, Likelihood), data = subset(df, Steps == 16)) +
geom_bar(stat = "identity", position = "dodge") + ylab("log Bayes Factor") +
theme_bw(base_size = 10) +
theme(axis.text.x = element_text(angle = 45, hjust = 1), axis.title.x=element_blank())
ggsave(paste("../figures/", data_name, "_likelihoods.pdf", sep=""), height = 7, width = 8.5, units = "cm")
return(df)
}
create_table <- function(df) {
  # Build a models-by-steps matrix of log Bayes factors from the long-format
  # data frame produced by create_plot() (columns Model, Steps, Likelihood;
  # Model and Steps must be factors).
  # Improvement over the original: the matrix is preallocated with dimnames
  # instead of being grown with c()/rbind() inside nested loops.
  model_lvls <- levels(df$Model)
  step_lvls <- levels(df$Steps)
  result <- matrix(
    NA_real_,
    nrow = length(model_lvls),
    ncol = length(step_lvls),
    dimnames = list(model_lvls, paste(step_lvls, "steps"))
  )
  for (model in model_lvls) {
    for (steps in step_lvls) {
      hit <- which(df$Model == model & df$Steps == steps)
      # A missing model/steps combination leaves NA in place.
      if (length(hit) > 0) {
        result[model, paste(steps, "steps")] <- df$Likelihood[hit[1]]
      }
    }
  }
  result
}
# Driver: build the likelihood figures and a LaTeX table for both data sets.
for (data_name in c('guinea', 'gire')) {
df <- create_plot(data_name)
print.xtable(xtable(create_table(df), align="lrrrr", label=paste0("table:",data_name,"_likelihoods"), caption=data_name), file=paste0("../figures/",data_name,"_likelihoods.tex"))
}
|
fae6c3ac6971349e6e5be877eb5ee9aa1e1543b6
|
f8f93db15f8f366fffb4728e209f42d12007bac9
|
/12/notes12.R
|
57fc444d285cf650069d0ad5c0af9ad42d3e75dd
|
[] |
no_license
|
ionides/531w18
|
065c45797e61554339d1735753debeead6a37b86
|
331ad8309f2514191f649b18f637be264292ba8e
|
refs/heads/master
| 2023-01-02T14:09:43.496031
| 2020-10-22T17:46:35
| 2020-10-22T17:46:35
| 113,875,595
| 7
| 87
| null | 2018-02-20T22:39:46
| 2017-12-11T15:27:32
|
HTML
|
UTF-8
|
R
| false
| false
| 10,809
|
r
|
notes12.R
|
# notes12.R -- knitr-purled lecture script: likelihood-based inference for
# POMP models of the boarding-school flu data using the pomp package
# (particle filtering, likelihood slices/surfaces, and IF2 maximization).
# NOTE(review): chunks must run in order; several chunks read local files
# (bsflu_data.txt, mif_bsflu_params.csv) and cache expensive results to
# .rda files via stew().
## ----opts,include=FALSE,cache=FALSE--------------------------------------
options(
keep.source=TRUE,
stringsAsFactors=FALSE,
encoding="UTF-8"
)
## ----prelims,echo=F,cache=F----------------------------------------------
set.seed(594709947L)
require(ggplot2)
theme_set(theme_bw())
require(plyr)
require(reshape2)
require(foreach)
require(doMC)
require(pomp)
stopifnot(packageVersion("pomp")>="0.69-1")
# Binomial-Euler SIR process model; H accumulates recoveries (H += dN_IR)
# and is listed in zeronames so pomp treats it as an accumulator variable.
## ----sir-construct-------------------------------------------------------
bsflu <- read.table("bsflu_data.txt")
sir_step <- Csnippet("
double dN_SI = rbinom(S,1-exp(-Beta*I/N*dt));
double dN_IR = rbinom(I,1-exp(-gamma*dt));
S -= dN_SI;
I += dN_SI - dN_IR;
R += dN_IR;
H += dN_IR;
")
sir_init <- Csnippet("
S = nearbyint(N)-1;
I = 1;
R = 0;
H = 0;
")
dmeas <- Csnippet("lik = dbinom(B,H,rho,give_log);")
rmeas <- Csnippet("B = rbinom(H,rho);")
pomp(bsflu,times="day",t0=0,
rprocess=euler.sim(sir_step,delta.t=1/5),
initializer=sir_init,rmeasure=rmeas,dmeasure=dmeas,
zeronames="H",statenames=c("H","S","I","R"),
paramnames=c("Beta","gamma","rho","N")) -> sir
# Monte Carlo likelihood by simple simulation (for comparison with the
# particle filter below).
## ----bbs-mc-like-2,results='markup'--------------------------------------
simulate(sir,params=c(Beta=2,gamma=1,rho=0.5,N=2600),
nsim=10000,states=TRUE) -> x
matplot(time(sir),t(x["H",1:50,]),type='l',lty=1,
xlab="time",ylab="H",bty='l',col='blue')
lines(time(sir),obs(sir,"B"),lwd=2,col='black')
## ----bbs-mc-like-3,results='markup',cache=T------------------------------
ell <- dmeasure(sir,y=obs(sir),x=x,times=time(sir),log=TRUE,
params=c(Beta=2,gamma=1,rho=0.5,N=2600))
dim(ell)
## ----bbs-mc-like-4,results='markup'--------------------------------------
ell <- apply(ell,1,sum); summary(exp(ell)); logmeanexp(ell,se=TRUE)
## ----sir-sim1------------------------------------------------------------
sims <- simulate(sir,params=c(Beta=2,gamma=1,rho=0.8,N=2600),nsim=20,
as.data.frame=TRUE,include.data=TRUE)
ggplot(sims,mapping=aes(x=time,y=B,group=sim,color=sim=="data"))+
geom_line()+guides(color=FALSE)
# Particle-filter likelihood estimates (single run, then 10 replicates
# combined with logmeanexp to get a standard error).
## ----sir-pfilter-1,results='markup',cache=T------------------------------
pf <- pfilter(sir,Np=5000,params=c(Beta=2,gamma=1,rho=0.8,N=2600))
logLik(pf)
## ----sir-pfilter-2,results='markup',cache=T------------------------------
pf <- replicate(10,pfilter(sir,Np=5000,params=c(Beta=2,gamma=1,rho=0.8,N=2600)))
ll <- sapply(pf,logLik); ll
logmeanexp(ll,se=TRUE)
# One-dimensional likelihood slices in the Beta and gamma directions,
# evaluated in parallel with foreach/doMC.
## ----sir-like-slice,cache=TRUE,results='hide'----------------------------
sliceDesign(
c(Beta=2,gamma=1,rho=0.8,N=2600),
Beta=rep(seq(from=0.5,to=4,length=40),each=3),
gamma=rep(seq(from=0.5,to=2,length=40),each=3)) -> p
require(foreach)
require(doMC)
registerDoMC(cores=5)
## number of cores
## usually the number of cores on your machine, or slightly smaller
set.seed(998468235L,kind="L'Ecuyer")
mcopts <- list(preschedule=FALSE,set.seed=TRUE)
foreach (theta=iter(p,"row"),.combine=rbind,
.inorder=FALSE,.options.multicore=mcopts) %dopar%
{
pfilter(sir,params=unlist(theta),Np=5000) -> pf
theta$loglik <- logLik(pf)
theta
} -> p
## ----sir-like-slice-plot,cache=TRUE,results="hide"-----------------------
foreach (v=c("Beta","gamma")) %do%
{
x <- subset(p,slice==v)
plot(x[[v]],x$loglik,xlab=v,ylab="loglik")
}
# Two-dimensional likelihood surface over a Beta x gamma grid.
## ----sir-grid1-----------------------------------------------------------
expand.grid(Beta=seq(from=1,to=4,length=50),
gamma=seq(from=0.7,to=3,length=50),
rho=0.8,
N=2600) -> p
foreach (theta=iter(p,"row"),.combine=rbind,
.inorder=FALSE,.options.multicore=mcopts) %dopar%
{
pfilter(sir,params=unlist(theta),Np=5000) -> pf
theta$loglik <- logLik(pf)
theta
} -> p
## ----sir-grid1-plot------------------------------------------------------
pp <- mutate(p,loglik=ifelse(loglik>max(loglik)-100,loglik,NA))
ggplot(data=pp,mapping=aes(x=Beta,y=gamma,z=loglik,fill=loglik))+
geom_tile(color=NA)+
geom_contour(color='black',binwidth=3)+
scale_fill_gradient()+
labs(x=expression(beta),y=expression(gamma))
# Second model: bsflu with a two-stage recovery (states S, I, R1, R2) and
# Poisson measurement on the bed-confined (B) counts.
## ----load_bbs------------------------------------------------------------
bsflu_data <- read.table("bsflu_data.txt")
## ----bsflu_names---------------------------------------------------------
bsflu_statenames <- c("S","I","R1","R2")
bsflu_paramnames <- c("Beta","mu_I","rho","mu_R1","mu_R2")
## ----bsflu_obsnames------------------------------------------------------
(bsflu_obsnames <- colnames(bsflu_data)[1:2])
## ----csnippets_bsflu-----------------------------------------------------
bsflu_dmeasure <- "
lik = dpois(B,rho*R1+1e-6,give_log);
"
bsflu_rmeasure <- "
B = rpois(rho*R1+1e-6);
C = rpois(rho*R2);
"
bsflu_rprocess <- "
double t1 = rbinom(S,1-exp(-Beta*I*dt));
double t2 = rbinom(I,1-exp(-dt*mu_I));
double t3 = rbinom(R1,1-exp(-dt*mu_R1));
double t4 = rbinom(R2,1-exp(-dt*mu_R2));
S -= t1;
I += t1 - t2;
R1 += t2 - t3;
R2 += t3 - t4;
"
bsflu_fromEstimationScale <- "
TBeta = exp(Beta);
Tmu_I = exp(mu_I);
Trho = expit(rho);
"
bsflu_toEstimationScale <- "
TBeta = log(Beta);
Tmu_I = log(mu_I);
Trho = logit(rho);
"
bsflu_initializer <- "
S=762;
I=1;
R1=0;
R2=0;
"
## ----pomp_bsflu----------------------------------------------------------
require(pomp)
stopifnot(packageVersion("pomp")>="0.75-1")
bsflu2 <- pomp(
data=bsflu_data,
times="day",
t0=0,
rprocess=euler.sim(
step.fun=Csnippet(bsflu_rprocess),
delta.t=1/12
),
rmeasure=Csnippet(bsflu_rmeasure),
dmeasure=Csnippet(bsflu_dmeasure),
fromEstimationScale=Csnippet(bsflu_fromEstimationScale),
toEstimationScale=Csnippet(bsflu_toEstimationScale),
obsnames = bsflu_obsnames,
statenames=bsflu_statenames,
paramnames=bsflu_paramnames,
initializer=Csnippet(bsflu_initializer)
)
plot(bsflu2)
# run_level picks particle counts and iteration counts (1 = smallest/fastest,
# 3 = largest/slowest, used for the final results).
## ----run_level-----------------------------------------------------------
run_level <- 3
switch(run_level,
{bsflu_Np=100; bsflu_Nmif=10; bsflu_Neval=10; bsflu_Nglobal=10; bsflu_Nlocal=10},
{bsflu_Np=20000; bsflu_Nmif=100; bsflu_Neval=10; bsflu_Nglobal=10; bsflu_Nlocal=10},
{bsflu_Np=60000; bsflu_Nmif=300; bsflu_Neval=10; bsflu_Nglobal=100; bsflu_Nlocal=20}
)
## ----bsflu_params--------------------------------------------------------
bsflu_params <- data.matrix(read.table("mif_bsflu_params.csv",row.names=NULL,header=TRUE))
bsflu_mle <- bsflu_params[which.max(bsflu_params[,"logLik"]),][bsflu_paramnames]
## ----fixed_params--------------------------------------------------------
bsflu_fixed_params <- c(mu_R1=1/(sum(bsflu_data$B)/512),mu_R2=1/(sum(bsflu_data$C)/512))
## ----parallel-setup,cache=FALSE------------------------------------------
require(doParallel)
cores <- 20 # The number of cores on this machine
registerDoParallel(cores)
mcopts <- list(set.seed=TRUE)
set.seed(396658101,kind="L'Ecuyer")
## ----alternative_parallel, eval=FALSE------------------------------------
## require(doMC)
## registerDoMC(cores=20)
# Particle-filter likelihood at the stored MLE; stew() caches the result in
# an .rda file keyed by run_level so repeated runs skip the computation.
## ----pf------------------------------------------------------------------
stew(file=sprintf("pf-%d.rda",run_level),{
t_pf <- system.time(
pf <- foreach(i=1:20,.packages='pomp',
.options.multicore=mcopts) %dopar% try(
pfilter(bsflu2,params=bsflu_mle,Np=bsflu_Np)
)
)
},seed=1320290398,kind="L'Ecuyer")
(L_pf <- logmeanexp(sapply(pf,logLik),se=TRUE))
## ----set_cache, eval=FALSE-----------------------------------------------
## opts_chunk$set(
## cache=TRUE,
## )
# Local IF2 search: iterated filtering started at the stored MLE with
# geometric cooling and random-walk perturbations on Beta, mu_I, rho.
## ----box_search_local----------------------------------------------------
bsflu_rw.sd <- 0.02
bsflu_cooling.fraction.50 <- 0.5
stew(file=sprintf("local_search-%d.rda",run_level),{
t_local <- system.time({
mifs_local <- foreach(i=1:bsflu_Nlocal,.packages='pomp', .combine=c, .options.multicore=mcopts) %dopar% {
mif2(
bsflu2,
start=bsflu_mle,
Np=bsflu_Np,
Nmif=bsflu_Nmif,
cooling.type="geometric",
cooling.fraction.50=bsflu_cooling.fraction.50,
transform=TRUE,
rw.sd=rw.sd(
Beta=bsflu_rw.sd,
mu_I=bsflu_rw.sd,
rho=bsflu_rw.sd
)
)
}
})
},seed=900242057,kind="L'Ecuyer")
## ----lik_local_eval------------------------------------------------------
stew(file=sprintf("lik_local-%d.rda",run_level),{
t_local_eval <- system.time({
liks_local <- foreach(i=1:bsflu_Nlocal,.packages='pomp',.combine=rbind) %dopar% {
evals <- replicate(bsflu_Neval, logLik(pfilter(bsflu2,params=coef(mifs_local[[i]]),Np=bsflu_Np)))
logmeanexp(evals, se=TRUE)
}
})
},seed=900242057,kind="L'Ecuyer")
results_local <- data.frame(logLik=liks_local[,1],logLik_se=liks_local[,2],t(sapply(mifs_local,coef)))
summary(results_local$logLik,digits=5)
## ----pairs_local---------------------------------------------------------
pairs(~logLik+Beta+mu_I+rho,data=subset(results_local,logLik>max(logLik)-50))
# Global search: IF2 restarted from random parameter draws inside bsflu_box.
## ----box-----------------------------------------------------------------
bsflu_box <- rbind(
Beta=c(0.001,0.01),
mu_I=c(0.5,2),
rho = c(0.5,1)
)
## ----box_eval------------------------------------------------------------
stew(file=sprintf("box_eval-%d.rda",run_level),{
t_global <- system.time({
mifs_global <- foreach(i=1:bsflu_Nglobal,.packages='pomp', .combine=c, .options.multicore=mcopts) %dopar% mif2(
mifs_local[[1]],
start=c(apply(bsflu_box,1,function(x)runif(1,x[1],x[2])),bsflu_fixed_params)
)
})
},seed=1270401374,kind="L'Ecuyer")
## ----lik_global_eval-----------------------------------------------------
stew(file=sprintf("lik_global_eval-%d.rda",run_level),{
t_global_eval <- system.time({
liks_global <- foreach(i=1:bsflu_Nglobal,.packages='pomp',.combine=rbind, .options.multicore=mcopts) %dopar% {
evals <- replicate(bsflu_Neval, logLik(pfilter(bsflu2,params=coef(mifs_global[[i]]),Np=bsflu_Np)))
logmeanexp(evals, se=TRUE)
}
})
},seed=442141592,kind="L'Ecuyer")
results_global <- data.frame(logLik=liks_global[,1],logLik_se=liks_global[,2],t(sapply(mifs_global,coef)))
summary(results_global$logLik,digits=5)
## ----save_params,eval=FALSE----------------------------------------------
## if (run_level>2)
## write.table(rbind(results_local,results_global),
## file="mif_bsflu_params.csv",append=TRUE,col.names=FALSE,row.names=FALSE)
## ----pairs_global--------------------------------------------------------
pairs(~logLik+Beta+mu_I+rho,data=subset(results_global,logLik>max(logLik)-250))
## ----class_mifs_global---------------------------------------------------
class(mifs_global)
class(mifs_global[[1]])
class(c(mifs_global[[1]],mifs_global[[2]]))
## ----mifs_global_plot----------------------------------------------------
plot(mifs_global)
|
52f29dbf793d91071929f038dd8f28eb8686bfbd
|
3eab62651715a8a416ce8a5cc0cb370dfc8d4e8a
|
/Distance_Script.R
|
664fedd218533ecaba818e1531909e381cbe8314
|
[] |
no_license
|
capstone-pilot/DistanceMatrixScript
|
10c81df7ce045db0d87f14d90707298c468cb55b
|
1f354970b9d956724924f3245adf2a9212e99fa7
|
refs/heads/master
| 2020-07-23T11:00:49.908959
| 2019-09-19T10:39:59
| 2019-09-19T10:39:59
| 207,536,872
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 6,739
|
r
|
Distance_Script.R
|
# Distance_Script.R
# Builds two distance matrices (in miles):
#   - PFJ travel centers vs. all other PFJ travel centers
#   - PFJ travel centers vs. competitor stations
# then appends, per PFJ store, the number of neighbours within 1/5/15 miles
# and the distance to the nearest neighbour, and writes both matrices to CSV.

# Package for geospatial distance between two points
if (!require(geosphere)) {
  install.packages("geosphere")
  require(geosphere)
}
if (!require(stringr)) {
  install.packages("stringr")
  require(stringr)
}

# Set working directory where the input files exist
setwd("~/Desktop/Pilot/Data")
# Main locations file
PFJ <- read.csv("./Pilot_Updated/Master_PFJ_US_Locations_Aggregated.csv")
# List of Travel Centers
travelCenters <- read.csv("./Pilot_Updated/Location_To_Opis.csv")
# Incomplete list of competitors
competitor <- read.csv("Matt_Price_Competitor_List.csv")

# Subset data frames to useful information: id + coordinates of the PFJ
# locations that are travel centers.
PFJ_Unique <- unique(PFJ[, c(1, 25, 26)])
PFJ_Idx <- which(PFJ_Unique$location_id %in% travelCenters$LOCATION_ID)
DF1_Dist <- PFJ_Unique[PFJ_Idx, ]
numDF1 <- nrow(DF1_Dist)
DF1_Dist$STATION_NAME <- paste0("PFJ_", DF1_Dist$location_id)

## Begin data cleaning of the competitor dataset
competitor$City <- str_trim(competitor$City)
competitor$Longitude <- round(competitor$Longitude, 4)
competitor$Latitude <- round(competitor$Latitude, 4)
# Drop competitor rows that are actually PFJ locations (matched by OPIS id).
anti_comp_Idx <- which(competitor$OPIS.ID %in% travelCenters$OPIS_TRUCKSTOP_ID)
DF2_Dist <- competitor[-anti_comp_Idx, c(1, 6, 7)]
numDF2 <- nrow(DF2_Dist)
DF2_Dist <- cbind(DF2_Dist, "STATION_NAME" = paste0("COMP_", DF2_Dist$OPIS.ID))
# Match column names so both frames share ADDRESS_LONGITUDE/ADDRESS_LATITUDE.
names(DF2_Dist) <- names(DF1_Dist)

# Pairwise distances via geosphere::distm (vectorized; replaces the former
# O(n^2) double loops). distm() returns meters.
# BUG FIX: the original loop set compMatrix[i, j] <- NA when i == j, but the
# rows (PFJ stores) and columns (competitors) index *different* store lists,
# so i == j is not a self-distance -- valid PFJ/competitor pairs were being
# dropped from the neighbour counts and minimum distances.
compMatrix <- distm(
  cbind(DF1_Dist$ADDRESS_LONGITUDE, DF1_Dist$ADDRESS_LATITUDE),
  cbind(DF2_Dist$ADDRESS_LONGITUDE, DF2_Dist$ADDRESS_LATITUDE),
  fun = distHaversine
)
PFJMatrix <- distm(
  cbind(DF1_Dist$ADDRESS_LONGITUDE, DF1_Dist$ADDRESS_LATITUDE),
  fun = distHaversine
)
# A store is not its own neighbour: blank the PFJ self-distances (as the
# original did) so they are excluded from counts and minimums below.
diag(PFJMatrix) <- NA

# Rename rows and columns accordingly
rownames(compMatrix) <- DF1_Dist$STATION_NAME
colnames(compMatrix) <- DF2_Dist$STATION_NAME
rownames(PFJMatrix) <- DF1_Dist$STATION_NAME
colnames(PFJMatrix) <- DF1_Dist$STATION_NAME

# Convert distance from meters to miles (3.28084 ft/m and 5280 ft/mi)
compMatrix <- (compMatrix * 3.28084) / 5280
PFJMatrix <- (PFJMatrix * 3.28084) / 5280

##-------------------------PFJ Distance----------------------------##
PFJ_Matrix_Source <- PFJMatrix
Comp_Matrix_Source <- compMatrix

# Per-PFJ-store neighbour counts in [0,1), [1,5), [5,15) miles and the
# distance to the nearest other PFJ store (vectorized row summaries).
PFJ_OneMile <- rowSums(PFJ_Matrix_Source > 0 & PFJ_Matrix_Source < 1, na.rm = TRUE)
PFJ_FiveMile <- rowSums(PFJ_Matrix_Source >= 1 & PFJ_Matrix_Source < 5, na.rm = TRUE)
PFJ_FifteenMile <- rowSums(PFJ_Matrix_Source >= 5 & PFJ_Matrix_Source < 15, na.rm = TRUE)
PFJ_MinDist <- apply(PFJ_Matrix_Source, 1, min, na.rm = TRUE)
# Self-distances were NA while computing the stats; report them as 0 in the
# CSV, matching the original script's convention.
diag(PFJ_Matrix_Source) <- 0

##------------------------Competitor Distance------------------------##
# The competitor matrix has no self-pairs, so no NA handling is needed.
Comp_OneMile <- rowSums(Comp_Matrix_Source > 0 & Comp_Matrix_Source < 1, na.rm = TRUE)
Comp_FiveMile <- rowSums(Comp_Matrix_Source >= 1 & Comp_Matrix_Source < 5, na.rm = TRUE)
Comp_FifteenMile <- rowSums(Comp_Matrix_Source >= 5 & Comp_Matrix_Source < 15, na.rm = TRUE)
Comp_MinDist <- apply(Comp_Matrix_Source, 1, min, na.rm = TRUE)

PFJ_Matrix_Source <- cbind(PFJ_Matrix_Source, PFJ_OneMile, PFJ_FiveMile, PFJ_FifteenMile, PFJ_MinDist)
Comp_Matrix_Source <- cbind(Comp_Matrix_Source, Comp_OneMile, Comp_FiveMile, Comp_FifteenMile, Comp_MinDist)

fileName1 <- paste0(getwd(), "/PFJ_Matrix.csv")
fileName2 <- paste0(getwd(), "/Competitor_Matrix.csv")
write.csv(PFJ_Matrix_Source, file = fileName1)
write.csv(Comp_Matrix_Source, file = fileName2)
# (The dead, commented-out "BitRot" station-name normalization code that
# followed here has been removed.)
|
b1b6b8d15a5a4e31661c3fd3e385736e24c2d536
|
cb66ae3bf5bd2422e70df574340e0d5f5388eb8e
|
/functions.r
|
5474855703a0a3069975b378844b75c2cdbb90ca
|
[] |
no_license
|
jvoorheis/MSA_Ineq
|
779f28947f243495d4c28b6841b56d2c51dc97e6
|
3dbec52e82d0ae86d6d88c6550aadba4b43cb81a
|
refs/heads/master
| 2016-08-02T22:44:29.331869
| 2013-12-28T07:50:20
| 2013-12-28T07:50:20
| 11,228,792
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 18,173
|
r
|
functions.r
|
library(ggplot2)
library(GB2)
ratio9010f <- function(inc) {
  # 90th/10th percentile ratio; the tiny offset guards against division by 0.
  upper <- quantile(inc, probs = 0.9)
  lower <- quantile(inc, probs = 0.1)
  return(upper / (lower + 0.000001))
}
ratio95medf <- function(inc) {
  # 95th percentile over the median; small offset guards against /0.
  p95 <- quantile(inc, probs = 0.95)
  p50 <- quantile(inc, probs = 0.5)
  return(p95 / (p50 + 0.000001))
}
ratio99medf <- function(inc) {
  # 99th percentile over the median; small offset guards against /0.
  p99 <- quantile(inc, probs = 0.99)
  p50 <- quantile(inc, probs = 0.5)
  return(p99 / (p50 + 0.000001))
}
ratio8020f <- function(inc) {
  # 80th/20th percentile ratio; the tiny offset guards against division by 0.
  # BUG FIX: the numerator used probs=0.9 (apparent copy-paste from
  # ratio9010f), which made this a 90/20 ratio despite the name.
  return(quantile(inc, probs = 0.8) / (quantile(inc, probs = 0.2) + 0.000001))
}
generate_gini_plots <- function(units, inc_data) {
  # Write one Gini-trend plot per MSA in `units` to <MSA>.jpg in the working
  # directory. `inc_data` must have columns MSA, year and Gini_GB2.
  for (unit in units) {
    unit_df <- subset(inc_data, inc_data$MSA == unit)
    # MSA labels can contain "/", which is illegal in a file name; keep only
    # the part before the first slash.
    unit_name <- unlist(strsplit(unit, "/"))[1]
    filename <- paste(unit_name, ".jpg", sep = "")
    jpeg(file = filename)
    print(ggplot(unit_df, aes(year)) +
      geom_line(aes(y = Gini_GB2, colour = "gini")) +
      # FIX: opts() was deprecated and removed from ggplot2; labs() sets the
      # title in all current versions.
      labs(title = unit))
    dev.off()
  }
}
generate_gini_plots_state <- function(units, inc_data, type = "gini") {
  # Write one trend plot per state in `units` to <state>_<type>.png.
  # type = "gini" plots Gini_StateGB2; type = "top1" plots Top1Share.
  # `inc_data` must have columns State, year and the plotted measure.
  for (unit in units) {
    unit_df <- subset(inc_data, inc_data$State == unit)
    # Keep only the part of the label before any "/" so the file name is legal.
    unit_name <- unlist(strsplit(unit, "/"))[1]
    filename <- paste(unit_name, "_", type, ".png", sep = "")
    if (type == "gini") {
      png(file = filename)
      print(ggplot(unit_df, aes(year)) +
        geom_line(aes(y = Gini_StateGB2, colour = "gini")) +
        # FIX: opts() was removed from ggplot2; labs() sets the title.
        labs(title = unit))
      dev.off()
    }
    if (type == "top1") {
      png(file = filename)
      print(ggplot(unit_df, aes(year)) +
        geom_line(aes(y = Top1Share, colour = "Top1")) +
        labs(title = unit))
      dev.off()
    }
  }
}
sqrt_eq <- function(pernum) {
  # Square-root equivalence scale: sqrt of the household size, taken as the
  # maximum person number in `pernum`.
  sqrt(max(pernum))
}
topcode_sub <- function(inc, fit2) {
  # For a positive (topcoded) income, draw a replacement from the fitted GB2
  # distribution's upper tail above `inc`; non-positive incomes pass through
  # unchanged. `fit2` is a GB2 fit whose parameters sit in fit2$opt1$par.
  if (inc <= 0) {
    return(inc)
  }
  p <- fit2$opt1$par
  lower_tail <- pgb2(inc, p[1], p[2], p[3], p[4])
  qgb2(runif(1, min = lower_tail, max = 1), p[1], p[2], p[3], p[4])
}
top1share <- function(inc) {
  # Share of total income held by observations at or above the 99th
  # percentile of `inc`.
  cutoff <- quantile(inc, probs = 0.99)
  sum(subset(inc, inc >= cutoff)) / sum(inc)
}
ACS_trunc <- function(inc) {
  # Indicator for positive income: 1 where inc > 0, 0 otherwise.
  # Generalization: vectorized (the original scalar if/else errors on
  # length > 1 input in modern R); scalar results are unchanged.
  as.numeric(inc > 0)
}
X_split <- function(year) {
  # Return the text after the first "X" -- strips the "X" prefix that
  # read.csv/data.frame prepend to numeric column names.
  strsplit(year, "X", fixed = TRUE)[[1]][2]
}
state_strip <- function(msa) {
  # Return the portion of a "City, ST" MSA label after the first comma
  # (the leading space, if any, is preserved).
  strsplit(msa, ",", fixed = TRUE)[[1]][2]
}
DC_switch <- function(state) {
  # Normalize the label used for the capital: "Washington DC" becomes
  # "District of Columbia"; every other state name passes through unchanged.
  switch(state,
    "Washington DC" = "District of Columbia",
    state
  )
}
lorenz_point <- function(income, weights = c(rep(1, length(income))), ordinate = 0.5) {
  # Lorenz curve ordinate L(ordinate) of `income`, computed from the
  # empirical Lorenz curve returned by Lc() (ineq package); `weights` is
  # forwarded as Lc()'s `n` argument. When ordinate*n falls between two
  # curve points, the two neighbouring ordinates are averaged.
  n <- length(income)
  L_temp <- Lc(income, n = weights)
  pos <- ordinate * n
  # BUG FIX: `%%` binds tighter than `*`, so the original test
  # `ordinate*n %% 1 == 0` evaluated ordinate*(n %% 1) == 0, which is TRUE
  # for every integer n -- the averaging branch was unreachable and
  # fractional positions were silently truncated.
  if (pos %% 1 == 0) {
    return(L_temp$L[as.integer(pos)])
  }
  mean(c(L_temp$L[as.integer(pos)], L_temp$L[as.integer(pos) + 1]))
}
# Replace implausible 90/10 ratios (> 100) in the MSA-level Census results
# with a ratio recomputed from the household microdata, restricted to
# positive cell-mean equivalized incomes (> 0.01).
# NOTE(review): depends on the data frame `Census.work.hh` existing in the
# global environment ("must load Census.work.hh first") and on ratio9010f()
# defined above in this file.
Census_9010fix<-function(Census_NatlGB2.df){
#must load Census.work.hh first!
for (i in 1:length(Census_NatlGB2.df$year)){
# a 90/10 ratio above 100 is treated as an artifact (e.g. a near-zero 10th
# percentile) and recomputed from the microdata for that MSA/year
if (Census_NatlGB2.df$X9010_ratio[i]>100){
Census_NatlGB2.df$X9010_ratio[i]<-ratio9010f(subset(Census.work.hh, Census.work.hh$cellmean_equivinc>0.01 &
Census.work.hh$MSA==Census_NatlGB2.df$MSA[i] &
Census.work.hh$year==Census_NatlGB2.df$year[i])$cellmean_equivinc)
}
}
return(Census_NatlGB2.df)
}
# Replace implausible 90/10 ratios (> 100) in the MSA-level ACS results with
# a ratio recomputed from the household microdata, restricted to positive
# cell-mean equivalized incomes (> 0.01).
# NOTE(review): depends on the data frame `ACS.work.hh` existing in the
# global environment ("must load ACS.work.hh first") and on ratio9010f()
# defined above in this file.
ACS_9010fix<-function(ACS_NatlGB2.df){
#must load ACS.work.hh first!
for (i in 1:length(ACS_NatlGB2.df$year)){
# a 90/10 ratio above 100 is treated as an artifact and recomputed from the
# microdata for that MSA/year
if (ACS_NatlGB2.df$X9010_ratio[i]>100){
ACS_NatlGB2.df$X9010_ratio[i]<-ratio9010f(subset(ACS.work.hh, ACS.work.hh$cellmean_equivinc>0.01 &
ACS.work.hh$MSA==ACS_NatlGB2.df$MSA[i] &
ACS.work.hh$year==ACS_NatlGB2.df$year[i])$cellmean_equivinc)
}
}
return(ACS_NatlGB2.df)
}
Lorenz_test_result <- function(test_stat) {
  # Classify a vector of Lorenz-ordinate test statistics using the +/-3.01
  # critical value: dominance of A over B, of B over A, no dominance, or
  # crossing Lorenz curves. NAs are ignored.
  hi <- max(test_stat, na.rm = TRUE)
  lo <- min(test_stat, na.rm = TRUE)
  if (hi >= 3.01 && lo >= -3.01) {
    return("A dominates B")
  }
  if (lo <= -3.01 && hi <= 3.01) {
    return("B dominates A")
  }
  if (hi >= 3.01 && lo <= -3.01) {
    return("Lorenz curves cross")
  }
  "No dominance"
}
A_dom_B <- function(test_stat) {
  # 1 if the test statistics indicate that A Lorenz-dominates B
  # (max >= 3.01 and min >= -3.01, NAs ignored), else 0.
  hi <- max(test_stat, na.rm = TRUE)
  lo <- min(test_stat, na.rm = TRUE)
  if (hi >= 3.01 && lo >= -3.01) 1 else 0
}
B_dom_A <- function(test_stat) {
  # 1 if the test statistics indicate that B Lorenz-dominates A
  # (min <= -3.01 and max <= 3.01, NAs ignored), else 0.
  hi <- max(test_stat, na.rm = TRUE)
  lo <- min(test_stat, na.rm = TRUE)
  if (lo <= -3.01 && hi <= 3.01) 1 else 0
}
Lorenz_cross <- function(test_stat) {
  # 1 if the test statistics indicate crossing Lorenz curves
  # (min <= -3.01 and max >= 3.01, NAs ignored), else 0.
  hi <- max(test_stat, na.rm = TRUE)
  lo <- min(test_stat, na.rm = TRUE)
  if (lo <= -3.01 && hi >= 3.01) 1 else 0
}
lorenz_point_vector <- function(income, weights = c(rep(1, length(income))), ordinates = 19) {
  # Vector of Lorenz curve ordinates at p = k/(ordinates+1), k = 1..ordinates,
  # computed from the empirical curve returned by Lc() (ineq package);
  # `weights` is forwarded as Lc()'s `n` argument.
  #
  # BUG FIX (two defects in the original):
  #  1. the integrality test referenced `i` *before* any loop defined it,
  #     so the function depended on a leftover global `i`;
  #  2. `(i/(ordinates+1))*n %% 1` parses as (...) * (n %% 1) because `%%`
  #     binds tighter than `*`, so the test was 0 == 0 for every integer n
  #     and the averaging branch was unreachable.
  # The test is now evaluated per ordinate with explicit parentheses,
  # mirroring lorenz_point().
  n <- length(income)
  L_temp <- Lc(income, n = weights)
  lorenz_ords <- numeric(ordinates)
  for (k in seq_len(ordinates)) {
    pos <- (k / (ordinates + 1)) * n
    if (pos %% 1 == 0) {
      lorenz_ords[k] <- L_temp$L[as.integer(pos)]
    } else {
      lorenz_ords[k] <- mean(c(L_temp$L[as.integer(pos)], L_temp$L[as.integer(pos) + 1]))
    }
  }
  lorenz_ords
}
lorenz_var_vector <- function(income, weights = c(rep(1, length(income))), ordinates = 19) {
  # Bootstrap variance (200 replicates via boot()) of each Lorenz ordinate
  # at p = k/(ordinates+1), k = 1..ordinates.
  vapply(
    seq_len(ordinates),
    function(k) var(boot(income, lorenz_point, 200, ordinate = k / (ordinates + 1))$t),
    numeric(1)
  )
}
library(ggplot2)
library(GB2)
ratio9010f<-function(inc){
return(quantile(inc, probs=0.9)/(quantile(inc,probs=0.1)+0.000001))
}
ratio95medf<-function(inc){
return(quantile(inc, probs=0.95)/(quantile(inc,probs=0.5)+0.000001))
}
ratio99medf<-function(inc){
return(quantile(inc, probs=0.99)/(quantile(inc,probs=0.5)+0.000001))
}
generate_gini_plots<-function(units, inc_data){
for (i in units){
temp.df<-subset(inc_data, inc_data$MSA==i)
temp.MSA<-unlist(strsplit(i, "/"))[1]
filename<-paste(temp.MSA, ".jpg", sep="")
jpeg(file=filename)
print(ggplot(temp.df, aes(year)) +
geom_line(aes(y=Gini_GB2, colour="gini"))+
opts(title=i))
dev.off()
}
}
generate_gini_plots_state<-function(units, inc_data, type="gini"){
for (i in units){
temp.df<-subset(inc_data, inc_data$State==i)
temp.MSA<-unlist(strsplit(i, "/"))[1]
filename<-paste(temp.MSA, "_", type, ".png", sep="")
if (type=="gini"){
png(file=filename)
print(ggplot(temp.df, aes(year)) +
geom_line(aes(y=Gini_StateGB2, colour="gini"))+
opts(title=i))
dev.off()
}
if (type=="top1"){
png(file=filename)
print(ggplot(temp.df, aes(year)) +
geom_line(aes(y=Top1Share, colour="Top1"))+
opts(title=i))
dev.off()
}
}
}
sqrt_eq<-function(pernum){
return(sqrt(max(pernum)))
}
topcode_sub<-function(inc, fit2){
if (inc>0){
bottom<-pgb2(inc, fit2$opt1$par[1], fit2$opt1$par[2], fit2$opt1$par[3], fit2$opt1$par[4])
return(qgb2(runif(1,min=bottom, max=1), fit2$opt1$par[1], fit2$opt1$par[2], fit2$opt1$par[3], fit2$opt1$par[4]))
}
else{
return(inc)
}
}
top1share <- function(inc, weights = c(rep(1, length(inc)))) {
  # Share of total income held by observations at or above the 99th
  # percentile. NOTE(review): `weights` is accepted but currently unused.
  cutoff <- quantile(inc, probs = 0.99)
  top_inc <- inc[which(inc >= cutoff)]
  sum(top_inc) / sum(inc)
}
ACS_trunc <- function(inc) {
  # Indicator for positive income: 1 if inc > 0, else 0 (scalar input).
  if (inc > 0) 1 else 0
}
X_split <- function(year) {
  # Strips the leading "X" from a year label, e.g. "X2005" -> "2005"
  # (strsplit puts the text before "X" in element 1, after it in element 2).
  strsplit(year, "X")[[1]][2]
}
state_strip <- function(msa) {
  # Returns the text after the first comma of an MSA label, e.g.
  # "Boston, MA" -> " MA" (any leading space is preserved).
  strsplit(msa, ",")[[1]][2]
}
DC_switch <- function(state) {
  # Normalizes "Washington DC" to the Census name "District of Columbia";
  # all other state names pass through unchanged.
  if (state == "Washington DC") "District of Columbia" else state
}
lorenz_point <- function(income, weights = c(rep(1, length(income))), ordinate = 0.5) {
  # Lorenz ordinate L(p) at p = `ordinate`, read from the empirical Lorenz
  # curve (ineq::Lc). If p*n falls between two curve points the two
  # neighboring ordinates are averaged.
  # FIX: the original condition `ordinate*n %% 1 == 0` parsed as
  # `ordinate * (n %% 1)` because %% binds tighter than * in R; since n is
  # an integer length this was always 0, so the interpolating branch was
  # unreachable. The product is now parenthesized.
  n <- length(income)
  L_temp <- Lc(income, n = weights)
  idx <- ordinate * n
  if (idx %% 1 == 0) {
    return(L_temp$L[as.integer(idx)])
  }
  mean(c(L_temp$L[as.integer(idx)], L_temp$L[as.integer(idx) + 1]))
}
Census_9010fix<-function(Census_NatlGB2.df){
# Repairs implausible 90/10 ratios (> 100) in the GB2-imputed Census
# results by recomputing the ratio directly from the matching MSA-year
# cell-mean equivalized incomes (restricted to incomes > 0.01).
# NOTE(review): reads the global data frame Census.work.hh -- it must be
# loaded before calling this function.
#must load Census.work.hh first!
for (i in 1:length(Census_NatlGB2.df$year)){
# A ratio above 100 is treated as a GB2-fit artifact and replaced.
if (Census_NatlGB2.df$X9010_ratio[i]>100){
Census_NatlGB2.df$X9010_ratio[i]<-ratio9010f(subset(Census.work.hh, Census.work.hh$cellmean_equivinc>0.01 &
Census.work.hh$MSA==Census_NatlGB2.df$MSA[i] &
Census.work.hh$year==Census_NatlGB2.df$year[i])$cellmean_equivinc)
}
}
return(Census_NatlGB2.df)
}
ACS_9010fix<-function(ACS_NatlGB2.df){
# Repairs implausible 90/10 ratios (> 100) in the GB2-imputed ACS results
# by recomputing the ratio directly from the matching MSA-year cell-mean
# equivalized incomes (restricted to incomes > 0.01).
# NOTE(review): reads the global data frame ACS.work.hh -- it must be
# loaded before calling this function.
#must load ACS.work.hh first!
for (i in 1:length(ACS_NatlGB2.df$year)){
# A ratio above 100 is treated as a GB2-fit artifact and replaced.
if (ACS_NatlGB2.df$X9010_ratio[i]>100){
ACS_NatlGB2.df$X9010_ratio[i]<-ratio9010f(subset(ACS.work.hh, ACS.work.hh$cellmean_equivinc>0.01 &
ACS.work.hh$MSA==ACS_NatlGB2.df$MSA[i] &
ACS.work.hh$year==ACS_NatlGB2.df$year[i])$cellmean_equivinc)
}
}
return(ACS_NatlGB2.df)
}
Lorenz_test_result <- function(test_stat) {
  # Classifies a vector of pointwise Lorenz-dominance test statistics
  # using a +/- 3.01 significance threshold. Returns NULL (invisibly)
  # only if no category matches, which cannot happen for finite input.
  hi <- max(test_stat, na.rm = TRUE)
  lo <- min(test_stat, na.rm = TRUE)
  if (hi >= 3.01 && lo >= -3.01) return("A dominates B")
  if (lo <= -3.01 && hi <= 3.01) return("B dominates A")
  if (hi < 3.01 && lo > -3.01) return("No dominance")
  if (lo <= -3.01 && hi >= 3.01) return("Lorenz curves cross")
}
A_dom_B <- function(test_stat) {
  # 1 if distribution A Lorenz-dominates B at the 3.01 threshold
  # (max reaches +3.01 and no statistic falls below -3.01), else 0.
  dominates <- max(test_stat, na.rm = TRUE) >= 3.01 && min(test_stat, na.rm = TRUE) >= -3.01
  if (dominates) 1 else 0
}
B_dom_A <- function(test_stat) {
  # 1 if distribution B Lorenz-dominates A at the 3.01 threshold
  # (min reaches -3.01 and no statistic rises above +3.01), else 0.
  dominates <- min(test_stat, na.rm = TRUE) <= -3.01 && max(test_stat, na.rm = TRUE) <= 3.01
  if (dominates) 1 else 0
}
Lorenz_cross <- function(test_stat) {
  # 1 if the Lorenz curves cross (significant statistics in both
  # directions at the 3.01 threshold), else 0.
  crosses <- min(test_stat, na.rm = TRUE) <= -3.01 && max(test_stat, na.rm = TRUE) >= 3.01
  if (crosses) 1 else 0
}
lorenz_point_vector <- function(income, weights = c(rep(1, length(income))), ordinates = 19) {
  # Empirical Lorenz ordinates L(i/(ordinates+1)) for i = 1..ordinates,
  # averaging neighboring curve points when the index is fractional.
  # FIXES vs original:
  #  * the branch condition referenced loop index `i` before any loop had
  #    started, so it read a stale/undefined global `i`;
  #  * `... * n %% 1` parsed as `... * (n %% 1)` (always 0, since n is an
  #    integer length), making the interpolation branch unreachable.
  # The integer/fractional check is now done per ordinate inside the loop.
  n <- length(income)
  L_temp <- Lc(income, n = weights)
  lorenz_ords <- c(rep(0, ordinates))
  for (i in 1:ordinates) {
    idx <- (i / (ordinates + 1)) * n
    if (idx %% 1 == 0) {
      lorenz_ords[i] <- L_temp$L[as.integer(idx)]
    } else {
      lorenz_ords[i] <- mean(c(L_temp$L[as.integer(idx)], L_temp$L[as.integer(idx) + 1]))
    }
  }
  return(lorenz_ords)
}
lorenz_var_vector<-function(income, weights=c(rep(1, length(income))), ordinates=19){
# Bootstrap variance (200 resamples via boot::boot) of each empirical
# Lorenz ordinate at p = i/(ordinates+1), i = 1..ordinates.
# NOTE(review): `weights` is accepted but not forwarded to boot().
lorenz_var<-c(rep(0, ordinates))
for (i in 1:ordinates){
# $t holds the bootstrap replicates of lorenz_point at this ordinate.
lorenz_var[i]<-var(boot(income, lorenz_point, 200, ordinate=(i/(ordinates+1)))$t)
}
return(lorenz_var)
}
jackknife_ineq <- function(inc, statistic, weights = NULL) {
  # Jackknife (leave-one-out pseudo-value) variance estimate for the
  # inequality statistics used in the GB2 imputation.
  # `statistic` is one of "gini", "theil", "9010", "top1", or "gini_w"
  # (weighted Gini; requires `weights`); any other value returns NULL.
  # Requires a registered parallel backend for foreach/%dopar%.
  # CHANGES vs original: the manual element-copy loops that built each
  # leave-one-out sample are replaced by negative indexing (inc[-i]),
  # and the full-sample statistic is computed once instead of inside
  # every parallel iteration. Pseudo-values are unchanged.
  n <- length(inc)
  if (statistic == "gini") {
    full_stat <- gini(inc)
    pseudo <- foreach(i = 1:n, .combine = c) %dopar% {
      n * full_stat - (n - 1) * gini(inc[-i])
    }
    return(var(pseudo))
  }
  if (statistic == "theil") {
    full_stat <- Theil(inc)
    pseudo <- foreach(i = 1:n, .combine = c) %dopar% {
      n * full_stat - (n - 1) * Theil(inc[-i])
    }
    return(var(pseudo))
  }
  if (statistic == "9010") {
    full_stat <- ratio9010f(inc)
    pseudo <- foreach(i = 1:n, .combine = c) %dopar% {
      n * full_stat - (n - 1) * ratio9010f(inc[-i])
    }
    return(var(pseudo))
  }
  if (statistic == "top1") {
    full_stat <- top1share(inc)
    pseudo <- foreach(i = 1:n, .combine = c) %dopar% {
      n * full_stat - (n - 1) * top1share(inc[-i])
    }
    return(var(pseudo))
  }
  if (statistic == "gini_w") {
    # Weighted Gini: drop the matching weight along with the observation.
    full_stat <- gini(inc, w = weights)
    pseudo <- foreach(i = 1:n, .combine = c) %dopar% {
      n * full_stat - (n - 1) * gini(inc[-i], w = weights[-i])
    }
    return(var(pseudo))
  }
}
boot9010 <- function(data, indices) {
  # Statistic function for boot::boot(): 90/10 percentile ratio of the
  # resampled data.
  smp <- data[indices]
  p90 <- quantile(smp, probs = 0.9)
  p10 <- quantile(smp, probs = 0.1)
  p90 / p10
}
boottop1 <- function(data, indices) {
  # Statistic function for boot::boot(): top-1% income share of the
  # resampled data.
  smp <- data[indices]
  cutoff <- quantile(smp, probs = 0.99)
  sum(smp[which(smp >= cutoff)]) / sum(smp)
}
Is_positive <- function(num) {
  # Maps a scalar to 1 (positive), 0 (non-positive), or NA (missing).
  if (is.na(num)) {
    return(NA)
  }
  if (num > 0) 1 else 0
}
S_phi <- function(Lc1, Lc2, max_n) {
  # Pointwise gap (Lc2 - Lc1) between two Lorenz curves, evaluated on an
  # interior grid of max_n - 1 equally spaced ordinates in (0, 1).
  # Each curve is indexed by truncating p * length(curve) to an integer.
  grid <- seq(1 / max_n, 1 - 1 / max_n, 1 / max_n)
  vapply(grid, function(p) {
    Lc2$L[as.integer(p * length(Lc2$L))] - Lc1$L[as.integer(p * length(Lc1$L))]
  }, numeric(1))
}
I_phi <- function(Lc1, Lc2, max_n, min_fallback) {
  # Share of interior grid ordinates at which curve Lc2 lies strictly
  # above curve Lc1 (integrated Lorenz-dominance indicator).
  # Grid resolution is min(max_n, min_fallback).
  grid_n <- min(max_n, min_fallback)
  grid <- seq(1 / grid_n, 1 - 1 / grid_n, 1 / grid_n)
  gap <- Lc2$L[as.integer(grid * length(Lc2$L))] - Lc1$L[as.integer(grid * length(Lc1$L))]
  # 1/0/NA per grid point; an NA gap propagates to the sum (no na.rm).
  indicators <- apply(data.frame(gap), 1, Is_positive)
  return((1 / (grid_n)) * sum(indicators))
}
I_phi_star <- function(Lc1_star, Lc2_star, Lc1, Lc2, max_n, min_fallback) {
  # Bootstrap-recentered version of I_phi: share of grid points at which
  # the resampled curve gap exceeds the original-sample curve gap.
  grid_n <- min(max_n, min_fallback)
  grid <- seq(1 / grid_n, 1 - 1 / grid_n, 1 / grid_n)
  star_gap <- Lc2_star$L[as.integer(grid * length(Lc2_star$L))] - Lc1_star$L[as.integer(grid * length(Lc1_star$L))]
  base_gap <- Lc2$L[as.integer(grid * length(Lc2$L))] - Lc1$L[as.integer(grid * length(Lc1$L))]
  indicators <- apply(data.frame(star_gap - base_gap), 1, Is_positive)
  return((1 / (grid_n)) * sum(indicators))
}
S_phi_bootstrap<-function(pop1, pop2, reps, gridfactor){
# Bootstrap procedure for the supremum Lorenz-gap statistic S_phi.
# Returns the count of replicates whose recentered, scaled supremum gap
# is positive (NA replicates dropped via na.rm below).
# NOTE(review): the grid has gridfactor * min(n1, n2) points; requires
# foreach (%do% runs sequentially), ineq::Lc, and siblings S_phi /
# Is_positive.
max_n<-gridfactor*min(c(length(pop1),length(pop2)))
# Observed (full-sample) pointwise Lorenz-curve gap.
pop_phi<-S_phi(Lc(pop1), Lc(pop2), max_n)
# Two-sample scaling n1*n2/(n1+n2), in double precision to avoid
# integer overflow for large samples.
T_n <- (as.numeric(length(pop1))*as.numeric(length(pop2)))/(as.numeric(length(pop1))+as.numeric(length(pop2)))
phi_hat <- foreach(i=1:reps, .combine=c)%do%{
# Recentered bootstrap supremum minus the scaled observed supremum.
T_n * max(S_phi(Lc(sample(pop1, replace=T)), Lc(sample(pop2, replace=T)), max_n)-pop_phi)-T_n*max(pop_phi)
}
# Convert each replicate to a 1/0/NA positivity indicator.
phi_hat<-apply(data.frame(phi_hat), 1, Is_positive)
# phi_hat<-numeric(reps)
# for (i in 1:reps){
# phi_hat[i]<-T_n * max(S_phi(Lc(sample(pop1, replace=T)), Lc(sample(pop2, replace=T)), max_n)-pop_phi)
# }
return(sum(phi_hat, na.rm=T))
#return(phi_hat)
#return(ecdf(phi_hat)(T_n*max(pop_phi)))
}
I_phi_bootstrap<-function(pop1, pop2, reps, gridfactor, min_fallback){
# Bootstrap procedure for the integrated Lorenz-dominance statistic
# I_phi: returns the count of replicates whose recentered, scaled
# statistic is positive (NA replicates dropped via na.rm below).
# Grid resolution is gridfactor * min(n1, n2), capped by min_fallback.
# Requires foreach (%do% runs sequentially), ineq::Lc, and siblings
# I_phi / I_phi_star / Is_positive.
max_n<-gridfactor*min(c(length(pop1),length(pop2)))
# Observed (full-sample) dominance share.
pop_phi<-I_phi(Lc(pop1), Lc(pop2), max_n, min_fallback)
# Two-sample scaling n1*n2/(n1+n2) in double precision (overflow guard).
T_n <- (as.numeric(length(pop1))*as.numeric(length(pop2)))/(as.numeric(length(pop1))+as.numeric(length(pop2)))
phi_hat <- foreach(i=1:reps, .combine=c)%do%{
# Recentered bootstrap statistic for one resample of each population.
T_n * I_phi_star(Lc(sample(pop1, replace=T)), Lc(sample(pop2, replace=T)), Lc(pop1), Lc(pop2), max_n, min_fallback)-T_n*pop_phi
}
# Convert each replicate to a 1/0/NA positivity indicator.
phi_hat<-apply(data.frame(phi_hat), 1, Is_positive)
# phi_hat<-numeric(reps)
# for (i in 1:reps){
# phi_hat[i]<-T_n * max(S_phi(Lc(sample(pop1, replace=T)), Lc(sample(pop2, replace=T)), max_n)-pop_phi)
# }
return(sum(phi_hat, na.rm=T))
#return(phi_hat)
#return(ecdf(phi_hat)(T_n*max(pop_phi)))
}
phi_bootstrap <- function(pop1, pop2, reps) {
  # Bootstrap count of positive recentered I_phi statistics for Lorenz
  # dominance testing between pop1 and pop2 (grid = min sample size).
  # FIX: I_phi() and I_phi_star() require a `min_fallback` argument, so
  # the original 3- and 5-argument calls raised "argument 'min_fallback'
  # is missing" at runtime. We pass max_n, which leaves the grid size
  # unchanged (min(max_n, max_n) == max_n), i.e. the grid the original
  # presumably intended before min_fallback was introduced.
  max_n <- min(c(length(pop1), length(pop2)))
  pop_phi <- I_phi(Lc(pop1), Lc(pop2), max_n, max_n)
  # Two-sample scaling n1*n2/(n1+n2) in double precision (overflow guard).
  T_n <- (as.numeric(length(pop1)) * as.numeric(length(pop2))) / (as.numeric(length(pop1)) + as.numeric(length(pop2)))
  phi_hat <- foreach(i = 1:reps, .combine = c) %do% {
    T_n * I_phi_star(Lc(sample(pop1, replace = T)), Lc(sample(pop2, replace = T)), Lc(pop1), Lc(pop2), max_n, max_n) - T_n * pop_phi
  }
  # 1/0/NA positivity indicator per replicate; NAs dropped by na.rm.
  phi_hat <- apply(data.frame(phi_hat), 1, Is_positive)
  return(sum(phi_hat, na.rm = T))
}
davidson_gini <- function(x, type = "point") {
  # Bias-corrected Gini coefficient and its asymptotic variance following
  # Davidson's linearization approach.
  # `type`: "point" -> Gini only, "var" -> variance only, "both" ->
  # c(gini, variance); any other value returns NULL.
  y_i <- sort(x)                 # order statistics
  n <- length(y_i)
  ranks <- 1:n
  # w_i = (2i - 1) y_(i) / (2n); its mean is the rank-weighted integral I.
  w_i <- ((2 * ranks - 1) * y_i) / (2 * n)
  # Cumulative means (1/n) * sum(y_(1:k)).
  # PERF FIX: replaces the original per-element apply() (O(n^2) total)
  # with a single O(n) cumulative-sum scan; values are identical.
  v_i <- cumsum(y_i) / n
  I_hat <- mean(w_i)
  # Bias-corrected point estimate, scaled by n/(n-1).
  G_hat <- n * ((2 * I_hat / mean(y_i)) - 1) / (n - 1)
  if (type == "point") {
    return(G_hat)
  } else if (type == "var") {
    Z_hat <- 2 * (w_i - v_i) - (G_hat + 1) * y_i
    var_G <- sum((Z_hat - mean(Z_hat))^2) / ((n * mean(y_i))^2)
    return(var_G)
  } else if (type == "both") {
    Z_hat <- 2 * (w_i - v_i) - (G_hat + 1) * y_i
    var_G <- sum((Z_hat - mean(Z_hat))^2) / ((n * mean(y_i))^2)
    return(c(G_hat, var_G))
  }
}
Lorenz_KB <- function(inc, weight = "default", ordinate, type = "mean") {
  # Lorenz ordinate estimator L(p) at p = `ordinate`, with a
  # linearization-based variance ("variance" type) in the style of
  # Kovacevic & Binder estimating equations.
  # FIX: the sentinel check used `weight == "default"`, which produces a
  # length-> 1 logical vector (an error in if()) whenever a numeric weight
  # vector is passed; identical() makes numeric weights usable.
  if (identical(weight, "default")) {
    weight <- rep(1, length(inc))
  }
  # K&B survey sampling weights not implemented yet.
  N <- length(inc)
  xi_p <- quantile(inc, probs = ordinate)
  N_hat <- sum(weight)
  mu_hat <- mean(inc)
  # Indicator I(inc_i <= xi_p); vectorized comparison replaces the
  # original row-wise apply() (identical values, O(n)).
  I_vec <- as.numeric(inc <= xi_p)
  L_hat <- (1 / (N_hat * mu_hat)) * sum(weight * inc * I_vec)
  if (type == "mean") {
    return(L_hat)
  } else if (type == "variance") {
    # Linearized influence values u_i and the resulting variance estimate.
    u_i <- (1 / (N_hat * mu_hat)) * ((inc - xi_p) * I_vec + ordinate * xi_p - inc * L_hat)
    var_hat <- N * var(u_i) * (sum(weight^2))
    return(var_hat)
  }
}
|
db8b0c88af9702389563c74d74c958e21b443426
|
94b3ae0be487a1824ee98c5bf6c8cd81e9f0bd18
|
/tests/test-all.R
|
cd055b97466f0f46e8910dcbb231802afc6af603
|
[] |
no_license
|
cran/rticles
|
4e50100f04289440b1ef12544b393ecb74e5a8f2
|
2cd04a5ef4698a9ae352b97250b55ac220cb2741
|
refs/heads/master
| 2023-06-22T06:23:58.674846
| 2023-05-15T12:30:02
| 2023-05-15T12:30:02
| 54,406,556
| 0
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 40
|
r
|
test-all.R
|
library(testit)
test_pkg("rticles")
|
d7763edd041a928fcf25f75c53ec8879f3104ff4
|
d4918568929a592a40ee705dc91614be17603c2c
|
/man/num2imp.Rd
|
4dc03a5eab05998db518dffc8a7c6a3087c2052d
|
[] |
no_license
|
kevin05jan/iop
|
d722f6c8520cd457872f9a4f2d83294e1a3dc675
|
8a8b391976982985f1cfe66535d58a1606d4099b
|
refs/heads/master
| 2020-08-02T14:25:27.256169
| 2019-10-19T09:43:06
| 2019-10-19T09:43:06
| 211,387,761
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 625
|
rd
|
num2imp.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/tmp.R
\name{num2imp}
\alias{num2imp}
\title{Feature selection using randomForest}
\usage{
num2imp(x, y, thresh = 0.5, ntree = 10)
}
\arguments{
\item{x}{: data.frame with numerical variables and class label}
\item{thresh}{: numerical; selects variables with importance >= thresh}
\item{ntree}{: integer; number of trees}
\item{f}{: formula}
}
\value{
num2imp object
}
\description{
Features are selected on the basis of variable$importance
}
\examples{
m = num2imp(Species ~ ., iris)
dat = predict(m, iris)
}
|
e3c71d1c2a0a635d0e0aecdcfebd5ec84459e868
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/queueing/examples/Inputs.o_MMInf.Rd.R
|
2d9edaaaff07e953c574de50bd117ed8bfb60c45
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 364
|
r
|
Inputs.o_MMInf.Rd.R
|
library(queueing)
### Name: Inputs.o_MMInf
### Title: Returns the input parameters of a M/M/Infinite queueing model
### Aliases: Inputs.o_MMInf
### Keywords: M/M/Infinite
### ** Examples
## create input parameters
i_mminf <- NewInput.MMInf(lambda=0.25, mu=4, n=0)
## Build the model
o_mminf <- QueueingModel(i_mminf)
## Retunns the Inputs
Inputs(o_mminf)
|
d2fe920bab89262766748d07695d1c10448df27b
|
d60a4a66919a8c54d29a4677574b418107b4131d
|
/man/perwinba.Rd
|
7bf32ab0f33c76e0b648614649ca761f0f8f59fe
|
[] |
no_license
|
cran/tsapp
|
65203e21a255e832f0ad9471f9ee308793eb7983
|
f2679a3d5ee0e3956a4ba013b7879324f77cf95f
|
refs/heads/master
| 2021-11-12T21:18:18.835475
| 2021-10-30T10:30:02
| 2021-10-30T10:30:02
| 248,760,597
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 540
|
rd
|
perwinba.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/frequdom.r
\name{perwinba}
\alias{perwinba}
\title{\code{perwinba} Bartlett-Priestley window for direct spectral estimation}
\usage{
perwinba(e, n)
}
\arguments{
\item{e}{equal bandwidth (at most n frequencies are used for averaging)}
\item{n}{length of time series}
}
\value{
w weights (symmetric)
}
\description{
\code{perwinba} Bartlett-Priestley window for direct spectral estimation
}
\examples{
data(WHORMONE)
w <- perwinba(0.1,length(WHORMONE))
}
|
924992d1e101c40e659e758d35135b1d4ad643b6
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/ade4/examples/fruits.Rd.R
|
fefc9bd40c4e3a04577a9d0eec9089759e903021
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 815
|
r
|
fruits.Rd.R
|
library(ade4)
### Name: fruits
### Title: Pair of Tables
### Aliases: fruits
### Keywords: datasets
### ** Examples
data(fruits)
pcajug <- dudi.pca(fruits$jug, scann = FALSE)
pcavar <- dudi.pca(fruits$var, scann = FALSE)
if(adegraphicsLoaded()) {
g1 <- s.corcircle(pcajug$co, plot = FALSE)
g2 <- s.class(pcajug$li, fac = fruits$type, plot = FALSE)
g3 <- s.corcircle(pcavar$co, plot = FALSE)
g4 <- s.class(pcavar$li, fac = fruits$type, plot = FALSE)
G1 <- ADEgS(list(g1, g2, g3, g4), layout = c(2, 2))
G2 <- plot(coinertia(pcajug, pcavar, scan = FALSE))
} else {
par(mfrow = c(2,2))
s.corcircle(pcajug$co)
s.class(pcajug$li, fac = fruits$type)
s.corcircle(pcavar$co)
s.class(pcavar$li, fac = fruits$type)
par(mfrow = c(1,1))
plot(coinertia(pcajug, pcavar, scan = FALSE))
}
|
03a4a11bebdc9c6944abd9ff0b088ba3b101847c
|
fb2136663e6986a9e04abea2891863888bd27c57
|
/man/dummy.code.lab.Rd
|
3a5aaf6f2b177bee021399e8b766b362d229b04f
|
[] |
no_license
|
stevepowell99/omnivr
|
192f1b0660a711bfa42e85259e7bb53ba090f3a3
|
122404abef49a6588d07adbf33cf31eaa110e8b9
|
refs/heads/master
| 2016-08-06T22:50:20.472001
| 2014-11-13T12:49:55
| 2014-11-13T12:49:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 568
|
rd
|
dummy.code.lab.Rd
|
\name{dummy.code.lab}
\alias{dummy.code.lab}
\title{Same as psych::dummy.code but with labels}
\usage{
dummy.code.lab(x)
}
\arguments{
\item{x}{nominal dataset}
}
\value{
labelled dataset
}
\description{
Same as psych::dummy.code but with labels
}
\examples{
# dummy.code.lab(factor(c("a", "b", "a")))
}
\seealso{
Other experimental: \code{\link{findr}};
\code{\link{ggheat}}; \code{\link{loopr2}};
\code{\link{loopr}}; \code{\link{omnicor}};
\code{\link{otrans_inner}}; \code{\link{outerF}};
\code{\link{pseudocor}}; \code{\link{sigHeat}};
\code{\link{writeprop}}; \code{\link{xt}}
}
|
279ba1e348fd8903b150fa69feff090109d0c1a1
|
d837a6b0beaede292aa430adbeb38d7ccc1a3363
|
/man/ftpDownload.Rd
|
85a5d61963878553b6ccd91912bf2b24d7b7cc84
|
[] |
no_license
|
jameslhao/dbgapr_dev
|
0503d740e4ffb394426638be31da609fb8f7f950
|
4d67dff0b4bd8dbda0bc74c6ca26362e93619f55
|
refs/heads/master
| 2020-12-30T13:09:21.487245
| 2017-08-15T12:20:00
| 2017-08-15T12:20:00
| 91,335,051
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,864
|
rd
|
ftpDownload.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/commons_public.R
\docType{methods}
\name{ftpDownload}
\alias{ftpDownload}
\alias{ftpDownload,Commons-method}
\title{Download supplemental data files from dbGaP ftp}
\usage{
ftpDownload(object, ...)
\S4method{ftpDownload}{Commons}(object, ..., phsAcc = "",
dataStudyOnly = TRUE, overwrite = FALSE)
}
\arguments{
\item{object}{Commons class object}
\item{...}{There are optional arguments.}
\item{phsAcc}{a character string. The dbGaP study accession. Only the files of the given study are downloaded when a study accession is provided.}
\item{dataStudyOnly}{a logical value. When TRUE (default), only downloads the dataset and variable metadata of the studies that have data files in the project directory. When FALSE, downloads the dataset and variable metadata of all dbGaP released studies, regardless of whether the actual phenotype data files of the studies are downloaded or not.}
\item{overwrite}{a logical value. When TRUE, re-downloads the supplemental metadata files even if they already exist, overwriting them. When FALSE (default), skips files that have already been downloaded.}
}
\value{
a logical value. If TRUE, all files are downloaded OK. If FALSE, there is at least one failed download.
}
\description{
The method downloads the supplemental metadata files from the dbGaP ftp site. It is called by the \code{\link{prepareData}} function.
}
\section{Methods (by class)}{
\itemize{
\item \code{Commons}: A method of class Commons
}}
\examples{
\dontrun{
c <- Commons()
ftpDownload(c)
# or
ftpDownload(c, phsAcc = 'phs000001.v3.p1')
ftpDownload(c, dataStudyOnly=FALSE)
# or to include the data meta-info of all the studies released in dbGaP.
# Note: This step may take serveral hours.
ftpDownload(c, dataStudyOnly = FALSE)
}
}
|
aa37634523710960b6ea9e6495301adaa23eb510
|
1c5f4e06ec247883606011e82cee318be87112fc
|
/man/search_seed.Rd
|
ecf79e374984930187647fa15dbbd5e3edb4e201
|
[
"MIT"
] |
permissive
|
Hide-Fun/metabarcode
|
888228f867fcc905446ed77e042c06e966b748d2
|
a9866d300dae4796188e6ea5fb3b5ea9e47eb717
|
refs/heads/master
| 2023-03-06T12:05:25.724825
| 2021-02-22T01:33:22
| 2021-02-22T01:33:22
| 237,951,173
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 407
|
rd
|
search_seed.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/search_seed.R
\name{search_seed}
\alias{search_seed}
\title{search seed}
\usage{
search_seed(n = 100, .data, k = 2, trymax = 1000, maxit = 1000, ...)
}
\arguments{
\item{n}{number of seed}
\item{.data}{data}
\item{k}{dimention}
\item{trymax}{trymax}
\item{maxit}{maxit}
\item{...}{see metaMDS}
}
\description{
search seed
}
|
9666461759e3a249d0e4a83dd00bd40f74c0e918
|
c2ace3da6f7d0d46ada5b8e3a13e236f8dd83ec3
|
/unsupervised_modeling/watershed_score.R
|
ec4db3f5e917ee91999aa03d20b830c39033caf3
|
[] |
no_license
|
BennyStrobes/gtex_v8_rare_splice
|
9c028d5cec442f7180d5c442be67ae9100a175d1
|
c1d77b6a367cadc5d0020c55b868dd9b4961cf69
|
refs/heads/master
| 2020-04-08T13:37:04.811893
| 2020-02-28T14:04:06
| 2020-02-28T14:04:06
| 159,398,828
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 56,405
|
r
|
watershed_score.R
|
args = commandArgs(trailingOnly=TRUE)
source("watershed.R")
library(cowplot)
library(RColorBrewer)
library(ggplot2)
library(Biobase)
initialize_phi <- function(num_bins, dim) {
  # Initial emission probabilities phi for a 3-bin p-value discretization:
  # rows = signal dimensions, columns = bins; unset cells stay at 1.
  # NOTE(review): the hard-coded column assignments assume num_bins >= 3.
  inlier <- matrix(1, nrow = dim, ncol = num_bins)
  outlier <- matrix(1, nrow = dim, ncol = num_bins)
  # Column-constant defaults (filled column-major, one value per column).
  inlier[, 1:3] <- rep(c(.8, .1, .1), each = dim)
  outlier[, 1:3] <- rep(c(.01, .29, .7), each = dim)
  ####################
  # Total expression (dimension 2) is two-sided: inlier mass sits in the
  # middle bin, outlier mass is split across the tails.
  ####################
  inlier[2, 1:3] <- c(.05, .9, .05)
  outlier[2, 1:3] <- c(.49, .02, .49)
  list(inlier_component = inlier, outlier_component = outlier)
}
#######################################
# Extract ROC curves and precision recall curves for test set (in each dimension seperately) using:
#### 1. Watershed
#### 2. GAM
#### 3. RNA-only
#######################################
compute_roc_across_dimensions <- function(number_of_dimensions, dimension_labels, posterior_prob_test, real_valued_outliers_test1, gam_posteriors, binary_outliers_test2) {
  # For each outlier signal (dimension), builds ROC and precision-recall
  # curves (PRROC) for three predictors -- Watershed posteriors, genomic
  # annotation (GAM) posteriors, and the RNA-only statistic -- against the
  # held-out binary outlier labels. Returns a list (one element per
  # dimension) of list(name, evaROC).
  curves_for <- function(scores, labels) {
    # PRROC curves treating labels == 1 as the positive class.
    list(
      roc = roc.curve(scores.class0 = scores[labels == 1], scores.class1 = scores[labels == 0], curve = T),
      pr = pr.curve(scores.class0 = scores[labels == 1], scores.class1 = scores[labels == 0], curve = T)
    )
  }
  roc_object_across_dimensions <- list()
  for (dimension in 1:number_of_dimensions) {
    # Human-readable name: label text before the "_pval" suffix.
    dimension_name <- strsplit(dimension_labels[dimension], "_pval")[[1]][1]
    # Pseudo gold standard from the held-out (N2 pair) calls.
    test_outlier_status <- binary_outliers_test2[, dimension]
    watershed <- curves_for(posterior_prob_test[, dimension], test_outlier_status)
    rna_only <- curves_for(real_valued_outliers_test1[, dimension], test_outlier_status)
    gam <- curves_for(gam_posteriors[, dimension], test_outlier_status)
    evaROC <- list(
      watershed_sens = watershed$roc$curve[, 2],
      watershed_spec = 1 - watershed$roc$curve[, 1],
      watershed_auc = watershed$roc$auc,
      watershed_pr_auc = watershed$pr$auc.integral,
      watershed_recall = watershed$pr$curve[, 1],
      watershed_precision = watershed$pr$curve[, 2],
      GAM_sens = gam$roc$curve[, 2],
      GAM_spec = 1 - gam$roc$curve[, 1],
      GAM_auc = gam$roc$auc,
      GAM_pr_auc = gam$pr$auc.integral,
      GAM_recall = gam$pr$curve[, 1],
      GAM_precision = gam$pr$curve[, 2],
      rna_only_pr_auc = rna_only$pr$auc.integral,
      rna_only_recall = rna_only$pr$curve[, 1],
      rna_only_precision = rna_only$pr$curve[, 2],
      rna_only_sens = rna_only$roc$curve[, 2],
      rna_only_spec = 1 - rna_only$roc$curve[, 1],
      rna_only_auc = rna_only$roc$auc
    )
    roc_object_across_dimensions[[dimension]] <- list(name = dimension_name, evaROC = evaROC)
  }
  return(roc_object_across_dimensions)
}
extract_pairwise_observed_labels <- function(binary_outliers_train) {
  # Builds an n x choose(D, 2) matrix whose columns are the elementwise
  # products of every unordered pair of outlier-label columns, i.e. the
  # observed pairwise co-outlier indicators. Column order follows
  # (1,2), (1,3), ..., (1,D), (2,3), ...
  # FIX: the output width previously used the *global* variable
  # `number_of_dimensions` instead of the local column count, which
  # breaks (or silently mis-sizes the matrix) whenever the input has a
  # different number of dimensions than the global.
  n_samples <- dim(binary_outliers_train)[1]
  num_dim <- dim(binary_outliers_train)[2]
  pairwise_mat <- matrix(0, n_samples, choose(num_dim, 2))
  dimension_counter <- 1
  for (dim1 in 1:num_dim) {
    for (dim2 in dim1:num_dim) {
      if (dim1 != dim2) {
        pairwise_mat[, dimension_counter] <- binary_outliers_train[, dim1] * binary_outliers_train[, dim2]
        dimension_counter <- dimension_counter + 1
      }
    }
  }
  return(pairwise_mat)
}
initialize_genomic_annotation_variables <- function(number_of_features, number_of_dimensions, independent_variables) {
  # Flattens zero-initialized CRF parameters into the single vector layout
  # the lbfgs objective expects:
  #   [intercepts (D) | per-dimension betas (F per dim) | rows of theta_pair]
  # `independent_variables`: "true" -> edge weights fixed at 0;
  # "false" -> shared scalar edges started at 1e-7; "false_geno" ->
  # genotype-dependent edges (intercept row at 1e-7 plus one zero row per
  # feature). Any other value errors when theta_pair is used below.
  # FIX: the original built theta via beta_init[2:(F+1), ], which drops to
  # a plain vector when F == 1 and makes theta[, dimension] error; the
  # explicit matrix construction handles the single-feature case.
  num_pairs <- choose(number_of_dimensions, 2)
  if (independent_variables == "true") {
    theta_pair <- matrix(0, 1, num_pairs)
  } else if (independent_variables == "false") {
    theta_pair <- matrix(1e-7, 1, num_pairs)
  } else if (independent_variables == "false_geno") {
    theta_pair <- matrix(0, number_of_features + 1, num_pairs)
    theta_pair[1, ] <- rep(1e-7, num_pairs)
  }
  theta_singleton <- rep(0, number_of_dimensions)
  theta <- matrix(0, number_of_features, number_of_dimensions)
  # Assemble the flat vector in the documented order.
  x <- theta_singleton
  for (dimension in 1:number_of_dimensions) {
    x <- c(x, theta[, dimension])
  }
  for (row_number in 1:(dim(theta_pair)[1])) {
    x <- c(x, theta_pair[row_number, ])
  }
  return(x)
}
genomic_annotation_model_cv <- function(feat_train, binary_outliers_train, nfolds, costs, independent_variables) {
# Fits the genomic-annotation-only CRF ("GAM") with an L2 penalty chosen
# by nfolds-fold cross-validation over the candidate penalties `costs`.
# Selection criterion: mean held-out ROC AUC of the CRF marginals against
# the observed binary outlier labels (flattened across dimensions).
# Returns list(lambda = best penalty, gam_parameters = parameters refit
# on the full shuffled training data at that penalty).
# NOTE(review): relies on externally defined objective/gradient functions
# (compute_exact_crf_*_for_lbfgs), a C++ marginal-inference routine, and
# the lbfgs and PRROC packages.
number_of_dimensions <- dim(binary_outliers_train)[2]
number_of_features <- dim(feat_train)[2]
# Flat zero-initialized parameter vector in the layout lbfgs expects.
gradient_variable_vec <- initialize_genomic_annotation_variables(number_of_features, number_of_dimensions, independent_variables)
# theta_pair shape depends on the edge parameterization: "true" = no
# edges, "false" = shared scalar edges, "false_geno" = genotype-dependent
# edges (intercept row plus one row per feature).
if (independent_variables == "true") {
pair_value = 0
theta_pair = matrix(pair_value,1, choose(number_of_dimensions, 2))
} else if (independent_variables == "false") {
pair_value = 1e-7
theta_pair = matrix(pair_value,1, choose(number_of_dimensions, 2))
} else if (independent_variables == "false_geno") {
pair_value = 1e-7
theta_pair = matrix(0, number_of_features+1, choose(number_of_dimensions, 2))
theta_pair[1,] <- numeric(choose(number_of_dimensions, 2)) + pair_value
}
# Create GAM parameters section
beta_init = matrix(0,number_of_features+1, number_of_dimensions)
theta_singleton = beta_init[1,]
theta = beta_init[2:(number_of_features + 1),]
gam_parameters = list(theta_pair=theta_pair, theta_singleton=theta_singleton, theta=theta)
# Placeholder emission probabilities (required by the shared likelihood /
# inference interfaces, not optimized by the pure-CRF fit).
phi_placeholder <- initialize_phi(3, number_of_dimensions)
pairwise_binary_outliers_train <- extract_pairwise_observed_labels(binary_outliers_train)
#Randomly shuffle the data
random_shuffling_indices <- sample(nrow(feat_train))
feat_train_shuff <- feat_train[random_shuffling_indices,]
binary_outliers_train_shuff <- binary_outliers_train[random_shuffling_indices,]
pairwise_binary_outliers_train_shuff <- pairwise_binary_outliers_train[random_shuffling_indices,]
#Create nfolds equally size folds
folds <- cut(seq(1,nrow(feat_train_shuff)),breaks=nfolds,labels=FALSE)
avg_aucs <- c()
for (cost_iter in 1:length(costs)) {
lambda <- costs[cost_iter]
#Perform nfolds-fold cross validation
aucs <- c()
for(i in 1:nfolds){
#Segement your data by fold using the which() function
testIndexes <- which(folds==i,arr.ind=TRUE)
feat_test_fold <- feat_train_shuff[testIndexes,]
outliers_test_fold <- binary_outliers_train_shuff[testIndexes,]
pairwise_outliers_test_fold <- pairwise_binary_outliers_train_shuff[testIndexes,]
feat_train_fold <- feat_train_shuff[-testIndexes,]
outliers_train_fold <- binary_outliers_train_shuff[-testIndexes,]
pairwise_outliers_train_fold <- pairwise_binary_outliers_train_shuff[-testIndexes,]
# Debug helpers (numerical vs analytic gradient check), intentionally
# disabled:
# num_grad <- grad(compute_exact_crf_likelihood_for_lbfgs, gradient_variable_vec, feat=feat_test_fold, discrete_outliers=outliers_test_fold, posterior=outliers_test_fold, posterior_pairwise=pairwise_outliers_test_fold, phi=phi_placeholder, lambda=lambda, lambda_pair=0, lambda_singleton=0, independent_variables=independent_variables)
# actual_grad <- compute_exact_crf_gradient_for_lbfgs(gradient_variable_vec, feat=feat_test_fold, discrete_outliers=outliers_test_fold, posterior=outliers_test_fold, posterior_pairwise=pairwise_outliers_test_fold, phi=phi_placeholder, lambda=lambda, lambda_pair=0, lambda_singleton=0, independent_variables=independent_variables)
# Fit the CRF on the training folds at this penalty.
lbfgs_output <- lbfgs(compute_exact_crf_likelihood_for_lbfgs, compute_exact_crf_gradient_for_lbfgs, gradient_variable_vec, feat=feat_train_fold, discrete_outliers=outliers_train_fold, posterior=outliers_train_fold, posterior_pairwise=pairwise_outliers_train_fold, phi=phi_placeholder, lambda=lambda, lambda_pair=0, lambda_singleton=0, independent_variables=independent_variables,invisible=1)
# Check to make sure LBFGS converged OK
if (lbfgs_output$convergence != 0) {
print(paste0("LBFGS optimazation on CRF did not converge. It reported convergence error of: ", lbfgs_output$convergence))
print(lbfgs_output$message)
}
# Get optimized crf coefficients back into model_params format
gam_parameters$theta_singleton <- lbfgs_output$par[1:number_of_dimensions]
for (dimension in 1:number_of_dimensions) {
gam_parameters$theta[,dimension] <- lbfgs_output$par[(number_of_dimensions + 1 + ncol(feat_train)*(dimension-1)):(number_of_dimensions + ncol(feat_train)*(dimension))]
}
#gam_parameters$theta_pair[1,] <- lbfgs_output$par[(number_of_dimensions + (number_of_dimensions*ncol(feat_train)) + 1):length(lbfgs_output$par)]
#theta_pair[1,] <- x[(number_of_dimensions + (number_of_dimensions*num_genomic_features) + 1):length(x)]
gam_parameters$theta_pair <- matrix(lbfgs_output$par[(number_of_dimensions + (number_of_dimensions*ncol(feat_train)) + 1):length(lbfgs_output$par)], ncol=choose(number_of_dimensions, 2),byrow=TRUE)
# Compute expected value of the CRFs (mu)
mu_list_test <- update_marginal_probabilities_exact_inference_cpp(feat_test_fold, outliers_test_fold, gam_parameters$theta_singleton, gam_parameters$theta_pair, gam_parameters$theta, phi_placeholder$inlier_component, phi_placeholder$outlier_component, number_of_dimensions, choose(number_of_dimensions, 2), FALSE)
mu_test <- mu_list_test$probability
# Compute roc: AUC of CRF marginals vs held-out labels, flattened
# across all dimensions.
test_predictions <- as.vector(mu_test)
test_labels <- as.vector(outliers_test_fold)
roc_obj <- roc.curve(scores.class0 = test_predictions[test_labels==1], scores.class1 = test_predictions[test_labels==0], curve = T)
auc <- roc_obj$auc
aucs <- c(aucs, auc)
}
avg_aucs <- c(avg_aucs, mean(aucs))
}
# Get best (one with highest avg auc across folds) lambda
print(avg_aucs)
best_index <- which(avg_aucs==max(avg_aucs))
best_lambda <- costs[best_index]
# Using best lambda, recompute GAM
lbfgs_output <- lbfgs(compute_exact_crf_likelihood_for_lbfgs, compute_exact_crf_gradient_for_lbfgs, gradient_variable_vec, feat=feat_train_shuff, discrete_outliers=binary_outliers_train_shuff, posterior=binary_outliers_train_shuff, posterior_pairwise=pairwise_binary_outliers_train_shuff, phi=phi_placeholder, lambda=best_lambda, lambda_pair=0, lambda_singleton=0, independent_variables=independent_variables,invisible=1)
# Get optimized crf coefficients back into model_params format
gam_parameters$theta_singleton <- lbfgs_output$par[1:number_of_dimensions]
for (dimension in 1:number_of_dimensions) {
gam_parameters$theta[,dimension] <- lbfgs_output$par[(number_of_dimensions + 1 + ncol(feat_train)*(dimension-1)):(number_of_dimensions + ncol(feat_train)*(dimension))]
}
#gam_parameters$theta_pair[1,] <- lbfgs_output$par[(number_of_dimensions + (number_of_dimensions*ncol(feat_train)) + 1):length(lbfgs_output$par)]
gam_parameters$theta_pair <- matrix(lbfgs_output$par[(number_of_dimensions + (number_of_dimensions*ncol(feat_train)) + 1):length(lbfgs_output$par)], ncol=choose(number_of_dimensions, 2),byrow=TRUE)
return(list(lambda=best_lambda, gam_parameters=gam_parameters))
}
fully_observed_gam_outlier_scatterplot_comparison_in_one_dimension <- function(df, outlier_type) {
  # One panel: GAM posterior (y) vs mean outlier signal (x), points
  # colored by Watershed posterior; `outlier_type` becomes the title.
  ggplot(df, aes(coloring, gam)) +
    geom_point(size = .1, aes(colour = watershed)) +
    theme(text = element_text(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) +
    scale_color_gradient(low = "pink", high = "blue") +
    labs(colour = "Watershed posterior", x = "mean[-log10(outlier pvalue)]", y = "GAM posterior", title = outlier_type) +
    theme(legend.position = "right")
}
# Limit to fully observed cases
# Have one plot per outlier type showing:
### x-axis median outlier pvalue across observed tissues
### Y-axis GAM posterior
### Colored by watershed posterior
########################################
fully_observed_gam_outlier_scatterplot_comparison_colored_by_watershed <- function(data, output_file) {
# Builds a 3-panel figure (ASE / splicing / total expression), one panel
# per outlier class, restricted to gene-individual pairs observed in all
# three signals. Each panel shows GAM posterior vs mean outlier score,
# colored by Watershed posterior; the grid is written to output_file.
options(bitmapType = 'cairo', device = 'pdf')
# Remove instances that do not have all 3 expression signals
fully_observed_indices <- !is.nan(data$splicing_outlier_pvalue) & !is.nan(data$total_expression_outlier_pvalue) & !is.nan(data$ase_outlier_pvalue)
fully_observed_data <- data[fully_observed_indices,]
# Remove directionality/sign from total expression pvalues
fully_observed_data$total_expression_outlier_pvalue <- abs(fully_observed_data$total_expression_outlier_pvalue)
# Compute mean outlier score (acrosss all 3 outlier classes); the 1e-6
# pseudocount guards against log10(0). NOTE(review): assumes columns 2:4
# hold the three outlier p-values -- confirm against the input layout.
outlier_means <- rowMeans(-log10(1e-6 + fully_observed_data[,2:4]))
# Put each outlier class into it own neat data frame
splicing_df <- data.frame(gam=fully_observed_data$splicing_gam_crf_posterior,watershed=fully_observed_data$splicing_watershed_posterior, coloring=outlier_means)
te_df <- data.frame(gam=fully_observed_data$total_expression_gam_crf_posterior,watershed=fully_observed_data$total_expression_watershed_posterior, coloring=outlier_means)
ase_df <- data.frame(gam=fully_observed_data$ase_gam_crf_posterior,watershed=fully_observed_data$ase_watershed_posterior, coloring=outlier_means)
splicing_plot <- fully_observed_gam_outlier_scatterplot_comparison_in_one_dimension(splicing_df, "splice")
te_plot <- fully_observed_gam_outlier_scatterplot_comparison_in_one_dimension(te_df, "total expression")
ase_plot <- fully_observed_gam_outlier_scatterplot_comparison_in_one_dimension(ase_df, "ase")
# Stack the three panels vertically (cowplot) and save.
combined_plot <- plot_grid(ase_plot, splicing_plot, te_plot, ncol=1)
ggsave(combined_plot, file=output_file, width=19,height=20,units="cm")
}
fully_observed_watershed_river_scatterplot_comparison_in_one_dimension <- function(df, outlier_type, coloring_label) {
  # One panel: Watershed posterior (y) vs RIVER posterior (x); the title
  # reports their Spearman correlation alongside `outlier_type`.
  rho <- cor(df$river, df$watershed, method = "spearman")
  ggplot(df, aes(river, watershed)) +
    geom_point(size = .1, aes(colour = coloring)) +
    theme(text = element_text(), panel.grid.major = element_blank(), panel.grid.minor = element_blank(), panel.background = element_blank(), axis.line = element_line(colour = "black")) +
    scale_color_gradient(low = "pink", high = "blue") +
    labs(colour = coloring_label, x = "RIVER posterior", y = "Watershed posterior", title = paste0(outlier_type, " / spearman rho: ", round(rho, digits = 2))) +
    theme(legend.position = "right")
}
fully_observed_watershed_river_scatterplot_comparison_colored_by_median_outlier_score <- function(data, output_file) {
  # RIVER vs Watershed posterior scatter, one panel per outlier class, on
  # instances where all three outlier signals are observed; colored by the
  # MEAN -log10(p-value) across the three classes.
  # NOTE(review): despite "median" in the function name, this variant uses
  # rowMeans (and labels the legend "mean[...]"); the sibling
  # "_colored_by_average_outlier_score" variant uses rowMedians. The two
  # names appear swapped — kept unchanged because callers depend on them.
  options(bitmapType = 'cairo', device = 'pdf')
  # Restrict to rows with all three outlier p-values observed
  keep <- !is.nan(data$splicing_outlier_pvalue) &
          !is.nan(data$total_expression_outlier_pvalue) &
          !is.nan(data$ase_outlier_pvalue)
  complete_data <- data[keep, ]
  # Drop the sign/direction on total-expression p-values
  complete_data$total_expression_outlier_pvalue <- abs(complete_data$total_expression_outlier_pvalue)
  # Mean -log10(p-value) across the three p-value columns (columns 2:4)
  mean_neglog_pval <- rowMeans(-log10(1e-6 + complete_data[, 2:4]))
  ase_panel <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(
    data.frame(river = complete_data$ase_river_posterior,
               watershed = complete_data$ase_watershed_posterior,
               coloring = mean_neglog_pval),
    "ase", "mean[-log10(pvalue)]")
  splice_panel <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(
    data.frame(river = complete_data$splicing_river_posterior,
               watershed = complete_data$splicing_watershed_posterior,
               coloring = mean_neglog_pval),
    "splice", "mean[-log10(pvalue)]")
  te_panel <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(
    data.frame(river = complete_data$total_expression_river_posterior,
               watershed = complete_data$total_expression_watershed_posterior,
               coloring = mean_neglog_pval),
    "total expression", "mean[-log10(pvalue)]")
  ggsave(plot_grid(ase_panel, splice_panel, te_panel, ncol = 1),
         file = output_file, width = 19, height = 20, units = "cm")
}
# RIVER vs Watershed posterior scatter (one panel per outlier class) for
# instances with all three outlier signals observed; points colored by the
# MEDIAN -log10(p-value) across the three classes.
# NOTE(review): despite "average" in the function name, this uses rowMedians
# (the "_median_" sibling above uses rowMeans); the names appear swapped but
# are kept since callers depend on them. rowMedians requires the matrixStats
# package to be attached — TODO confirm it is loaded earlier in this file.
fully_observed_watershed_river_scatterplot_comparison_colored_by_average_outlier_score <- function(data, output_file) {
options(bitmapType = 'cairo', device = 'pdf')
# Remove instances that do not have all 3 expression signals observed (non-NaN)
fully_observed_indices <- !is.nan(data$splicing_outlier_pvalue) & !is.nan(data$total_expression_outlier_pvalue) & !is.nan(data$ase_outlier_pvalue)
fully_observed_data <- data[fully_observed_indices,]
# Remove directionality/sign from total expression pvalues
fully_observed_data$total_expression_outlier_pvalue <- abs(fully_observed_data$total_expression_outlier_pvalue)
# Compute MEDIAN outlier score across all 3 outlier classes
# (columns 2:4 are assumed to hold the three p-value columns)
outlier_medians <- rowMedians(as.matrix(-log10(1e-6 + fully_observed_data[,2:4])))
# Put each outlier class into its own data frame of
# (river posterior, watershed posterior, coloring value)
splicing_df <- data.frame(river=fully_observed_data$splicing_river_posterior,watershed=fully_observed_data$splicing_watershed_posterior, coloring=outlier_medians)
te_df <- data.frame(river=fully_observed_data$total_expression_river_posterior,watershed=fully_observed_data$total_expression_watershed_posterior, coloring=outlier_medians)
ase_df <- data.frame(river=fully_observed_data$ase_river_posterior,watershed=fully_observed_data$ase_watershed_posterior, coloring=outlier_medians)
# One scatter panel per outlier class
splicing_plot <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(splicing_df, "splice", "median[-log10(pvalue)]")
te_plot <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(te_df, "total expression", "median[-log10(pvalue)]")
ase_plot <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(ase_df, "ase", "median[-log10(pvalue)]")
# Stack panels vertically and write to disk
combined_plot <- plot_grid(ase_plot, splicing_plot, te_plot, ncol=1)
ggsave(combined_plot, file=output_file, width=19,height=20,units="cm")
}
fully_observed_watershed_river_scatterplot_comparison_colored_by_classes_outlier_score <- function(data, output_file) {
  # RIVER vs Watershed posterior scatter on fully observed instances, one
  # panel per outlier class. Unlike the mean/median variants, each panel is
  # colored by that class's OWN -log10(p-value).
  options(bitmapType = 'cairo', device = 'pdf')
  # Restrict to rows where all three outlier p-values are observed
  keep <- !is.nan(data$splicing_outlier_pvalue) &
          !is.nan(data$total_expression_outlier_pvalue) &
          !is.nan(data$ase_outlier_pvalue)
  complete_data <- data[keep, ]
  # Drop the sign/direction on total-expression p-values
  complete_data$total_expression_outlier_pvalue <- abs(complete_data$total_expression_outlier_pvalue)
  # Small pseudocount keeps -log10 finite at p = 0
  neglog <- function(p) -log10(1e-6 + p)
  ase_panel <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(
    data.frame(river = complete_data$ase_river_posterior,
               watershed = complete_data$ase_watershed_posterior,
               coloring = neglog(complete_data$ase_outlier_pvalue)),
    "ase", "-log10(pvalue)")
  splice_panel <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(
    data.frame(river = complete_data$splicing_river_posterior,
               watershed = complete_data$splicing_watershed_posterior,
               coloring = neglog(complete_data$splicing_outlier_pvalue)),
    "splice", "-log10(pvalue)")
  te_panel <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(
    data.frame(river = complete_data$total_expression_river_posterior,
               watershed = complete_data$total_expression_watershed_posterior,
               coloring = neglog(complete_data$total_expression_outlier_pvalue)),
    "total expression", "-log10(pvalue)")
  ggsave(plot_grid(ase_panel, splice_panel, te_panel, ncol = 1),
         file = output_file, width = 19, height = 20, units = "cm")
}
fully_observed_watershed_river_scatterplot_comparison_colored_by_classes_independent_gam_score <- function(data, output_file) {
  # RIVER vs Watershed posterior scatter on fully observed instances, one
  # panel per outlier class, colored by that class's independent GAM
  # posterior.
  options(bitmapType = 'cairo', device = 'pdf')
  # Restrict to rows where all three outlier p-values are observed
  keep <- !is.nan(data$splicing_outlier_pvalue) &
          !is.nan(data$total_expression_outlier_pvalue) &
          !is.nan(data$ase_outlier_pvalue)
  complete_data <- data[keep, ]
  # Drop the sign/direction on total-expression p-values
  # (not used further here, but kept for parity with the sibling functions)
  complete_data$total_expression_outlier_pvalue <- abs(complete_data$total_expression_outlier_pvalue)
  ase_panel <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(
    data.frame(river = complete_data$ase_river_posterior,
               watershed = complete_data$ase_watershed_posterior,
               coloring = complete_data$ase_gam_posterior),
    "ase", "GAM posterior")
  splice_panel <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(
    data.frame(river = complete_data$splicing_river_posterior,
               watershed = complete_data$splicing_watershed_posterior,
               coloring = complete_data$splicing_gam_posterior),
    "splice", "GAM posterior")
  te_panel <- fully_observed_watershed_river_scatterplot_comparison_in_one_dimension(
    data.frame(river = complete_data$total_expression_river_posterior,
               watershed = complete_data$total_expression_watershed_posterior,
               coloring = complete_data$total_expression_gam_posterior),
    "total expression", "GAM posterior")
  ggsave(plot_grid(ase_panel, splice_panel, te_panel, ncol = 1),
         file = output_file, width = 19, height = 20, units = "cm")
}
visualize_watershed_posterior_distributions <- function(data, output_file) {
  # For each Watershed posterior threshold in {.5, .7, .9}, draw a grouped
  # barplot of the fraction of instances whose posterior meets the threshold,
  # stratified by which outlier signals (ase / splice / TE p-values) were
  # observed (non-NaN) for the instance. The three barplots are stacked
  # vertically and written to output_file.
  #
  # Refactor: the original function repeated an identical ~45-line block once
  # per threshold; the logic is now computed by a single local helper so the
  # three panels cannot drift apart. Output is unchanged.
  options(bitmapType = 'cairo', device = 'pdf')
  # Every combination of observed signals that gets its own bar group.
  # s/te/ase flags say whether that signal's p-value must be observed.
  observation_patterns <- list(
    list(label = "ase,s,te", s = TRUE,  te = TRUE,  ase = TRUE),
    list(label = "ase,s",    s = TRUE,  te = FALSE, ase = TRUE),
    list(label = "s,te",     s = TRUE,  te = TRUE,  ase = FALSE),
    list(label = "ase,te",   s = FALSE, te = TRUE,  ase = TRUE),
    list(label = "ase",      s = FALSE, te = FALSE, ase = TRUE),
    list(label = "s",        s = TRUE,  te = FALSE, ase = FALSE),
    list(label = "te",       s = FALSE, te = TRUE,  ase = FALSE)
  )
  # Observation status of each signal, computed once
  s_observed <- !is.nan(data$splicing_outlier_pvalue)
  te_observed <- !is.nan(data$total_expression_outlier_pvalue)
  ase_observed <- !is.nan(data$ase_outlier_pvalue)
  # Build the grouped barplot for a single posterior threshold
  threshold_barplot <- function(thresh) {
    posteriors <- c()
    outlier_types <- c()
    observed_types <- c()
    for (pattern in observation_patterns) {
      # Rows matching this exact observation pattern
      indices <- (s_observed == pattern$s) & (te_observed == pattern$te) & (ase_observed == pattern$ase)
      subset_data <- data[indices, ]
      # Fraction of instances in this group with posterior >= thresh,
      # for each outlier class (order: splice, total expression, ase)
      posteriors <- c(posteriors,
                      sum(subset_data$splicing_watershed_posterior >= thresh) / sum(indices),
                      sum(subset_data$total_expression_watershed_posterior >= thresh) / sum(indices),
                      sum(subset_data$ase_watershed_posterior >= thresh) / sum(indices))
      outlier_types <- c(outlier_types, "splice", "total expression", "ase")
      observed_types <- c(observed_types, rep(pattern$label, 3))
    }
    df <- data.frame(posterior = posteriors,
                     outlier_type = factor(outlier_types, levels = c("ase", "splice", "total expression")),
                     observed_type = factor(observed_types, levels = c("ase", "s", "te", "ase,s", "s,te", "ase,te", "ase,s,te")))
    ggplot(data = df, aes(x = observed_type, y = posterior, fill = outlier_type)) +
      geom_bar(stat = "identity", color = "black", position = position_dodge()) +
      theme(text = element_text(), panel.grid.major = element_blank(),
            panel.grid.minor = element_blank(), panel.background = element_blank(),
            axis.line = element_line(colour = "black")) +
      labs(x = "Observed outlier type", y = paste0("% posterior >= ", thresh), fill = "Outlier type")
  }
  # One panel per threshold, stacked vertically (same order as before: .5, .7, .9)
  combined <- plot_grid(threshold_barplot(.5), threshold_barplot(.7), threshold_barplot(.9), ncol = 1)
  ggsave(combined, file = output_file, width = 19, height = 20, units = "cm")
}
missing_gam_outlier_scatterplot_in_one_dimension <- function(df, outlier_type) {
  # Scatter of GAM posterior (x) vs mean -log10 outlier p-value (y),
  # colored by the Watershed posterior; titled with the outlier class.
  ggplot(df, aes(gam, outlier)) +
    geom_point(size = .1, aes(colour = watershed)) +
    theme(text = element_text(), panel.grid.major = element_blank(),
          panel.grid.minor = element_blank(), panel.background = element_blank(),
          axis.line = element_line(colour = "black")) +
    scale_color_gradient(low = "pink", high = "blue") +
    labs(x = "GAM posterior", y = "mean[-log10(outlier)]",
         colour = "Watershed posterior", title = outlier_type) +
    theme(legend.position = "right")
}
# Limit to missing cases for each outlier type
# Have one plot per outlier type showing:
### x-axis median outlier pvalue across observed tissues
### Y-axis GAM Posterior
### Colored by watershed score
########################################
missing_gam_outlier_scatterplot_comparison_colored_by_watershed_score <- function(data, output_file) {
  # For each outlier class, restrict to instances where THAT class's signal
  # is missing (NaN p-value) and scatter the GAM-CRF posterior against the
  # mean -log10(p-value) over the signals that WERE observed, colored by
  # the Watershed posterior. One panel per class, stacked vertically.
  options(bitmapType = 'cairo', device = 'pdf')
  # Total-expression p-values carry a sign; use the magnitude
  data$total_expression_outlier_pvalue <- abs(data$total_expression_outlier_pvalue)
  # Mean -log10(p-value) over observed signals only (columns 2:4 hold the
  # three p-value columns; na.rm drops the missing ones)
  mean_neglog_pval <- rowMeans(-log10(1e-6 + data[, 2:4]), na.rm = TRUE)
  # Build the panel for one outlier class, restricted to rows where that
  # class's own signal is missing
  build_panel <- function(missing_rows, watershed_col, gam_col, label) {
    subset_data <- data[missing_rows, ]
    panel_df <- data.frame(watershed = subset_data[[watershed_col]],
                           gam = subset_data[[gam_col]],
                           outlier = mean_neglog_pval[missing_rows])
    missing_gam_outlier_scatterplot_in_one_dimension(panel_df, label)
  }
  ase_panel <- build_panel(is.nan(data$ase_outlier_pvalue),
                           "ase_watershed_posterior", "ase_gam_crf_posterior", "ase")
  splice_panel <- build_panel(is.nan(data$splicing_outlier_pvalue),
                              "splicing_watershed_posterior", "splicing_gam_crf_posterior", "splice")
  te_panel <- build_panel(is.nan(data$total_expression_outlier_pvalue),
                          "total_expression_watershed_posterior", "total_expression_gam_crf_posterior", "total expression")
  ggsave(plot_grid(ase_panel, splice_panel, te_panel, ncol = 1),
         file = output_file, width = 19, height = 20, units = "cm")
}
######################
# Command Line args
# NOTE(review): `args` is presumably commandArgs(trailingOnly=TRUE) captured
# earlier in this file — confirm against the file header.
######################
pvalue_fraction <- as.numeric(args[1])
number_of_dimensions <- as.numeric(args[2])
inference_method <- args[3]
pseudocount <- as.numeric(args[4])
fully_observed_input_file <- args[5]
all_variants_input_file <- args[6]
output_stem <- args[7]
binary_pvalue_threshold <- as.numeric(args[8])
watershed_3_class_roc_run_dir <- args[9]
#######################################
## Load in data
#######################################
# Load in fully observed data. Note that pvalue fraction is just here as dummy variable (as we do not use binary outlier calls for this analysis)
# This is the data we are training on
training_data <- load_watershed_data(fully_observed_input_file, number_of_dimensions, pvalue_fraction, binary_pvalue_threshold)
# Standardize training features; per-column mean/sd are kept so the identical
# scaling can be applied to the prediction set (avoids train/test leakage)
mean_feat <- apply(training_data$feat, 2, mean)
sd_feat <- apply(training_data$feat, 2, sd)
training_feat <- scale(training_data$feat, center=mean_feat, scale=sd_feat)
training_discretized_outliers <- training_data$outliers_discrete
training_binary_outliers <- training_data$outliers_binary
# Load in data for all variants
# This is the data we are making predictions on
predictions_data <- load_watershed_data(all_variants_input_file, number_of_dimensions, pvalue_fraction, binary_pvalue_threshold)
# Scale prediction features with the TRAINING mean/sd (not their own)
predictions_feat <- scale(predictions_data$feat, center=mean_feat, scale=sd_feat)
predictions_discretized_outliers <- predictions_data$outliers_discrete
predictions_pvalues_outliers <- predictions_data$outlier_pvalues
predictions_binary_outliers <- predictions_data$outliers_binary
print(all_variants_input_file)
#######################################
## Load in previously trained watershed model (this was done exactly on the "fully observed data")
## The .rds filenames encode the training hyperparameters; independent_false
## is the full Watershed (CRF) model, independent_true is the RIVER
## (independent-signals) variant.
#######################################
watershed_data <- readRDS(paste0(watershed_3_class_roc_run_dir, "fully_observed_te_ase_splicing_outliers_gene_pvalue_0.01_n2_pair_outlier_fraction_.01_binary_pvalue_threshold_.01_pseudocount_30_inference_exact_independent_false_roc_object.rds"))
watershed_model <- watershed_data$model_params
river_data <- readRDS(paste0(watershed_3_class_roc_run_dir, "fully_observed_te_ase_splicing_outliers_gene_pvalue_0.01_n2_pair_outlier_fraction_.01_binary_pvalue_threshold_.01_pseudocount_30_inference_exact_independent_true_roc_object.rds"))
river_model <- river_data$model_params
# GAM (genomic annotation model) parameters are stored alongside the RIVER run
gam_model <- river_data$gam_model_params
#######################################
## Compute posterior probabilities on all variants based on fitted models
#######################################
# GAM posteriors: independent exact inference with zeroed pairwise terms
# (the two zero matrices), i.e. no edges between signals
gam_posterior_obj <- update_independent_marginal_probabilities_exact_inference_cpp(predictions_feat, predictions_binary_outliers, gam_model$gam_parameters$theta_singleton, gam_model$gam_parameters$theta_pair, gam_model$gam_parameters$theta, matrix(0,2,2), matrix(0,2,2), number_of_dimensions, choose(number_of_dimensions, 2), FALSE)
gam_posteriors <- gam_posterior_obj$probability
watershed_info_test <- update_marginal_posterior_probabilities(predictions_feat, predictions_discretized_outliers, watershed_model)
watershed_posteriors <- watershed_info_test$probability # Marginal posteriors
river_info_test <- update_marginal_posterior_probabilities(predictions_feat, predictions_discretized_outliers, river_model)
river_posteriors <- river_info_test$probability # Marginal posteriors
print(summary(gam_posteriors))
#######################################
## Save predictions to output file
#######################################
# One row per sample: observed p-values plus GAM / RIVER / Watershed
# posteriors for each of the three outlier classes
posterior_mat <- cbind(rownames(predictions_feat), predictions_pvalues_outliers, gam_posteriors, river_posteriors, watershed_posteriors)
colnames(posterior_mat) = c("sample_names","splicing_outlier_pvalue", "total_expression_outlier_pvalue", "ase_outlier_pvalue", "splicing_gam_posterior", "total_expression_gam_posterior", "ase_gam_posterior", "splicing_river_posterior", "total_expression_river_posterior", "ase_river_posterior", "splicing_watershed_posterior", "total_expression_watershed_posterior", "ase_watershed_posterior")
write.table(posterior_mat,file=paste0(output_stem,"_posteriors.txt"), sep="\t", quote=FALSE, row.names=FALSE)
# Dead code: this block is deliberately disabled (if (FALSE)). It holds an
# older alternative posterior computation kept for reference; it also
# references tied_gam_data / gam_data which are only defined in the next
# (also disabled) block — it would not run as-is.
if (FALSE) {
# Watershed model
watershed_posterior_list <- update_marginal_probabilities_exact_inference_cpp(predictions_feat, predictions_discretized_outliers, watershed_model$theta_singleton, watershed_model$theta_pair, watershed_model$theta, watershed_model$phi$inlier_component, watershed_model$phi$outlier_component, watershed_model$number_of_dimensions, choose(watershed_model$number_of_dimensions, 2), TRUE)
watershed_posteriors <- watershed_posterior_list$probability
# Watershed-independent model
river_posterior_list <- update_marginal_probabilities_exact_inference_cpp(predictions_feat, predictions_discretized_outliers, river_model$theta_singleton, river_model$theta_pair, river_model$theta, river_model$phi$inlier_component, river_model$phi$outlier_component, river_model$number_of_dimensions, choose(river_model$number_of_dimensions, 2), TRUE)
river_posteriors <- river_posterior_list$probability
# Tied GAM model
tied_gam_posterior_test <- update_marginal_probabilities_exact_inference_cpp(predictions_feat, predictions_discretized_outliers, tied_gam_data$gam_parameters$theta_singleton, tied_gam_data$gam_parameters$theta_pair, tied_gam_data$gam_parameters$theta, river_model$phi$outlier_component, river_model$phi$outlier_component, number_of_dimensions, choose(number_of_dimensions, 2), FALSE)
tied_gam_posteriors <- tied_gam_posterior_test$probability
# Independent GAM model
gam_posterior_test <- update_marginal_probabilities_exact_inference_cpp(predictions_feat, predictions_discretized_outliers, gam_data$gam_parameters$theta_singleton, gam_data$gam_parameters$theta_pair, gam_data$gam_parameters$theta, river_model$phi$outlier_component, river_model$phi$outlier_component, number_of_dimensions, choose(number_of_dimensions, 2), FALSE)
gam_posteriors <- gam_posterior_test$probability
}
if (FALSE) {
phi_init <- initialize_phi(3, number_of_dimensions)
costs= c(.1, .01, 1e-3, 1e-4)
nfolds <- 4
lambda_singleton <- 0
lambda_pair <- 0
independent_variables="false"
tied_gam_data <- genomic_annotation_model_cv(training_feat, training_binary_outliers, nfolds, costs, independent_variables)
# Fit the full Watershed model (CRF with pairwise edges between outlier
# signals), warm-started from the tied-GAM parameters, and persist both
# fitted objects to disk.
watershed_model <- integratedEM(training_feat, training_discretized_outliers, phi_init, tied_gam_data$gam_parameters$theta_pair, tied_gam_data$gam_parameters$theta_singleton, tied_gam_data$gam_parameters$theta, pseudocount, tied_gam_data$lambda, lambda_singleton, lambda_pair, number_of_dimensions, inference_method, independent_variables)
saveRDS(watershed_model, file=paste0(output_stem, "_watershed_model.rds"))
saveRDS(tied_gam_data, file=paste0(output_stem, "_tied_gam_model.rds"))
# Refit with independent outlier dimensions (RIVER-style model: no edges
# between signals). NOTE(review): the flag is the *string* "true", not the
# logical TRUE — presumably genomic_annotation_model_cv/integratedEM compare
# against the string; confirm downstream. Also note `=` assignment here,
# inconsistent with the `<-` used elsewhere in this script.
independent_variables="true"
gam_data <- genomic_annotation_model_cv(training_feat, training_binary_outliers, nfolds, costs, independent_variables)
river_model <- integratedEM(training_feat, training_discretized_outliers, phi_init, gam_data$gam_parameters$theta_pair, gam_data$gam_parameters$theta_singleton, gam_data$gam_parameters$theta, pseudocount, gam_data$lambda, lambda_singleton, lambda_pair, number_of_dimensions, inference_method, independent_variables)
saveRDS(river_model, file=paste0(output_stem, "_river_model.rds"))
saveRDS(gam_data, file=paste0(output_stem, "_independent_gam_model.rds"))
#######################################
## Compute posterior probabilities based on fitted models
## (sample-level held-out predictions; four models, same prediction data)
#######################################
# Watershed model (final TRUE flag: use phi emission components)
watershed_posterior_list <- update_marginal_probabilities_exact_inference_cpp(predictions_feat, predictions_discretized_outliers, watershed_model$theta_singleton, watershed_model$theta_pair, watershed_model$theta, watershed_model$phi$inlier_component, watershed_model$phi$outlier_component, watershed_model$number_of_dimensions, choose(watershed_model$number_of_dimensions, 2), TRUE)
watershed_posteriors <- watershed_posterior_list$probability
# Watershed-independent (RIVER) model
river_posterior_list <- update_marginal_probabilities_exact_inference_cpp(predictions_feat, predictions_discretized_outliers, river_model$theta_singleton, river_model$theta_pair, river_model$theta, river_model$phi$inlier_component, river_model$phi$outlier_component, river_model$number_of_dimensions, choose(river_model$number_of_dimensions, 2), TRUE)
river_posteriors <- river_posterior_list$probability
# Tied GAM model.
# NOTE(review): river_model$phi$outlier_component is passed for BOTH the
# inlier and outlier phi arguments here — presumably harmless because the
# final flag is FALSE (phi likely ignored for GAM posteriors); confirm.
tied_gam_posterior_test <- update_marginal_probabilities_exact_inference_cpp(predictions_feat, predictions_discretized_outliers, tied_gam_data$gam_parameters$theta_singleton, tied_gam_data$gam_parameters$theta_pair, tied_gam_data$gam_parameters$theta, river_model$phi$outlier_component, river_model$phi$outlier_component, number_of_dimensions, choose(number_of_dimensions, 2), FALSE)
tied_gam_posteriors <- tied_gam_posterior_test$probability
# Independent GAM model (same phi-argument caveat as above)
gam_posterior_test <- update_marginal_probabilities_exact_inference_cpp(predictions_feat, predictions_discretized_outliers, gam_data$gam_parameters$theta_singleton, gam_data$gam_parameters$theta_pair, gam_data$gam_parameters$theta, river_model$phi$outlier_component, river_model$phi$outlier_component, number_of_dimensions, choose(number_of_dimensions, 2), FALSE)
gam_posteriors <- gam_posterior_test$probability
#######################################
## Save predictions to output file
## (one row per sample: p-values plus posteriors from all four models)
#######################################
posterior_mat <- cbind(rownames(predictions_feat), predictions_pvalues_outliers, gam_posteriors, tied_gam_posteriors, river_posteriors, watershed_posteriors)
colnames(posterior_mat) = c("sample_names","splicing_outlier_pvalue", "total_expression_outlier_pvalue", "ase_outlier_pvalue", "splicing_gam_posterior", "total_expression_gam_posterior", "ase_gam_posterior", "splicing_gam_crf_posterior", "total_expression_gam_crf_posterior", "ase_gam_crf_posterior", "splicing_river_posterior", "total_expression_river_posterior", "ase_river_posterior", "splicing_watershed_posterior", "total_expression_watershed_posterior", "ase_watershed_posterior")
write.table(posterior_mat,file=paste0(output_stem,"_posteriors.txt"), sep="\t", quote=FALSE, row.names=FALSE)
#######################################
## Compute posterior probabilities for variant-level samples based on the
## fitted models (same four-model pipeline, variant-level inputs)
#######################################
# Reload the models saved above (redundant in-process, but makes this
# section runnable stand-alone from the serialized .rds files).
watershed_model <- readRDS(paste0(output_stem, "_watershed_model.rds"))
tied_gam_data <- readRDS(paste0(output_stem, "_tied_gam_model.rds"))
river_model <- readRDS(paste0(output_stem, "_river_model.rds"))
gam_data <- readRDS(paste0(output_stem, "_independent_gam_model.rds"))
#
# Watershed model
watershed_posterior_list <- update_marginal_probabilities_exact_inference_cpp(predictions_variant_level_feat, predictions_variant_level_discretized_outliers, watershed_model$theta_singleton, watershed_model$theta_pair, watershed_model$theta, watershed_model$phi$inlier_component, watershed_model$phi$outlier_component, watershed_model$number_of_dimensions, choose(watershed_model$number_of_dimensions, 2), TRUE)
watershed_posteriors <- watershed_posterior_list$probability
# Watershed-independent (RIVER) model
river_posterior_list <- update_marginal_probabilities_exact_inference_cpp(predictions_variant_level_feat, predictions_variant_level_discretized_outliers, river_model$theta_singleton, river_model$theta_pair, river_model$theta, river_model$phi$inlier_component, river_model$phi$outlier_component, river_model$number_of_dimensions, choose(river_model$number_of_dimensions, 2), TRUE)
river_posteriors <- river_posterior_list$probability
# Tied GAM model (phi arguments: see NOTE(review) in the sample-level section)
tied_gam_posterior_test <- update_marginal_probabilities_exact_inference_cpp(predictions_variant_level_feat, predictions_variant_level_discretized_outliers, tied_gam_data$gam_parameters$theta_singleton, tied_gam_data$gam_parameters$theta_pair, tied_gam_data$gam_parameters$theta, river_model$phi$outlier_component, river_model$phi$outlier_component, number_of_dimensions, choose(number_of_dimensions, 2), FALSE)
tied_gam_posteriors <- tied_gam_posterior_test$probability
# Independent GAM model
gam_posterior_test <- update_marginal_probabilities_exact_inference_cpp(predictions_variant_level_feat, predictions_variant_level_discretized_outliers, gam_data$gam_parameters$theta_singleton, gam_data$gam_parameters$theta_pair, gam_data$gam_parameters$theta, river_model$phi$outlier_component, river_model$phi$outlier_component, number_of_dimensions, choose(number_of_dimensions, 2), FALSE)
gam_posteriors <- gam_posterior_test$probability
#######################################
## Save variant-level predictions to output file
#######################################
posterior_mat <- cbind(rownames(predictions_variant_level_feat), predictions_variant_level_pvalues_outliers, gam_posteriors, tied_gam_posteriors, river_posteriors, watershed_posteriors)
colnames(posterior_mat) = c("sample_names","splicing_outlier_pvalue", "total_expression_outlier_pvalue", "ase_outlier_pvalue", "splicing_gam_posterior", "total_expression_gam_posterior", "ase_gam_posterior", "splicing_gam_crf_posterior", "total_expression_gam_crf_posterior", "ase_gam_crf_posterior", "splicing_river_posterior", "total_expression_river_posterior", "ase_river_posterior", "splicing_watershed_posterior", "total_expression_watershed_posterior", "ase_watershed_posterior")
write.table(posterior_mat,file=paste0(output_stem,"_variant_level_posteriors.txt"), sep="\t", quote=FALSE, row.names=FALSE)
}
if (FALSE) {
#######################################
## Visualize Predictions
## Disabled block (`if (FALSE)`): exploratory plotting of the sample-level
## posteriors written above. Most calls are intentionally commented out;
## only the two GAM-vs-outlier scatterplots at the bottom run when enabled.
#######################################
data <- read.table(paste0(output_stem,"_posteriors.txt"), header=TRUE)
# Look at distribution of watershed posteriors depending on:
#### Which outlier signals are observed (x-axis)
#### which outlier posterior we are looking at (y-axis)
########################################
output_file <- paste0(output_stem, "_watershed_posterior_distributions.pdf")
#visualize_watershed_posterior_distributions(data, output_file)
# Limit to fully observed cases
# Have one plot per outlier type showing:
### x-axis River score
### Y-axis watershed score
### Colored by classes independent-GAM score
########################################
output_file <- paste0(output_stem, "_fully_observed_watershed_river_scatterplot_comparison_colored_by_classes_independent_gam_score.pdf")
#fully_observed_watershed_river_scatterplot_comparison_colored_by_classes_independent_gam_score(data, output_file)
# Limit to fully observed cases
# Have one plot per outlier type showing:
### x-axis River score
### Y-axis watershed score
### Colored by average outlier score
########################################
output_file <- paste0(output_stem, "_fully_observed_watershed_river_scatterplot_comparison_colored_by_classes_outlier_score.pdf")
#fully_observed_watershed_river_scatterplot_comparison_colored_by_classes_outlier_score(data, output_file)
# Limit to fully observed cases
# Have one plot per outlier type showing:
### x-axis River score
### Y-axis watershed score
### Colored by median outlier score
########################################
output_file <- paste0(output_stem, "_fully_observed_watershed_river_scatterplot_comparison_colored_by_median_outlier_score.pdf")
#fully_observed_watershed_river_scatterplot_comparison_colored_by_median_outlier_score(data, output_file)
# Limit to fully observed cases
# Have one plot per outlier type showing:
### x-axis River score
### Y-axis watershed score
### Colored by classes outlier score
########################################
output_file <- paste0(output_stem, "_fully_observed_watershed_river_scatterplot_comparison_colored_by_average_outlier_score.pdf")
#fully_observed_watershed_river_scatterplot_comparison_colored_by_average_outlier_score(data, output_file)
# Limit to fully observed cases
# Have one plot per outlier type showing:
### x-axis median outlier pvalue across observed tissues
### Y-axis GAM posterior
### Colored by watershed posterior
########################################
output_file <- paste0(output_stem, "_fully_observed_gam_outlier_scatterplot_comparison_colored_by_watershed_score.pdf")
fully_observed_gam_outlier_scatterplot_comparison_colored_by_watershed(data, output_file)
# Limit to missing cases for each outlier type
# Have one plot per outlier type showing:
### x-axis median outlier pvalue across observed tissues
### Y-axis GAM Posterior
### Colored by watershed score
########################################
# Fixed: filename previously contained a literal space
# ("...by_watershed score.pdf"), inconsistent with every other output name.
output_file <- paste0(output_stem, "_missing_gam_outlier_scatterplot_comparison_colored_by_watershed_score.pdf")
missing_gam_outlier_scatterplot_comparison_colored_by_watershed_score(data, output_file)
}
|
1fdffd7bb45b293506be1c2a360dfe2ff42127c2
|
69a0b504229a3ad9002e72612f4528fcc0baef0b
|
/R/index.R
|
bed433715a511040c56da49545c276a2544ca59a
|
[] |
no_license
|
seaman248/An.-atroparvus-genome-comparison
|
91b37184f22848cbc55a351e2fb178efd283093a
|
d04b6672b335003b1a4aed21b0098e9feb688cf7
|
refs/heads/master
| 2021-07-03T18:57:47.445050
| 2017-03-07T06:33:01
| 2017-03-07T06:33:01
| 80,265,913
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 777
|
r
|
index.R
|
### Query data
## Get table of orthologs: sp1_gene_id / sp2_gene_id / ... / spX_gene_id
source('./R/Query/getOrthologs.R')
## Get coordinates for every gene of every species: gene_id / chr / start / stop / strand
source("./R/Query/getCoords.R")
## All data saved in ./R/Query/output_data

### Clean data
## Run the species-specific cleaning script for each species. This is pure
## side effects, so a plain `for` loop is clearer than lapply() returning a
## discarded list of NULLs. Also use FALSE (not the reassignable shorthand F).
for (sp in c('albimanus', 'atroparvus', 'gambiae')) {
  files <- list.files('./R/Clean')
  # NOTE(review): assumes exactly one file in ./R/Clean matches each species
  # name; source() would fail if grep() returned multiple matches — confirm.
  file <- paste0('./R/Clean/', files[grep(sp, files)])
  source(file, echo = FALSE)
}

## Combine all species into one table for GRIMM_synteny
source('./R/Clean/GRIMM_format.R')
### Make GRIMM synteny blocks
source('./R/GRIMM/make_blocks.R')
### Visualize results
source('./R/Visualize/visualize_blocks.R')
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.