blob_id
stringlengths 40
40
| directory_id
stringlengths 40
40
| path
stringlengths 2
327
| content_id
stringlengths 40
40
| detected_licenses
listlengths 0
91
| license_type
stringclasses 2
values | repo_name
stringlengths 5
134
| snapshot_id
stringlengths 40
40
| revision_id
stringlengths 40
40
| branch_name
stringclasses 46
values | visit_date
timestamp[us]date 2016-08-02 22:44:29
2023-09-06 08:39:28
| revision_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| committer_date
timestamp[us]date 1977-08-08 00:00:00
2023-09-05 12:13:49
| github_id
int64 19.4k
671M
⌀ | star_events_count
int64 0
40k
| fork_events_count
int64 0
32.4k
| gha_license_id
stringclasses 14
values | gha_event_created_at
timestamp[us]date 2012-06-21 16:39:19
2023-09-14 21:52:42
⌀ | gha_created_at
timestamp[us]date 2008-05-25 01:21:32
2023-06-28 13:19:12
⌀ | gha_language
stringclasses 60
values | src_encoding
stringclasses 24
values | language
stringclasses 1
value | is_vendor
bool 2
classes | is_generated
bool 2
classes | length_bytes
int64 7
9.18M
| extension
stringclasses 20
values | filename
stringlengths 1
141
| content
stringlengths 7
9.18M
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a7825abaacdbdf8ec8a19b6cdd2eac7ec0bcd8fb
|
49f984561d088814fbe3a8ac9543dd9deda61759
|
/R/plot_path_network2.R
|
0723d82b017b41d69c35b9efe12831aba5bc74c6
|
[
"MIT"
] |
permissive
|
KWB-R/fakin.path.app
|
a396a1e142e0c3c3bdac86d13f5337dfb474719b
|
3882d91321a5b40a2bda685c1e40f6acb2f503a1
|
refs/heads/master
| 2022-09-25T04:24:01.362223
| 2020-10-01T14:45:20
| 2020-10-01T14:45:20
| 194,630,996
| 0
| 0
|
MIT
| 2022-08-19T15:35:58
| 2019-07-01T08:22:38
|
R
|
UTF-8
|
R
| false
| false
| 3,472
|
r
|
plot_path_network2.R
|
# prepare_paths_for_network2 ---------------------------------------------------

# Coerce the input to a "pathlist" object.
#
# paths: either a "pathlist" object (returned unchanged) or a vector of
#   character paths to be wrapped. Objects of class "path_tree" are rejected
#   with an error (flattening support is currently disabled).
# Returns an object of class "pathlist".
prepare_paths_for_network2 <- function(paths)
{
  # Already in the target representation: nothing to do
  if (inherits(paths, "pathlist")) {
    return(paths)
  }

  # Path trees are explicitly not supported here
  if (inherits(paths, "path_tree")) {
    stop_(
      "Object of class 'path_tree' not expected in ",
      "prepare_paths_for_network2()!"
    )
    #flatten_tree(paths)
  }

  # Wrap the character paths into a pathlist object
  pathlist::pathlist(paths = paths)
}
# get_path_network2 ------------------------------------------------------------

# Create a network representation (links between folders and node metadata)
# from a set of paths.
#
# paths: a "pathlist" object or something that kwb.file::to_subdir_matrix()
#   can convert to a folder matrix
# max_depth: maximum folder depth to consider; reduced to the available
#   depth, and must end up >= 2
# reverse: if TRUE, the "source" and "target" column names are swapped
# weight_by: how link weights are computed ("n_files", "size" or "none")
# sizes: optional vector of sizes, parallel to paths; used when
#   weight_by = "size" (forwarded to get_links_at_depth2)
#
# Returns a list with "links" (data frame: source, target, value -- source
# and target as zero-based node indices) and "nodes" (data frame: path, name).
get_path_network2 <- function(
  paths, max_depth = 3, reverse = FALSE,
  weight_by = c("n_files", "size", "none")[1], sizes = NULL
)
{
  is_pathlist <- inherits(paths, "pathlist")

  # Create data frame with each column representing a folder depth level
  folder_data <- kwb.utils::asNoFactorDataFrame(
    if (is_pathlist) {
      paths@folders
    } else {
      kwb.file::to_subdir_matrix(paths, dbg = FALSE)
    }
  )

  # Reduce max_depth to the number of available columns
  max_depth <- min(
    max_depth,
    if (is_pathlist) max(pathlist::depth(paths)) else ncol(folder_data)
  )

  # We need at least a depth of two
  stopifnot(max_depth >= 2)

  # Collect the folder-to-folder links for each depth level 2..max_depth.
  # NOTE(review): folder_data is already a data frame here, so the
  # "pathlist" branch inside get_links_at_depth2() appears unreachable from
  # this call site -- verify.
  links <- do.call(rbind, lapply(
    2:max_depth, get_links_at_depth2, folder_data, weight_by = weight_by,
    sizes = sizes
  ))

  # All distinct node names from the source/target columns (column 3 = value)
  node_names <- unique(unlist(links[, -3]))

  # Replace node names by zero-based indices (presumably required by a
  # JavaScript-based network widget -- confirm with the plotting code)
  get_matching_index <- function(x) match(x, node_names) - 1
  links$source <- get_matching_index(links$source)
  links$target <- get_matching_index(links$target)

  # Swap the names of columns "source" and "target" for reverse = TRUE
  if (isTRUE(reverse)) {
    elements <- c("source", "target")
    indices <- match(elements, names(links))
    names(links)[indices] <- rev(elements)
  }

  # Node metadata: full path and the folder's base name
  nodes <- kwb.utils::noFactorDataFrame(
    path = node_names, name = basename(node_names)
  )

  list(links = links, nodes = nodes)
}
# get_links_at_depth2 ----------------------------------------------------------

# Create the links between folders at depth i-1 and folders at depth i.
#
# i: depth level (>= 2) at which links are created
# folder_data: data frame/matrix with one column per folder depth level, or
#   a "pathlist" object
# weight_by: "n_files" (count paths), "size" (sum of sizes) or "none"
#   (constant weight 1)
# sizes: optional vector of sizes, parallel to the rows of folder_data;
#   if NULL, each path contributes a size of 1
#
# Returns a data frame with columns "source" (path up to depth i-1),
# "target" (path up to depth i) and "value" (aggregated weight).
get_links_at_depth2 <- function(
  i, folder_data, weight_by = c("n_files", "size", "none")[1], sizes = NULL
)
{
  stopifnot(weight_by %in% c("n_files", "size", "none"))

  is_pathlist <- inherits(folder_data, "pathlist")

  if (is_pathlist) {
    # Keep only paths that reach at least depth i, then take the first i
    # folder levels
    is_deeper <- pathlist::depth(folder_data) >= i
    folder_data <- folder_data[is_deeper]
    source_data <- folder_data@folders[, seq_len(i)]
    source_data <- kwb.utils::asNoFactorDataFrame(source_data)
  } else {
    # Select the first i columns
    source_data <- folder_data[, seq_len(i)]
    # Exclude rows being empty in the i-th column
    is_deeper <- source_data[, i] != ""
    source_data <- source_data[is_deeper, ]
  }

  # Subset sizes accordingly (NULL[is_deeper] stays NULL); fall back to a
  # constant size of 1 per path if no sizes were given
  sizes <- sizes[is_deeper]
  source_data$size <- kwb.utils::defaultIfNULL(sizes, 1)

  # NOTE(review): n_levels is assigned but not used below -- leftover?
  n_levels <- ncol(source_data) - 1

  # Aggregation function: count paths, sum sizes, or constant weight
  FUN <- list(n_files = length, size = sum, none = function(x) 1)[[weight_by]]

  # Aggregate the size column over all unique folder combinations
  stats <- stats::aggregate(
    x = source_data$size,
    by = kwb.utils::removeColumns(source_data, "size"),
    FUN = FUN
  )

  # Helper: paste the first n folder columns into a "/"-separated path
  n_columns_to_path <- function(data, n) {
    kwb.utils::pasteColumns(data[, seq_len(n), drop = FALSE], sep = "/")
  }

  # Create the data frame linking source to target nodes with value as weight
  kwb.utils::noFactorDataFrame(
    source = n_columns_to_path(stats, i - 1),
    target = n_columns_to_path(stats, i),
    value = stats$x
  )
}
|
f3911fd790f88d9516476ed9ad4d0e0bbf040fa1
|
d4917fb5c96856ac3f5e64bda6bf0ad2c56a72eb
|
/OCA/baseline.R
|
fe7fae00a4b75d5724d83dfba63393cb0cc705d0
|
[] |
no_license
|
mariosegal/Consulting
|
a45a3668f92d9a4a6e4bb04e02e38f1704b4d85f
|
3bb424679bae299421b29159b0b7df32e8c9e536
|
refs/heads/master
| 2016-09-10T20:01:01.789353
| 2015-06-12T18:12:57
| 2015-06-12T18:12:57
| 37,323,705
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 263
|
r
|
baseline.R
|
# Quick exploratory checks on the complaints data.
# NOTE(review): `complaints` is assumed to already exist in the session --
# it is neither read nor created in this file.

# Peek at the first rows / columns
head(complaints)
# Distribution of the top-level category, showing NAs explicitly
table(complaints$level_1,useNA='ifany')
# Cross-tabulation of second-level by first-level category
table(complaints$level_2,complaints$level_1,useNA='ifany')
# Third-level breakdown restricted to checking-account records
table(complaints$level_3[complaints$level_1=='Bank Account or Service' & complaints$level_2=='Checking account'])
# Overall dimensions of the data set
dim(complaints)
|
4559cf8a4751422d4912cc519591b51170e443a5
|
a6214d7ecd758270d27592c6affe5df3bfd316a2
|
/ledgerplots/man/generate.price.table.Rd
|
c40dab83722e6a0894934cbc9834a26d8b7b8737
|
[
"MIT"
] |
permissive
|
RastiGiG/ledger-plots
|
3c56fa0a98f0f347ad4a2045742f3d70f76913ff
|
b8ddb3bf32d51f9ad01ec60cb259fe9815495d38
|
refs/heads/master
| 2023-03-18T00:09:24.522680
| 2018-11-26T20:38:41
| 2018-11-26T20:38:41
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 669
|
rd
|
generate.price.table.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/ledger-functions.R
\name{generate.price.table}
\alias{generate.price.table}
\title{Generate a latex table with food prices}
\usage{
generate.price.table(FUN, query, ofile = "food-prices.tex",
ledger.options, ledger.path = NULL, conversion = c("1kg = 1000g"))
}
\arguments{
\item{FUN}{list of functions to call on prices}
\item{query}{ledger query}
\item{ofile}{output filename}
\item{ledger.options}{extra options passed to ledger}
\item{ledger.path}{path to the ledger binary}
\item{conversion}{volume unit conversion rules}
}
\description{
Generate a latex table with food prices
}
|
df5ac5b2cd1893cfebff4a447267464e458c8475
|
6a28ba69be875841ddc9e71ca6af5956110efcb2
|
/Miller_And_Freund_S_Probability_And_Statistics_For_Engineers_by_Richard_A._Johnson/CH7/EX7.3/EX7_3.R
|
e5886d20f614a37af65cfc26a3f98234e7c2efcc
|
[] |
permissive
|
FOSSEE/R_TBC_Uploads
|
1ea929010b46babb1842b3efe0ed34be0deea3c0
|
8ab94daf80307aee399c246682cb79ccf6e9c282
|
refs/heads/master
| 2023-04-15T04:36:13.331525
| 2023-03-15T18:39:42
| 2023-03-15T18:39:42
| 212,745,783
| 0
| 3
|
MIT
| 2019-10-04T06:57:33
| 2019-10-04T05:57:19
| null |
UTF-8
|
R
| false
| false
| 158
|
r
|
EX7_3.R
|
# Example 7.3: maximum error of the estimate of a mean at 99 % confidence.
n <- 150                 # sample size
sigma <- 6.2             # population standard deviation
z_half_alpha <- 2.575    # z value cutting off 0.005 in each tail
# Maximum error E = z * sigma / sqrt(n)
E <- sigma * z_half_alpha / sqrt(n)
E
message("Thus, the engineer can assert with probability 0.99 that his error will be at
most 1.30.")
|
5c0c26ad15f344e8d38b66ef318a953fa635a3f2
|
f9f2dbb4dafe94e4fa9fa9cad75944188c023632
|
/data/eda/plot_pr.R
|
a99aff7f5d51d56a2edd6428c029f7e0beb835ac
|
[] |
no_license
|
ABlack-git/yolo_object_detection
|
2c3c869a3867bb79274bd9ea236ae3f7e955bbab
|
069b86c2cc5702e095d59a5e9c99549be8393675
|
refs/heads/master
| 2021-04-09T14:14:49.303720
| 2019-01-30T12:17:15
| 2019-01-30T12:17:15
| 125,732,815
| 0
| 0
| null | 2018-12-23T17:35:40
| 2018-03-18T14:23:45
|
Python
|
UTF-8
|
R
| false
| false
| 4,792
|
r
|
plot_pr.R
|
# Plot training vs. validation precision and recall curves, read from
# TensorBoard CSV exports, for two models ("6L" and "8L").
# NOTE(review): all input paths are absolute, machine-specific paths.
library(ggplot2)
library(dplyr)
library(RColorBrewer)
library(scales)

###MODEL 6L#####
#precision
tr_prec_1 <- read.csv('/Users/mac/Desktop/model+6l_valid/train/prec/run_10_11_2018__18-12-tag-train_avg_prec.csv')
tr_prec_2 <- read.csv('/Users/mac/Desktop/model+6l_valid/train/prec/run_10_11_2018__21-10-tag-train_avg_prec.csv')
tr_prec_3 <- read.csv('/Users/mac/Desktop/model+6l_valid/train/prec/run_11_11_2018__0-20-tag-train_avg_prec.csv')
tr_prec_4 <- read.csv('/Users/mac/Desktop/model+6l_valid/train/prec/run_11_11_2018__3-22-tag-train_avg_prec.csv')
tr_prec <- rbind(tr_prec_1, tr_prec_2, tr_prec_3, tr_prec_4)

val_prec_1 <- read.csv('/Users/mac/Desktop/model+6l_valid/valid/prec/run_10_11_2018__18-12-tag-validation_avg_prec.csv')
val_prec_2 <- read.csv('/Users/mac/Desktop/model+6l_valid/valid/prec/run_10_11_2018__21-10-tag-validation_avg_prec.csv')
val_prec_3 <- read.csv('/Users/mac/Desktop/model+6l_valid/valid/prec/run_11_11_2018__0-20-tag-validation_avg_prec.csv')
val_prec_4 <- read.csv('/Users/mac/Desktop/model+6l_valid/valid/prec/run_11_11_2018__3-22-tag-validation_avg_prec.csv')
val_prec <- rbind(val_prec_1, val_prec_2, val_prec_3, val_prec_4)

# Precision over training steps, training (blue points) vs validation (red)
ggplot(tr_prec, aes(x=Step, y=Value))+
  geom_point(aes(color='Training precision'), shape=20)+
  geom_line(aes(color='Training precision'))+
  geom_point(data=val_prec, color='red', shape=20)+
  geom_line(data=val_prec, aes(color='Validation precision'))+
  scale_color_manual("", breaks=c('Validation precision', 'Training precision'), values=c('blue', 'red'))+
  labs(y='Precision')+
  theme(legend.position = 'top', legend.text = element_text(size=12))

#recall
tr_rec_1 <- read.csv('/Users/mac/Desktop/model+6l_valid/train/recall/1.csv')
tr_rec_2 <- read.csv('/Users/mac/Desktop/model+6l_valid/train/recall/2.csv')
tr_rec_3 <- read.csv('/Users/mac/Desktop/model+6l_valid/train/recall/3.csv')
tr_rec_4 <- read.csv('/Users/mac/Desktop/model+6l_valid/train/recall/4.csv')
tr_rec <- rbind(tr_rec_1, tr_rec_2, tr_rec_3, tr_rec_4)

val_rec_1 <- read.csv('/Users/mac/Desktop/model+6l_valid/valid/recall/1.csv')
val_rec_2 <- read.csv('/Users/mac/Desktop/model+6l_valid/valid/recall/2.csv')
val_rec_3 <- read.csv('/Users/mac/Desktop/model+6l_valid/valid/recall/3.csv')
val_rec_4 <- read.csv('/Users/mac/Desktop/model+6l_valid/valid/recall/4.csv')
val_rec <- rbind(val_rec_1, val_rec_2, val_rec_3, val_rec_4)

# Recall over training steps, same layout as the precision plot
ggplot(tr_rec, aes(x=Step, y=Value))+
  geom_point(aes(color='Training recall'), shape=20)+
  geom_line(aes(color='Training recall'))+
  geom_point(data=val_rec, color='red', shape=20)+
  geom_line(data=val_rec, aes(color='Validation recall'))+
  scale_color_manual("", breaks=c('Validation recall', 'Training recall'), values=c('blue', 'red'))+
  labs(y='Recall')+
  theme(legend.position = 'top', legend.text = element_text(size=12))

####MODEL 8L####
#precision
tr_prec_1 <- read.csv('/Users/mac/Desktop/model_8l/train/prec/1.csv')
tr_prec_2 <- read.csv('/Users/mac/Desktop/model_8l/train/prec/2.csv')
tr_prec_3 <- read.csv('/Users/mac/Desktop/model_8l/train/prec/3.csv')
tr_prec <- rbind(tr_prec_1, tr_prec_2, tr_prec_3)

val_prec_1 <- read.csv('/Users/mac/Desktop/model_8l/val/prec/1.csv')
val_prec_2 <- read.csv('/Users/mac/Desktop/model_8l/val/prec/2.csv')
val_prec_3 <- read.csv('/Users/mac/Desktop/model_8l/val/prec/3.csv')
val_prec <- rbind(val_prec_1, val_prec_2, val_prec_3)

ggplot(tr_prec, aes(x=Step, y=Value))+
  geom_point(aes(color='Training precision'), shape=20)+
  geom_line(aes(color='Training precision'))+
  geom_point(data=val_prec, color='red', shape=20)+
  geom_line(data=val_prec, aes(color='Validation precision'))+
  scale_color_manual("", breaks=c('Validation precision', 'Training precision'), values=c('blue', 'red'))+
  labs(y='Precision')+
  theme(legend.position = 'top', legend.text = element_text(size=12))

#recall
tr_rec_1 <- read.csv('/Users/mac/Desktop/model_8l/train/recall/1.csv')
tr_rec_2 <- read.csv('/Users/mac/Desktop/model_8l/train/recall/2.csv')
tr_rec_3 <- read.csv('/Users/mac/Desktop/model_8l/train/recall/3.csv')
# FIX: was `tr_rec <<-` (global-assignment operator); at top level this is
# equivalent to `<-` but misleading -- use the ordinary local assignment.
tr_rec <- rbind(tr_rec_1, tr_rec_2, tr_rec_3)

val_rec_1 <- read.csv('/Users/mac/Desktop/model_8l/val/recall/1.csv')
val_rec_2 <- read.csv('/Users/mac/Desktop/model_8l/val/recall/2.csv')
val_rec_3 <- read.csv('/Users/mac/Desktop/model_8l/val/recall/3.csv')
val_rec <- rbind(val_rec_1, val_rec_2, val_rec_3)

ggplot(tr_rec, aes(x=Step, y=Value))+
  geom_point(aes(color='Training recall'), shape=20)+
  geom_line(aes(color='Training recall'))+
  geom_point(data=val_rec, color='red', shape=20)+
  geom_line(data=val_rec, aes(color='Validation recall'))+
  scale_color_manual("", breaks=c('Validation recall', 'Training recall'), values=c('blue', 'red'))+
  labs(y='Recall')+
  theme(legend.position = 'top', legend.text = element_text(size=12))
|
72b09b31b38951d423100b92670a0887dd8adc35
|
1a5f9e53317490c0b14a0e81bf5f1e38193c1e5e
|
/man/english-package.Rd
|
57ceac8296a31e5465085ae9183ccfb570976ed8
|
[] |
no_license
|
spinkney/english
|
e3311613090ac4e145166c08db19884745b025a3
|
df4447982ba87e29cd4cfb62318e317d1d0390f6
|
refs/heads/master
| 2021-06-14T15:16:54.107056
| 2017-03-16T07:44:04
| 2017-03-16T07:44:04
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,598
|
rd
|
english-package.Rd
|
\name{english-package}
\alias{english-package}
\docType{package}
\title{
English
}
\description{
A simple facility to provide an english language representation of
integer vectors.
}
\details{
\tabular{ll}{
Package: \tab english\cr
Type: \tab Package\cr
Version: \tab 1.1-2\cr
Date: \tab 2017-03-16\cr
License: \tab GPL-2\cr
LazyLoad: \tab yes\cr
}
In answer to a question on R-help John Fox provided an elegant R
function to translate integers into English numbers. The present
package extends this code to an S3 class, with constructor functions and
methods to make this original idea more conveniently available.
The function \code{as.english} is intended to provide a parallel
facility to the function \code{as.roman} in the \code{utils} package.
The main purpose of the package is to present an interesting programming
example rather than to solve a likely real problem, though there could
well be some applications in unusual contexts.
}
\author{
John Fox and Bill Venables with additional ideas and code from Anthony Damico.
Maintainer: Bill Venables, <Bill.Venables@gmail.com>
}
\references{
See original note by John Fox in the Programmers Niche section of
\url{https://cran.r-project.org/doc/Rnews/Rnews_2005-1.pdf}.
}
\keyword{ package }
\seealso{
\code{\link[utils]{as.roman}}.
}
\examples{
english(1:10)^2 + 1:10
(x <- english(sample(1:100, 10)))
sort(x)
toupper(english(1:10))
## For mothers of small children:
cat(paste("This is the", ordinal(1:5), "time I've told you!"), sep = "\n")
}
|
7a3a8a737fbf7494b12240c727dd34e24b4d975d
|
a4595b26f0abe6c53242e2e0072f6b8750900f03
|
/src/gmy.R
|
7c418d67fa76988a2356039100bae0321685b7c2
|
[] |
no_license
|
xiaoran831213/DeepG
|
eedcd159594bcd2248b2e9a746e271b8e25ae666
|
de3b2ae9bdfca9cd3feb16f1ca876d0852d0cbc7
|
refs/heads/master
| 2020-05-22T06:54:28.898259
| 2020-05-15T15:39:47
| 2020-05-15T15:39:47
| 62,501,040
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,021
|
r
|
gmy.R
|
source('src/dsg.R')
source('src/utl.R')
source('src/hlp.R')
source('src/hwu.R')
source('src/gsm.R')
source('src/tgz.R')
## sim('sim/GXP/01/000011873')

## Run one simulated association test on a genotype archive.
##
## .fn : RDS file holding a list with $gmx (genotype array: variants x
##       subjects x 2 allele layers), $enc (list of encodings, each with
##       $hof, $rsd, $nhf) and $seq
## ssz : sample size; defaults to the number of subjects in the genotypes
## mdl : model formula handed to gem()
## fam : distribution family; the unevaluated name is looked up in
##       .families, defaulting to gau
## r2  : effect size (proportion of variance) passed to fam$ign()
## pop : population weights for multi-population sampling (NULL = single)
## ... : forwarded to gem() and recorded via .record()
##
## Returns a data frame of p-values per encoding, or NULL for degenerated
## genotype data.
sim <- function(.fn, ssz=NULL, mdl=~g, fam=NULL, r2=.05, pop=NULL, ...)
{
    ## fetch necessary data & information
    cat(.fn, ': ', sep='')
    dat <- readRDS(.fn)
    gmx <- dat$gmx
    enc <- dat$enc
    seq <- dat$seq

    ## genome data matrices: allele dosage = sum of the two allele layers
    dsg <- gmx[, , 1] + gmx[, , 2]
    dim(dsg) <- dim(gmx)[1:2]
    dsg <- impute(dsg)                  # imputation
    dsg <- rmDgr(dsg)                   # cleanup
    if(length(dsg) == 0)
    {
        cat('degenerated\n')
        return(NULL)
    }
    ## indicator matrices (dosage > 0, dosage > 1) stacked row-wise
    xmx <- rbind((dsg > 0) * 1, (dsg > 1) * 1)

    ## * ---------- [ simulation ] ---------- *
    if(is.null(ssz))
        ssz <- ncol(dsg)
    ## NOTE(review): the condition compares ssz with nrow(dsg) although the
    ## sampling below draws columns (subjects) -- looks like it should be
    ## ncol(dsg); confirm before relying on this branch.
    if(ssz < nrow(dsg))
    {
        idx <- sample.int(ncol(dsg), ssz)
        dsg <- dsg[, idx, drop=F]
        ## NOTE(review): `hof` and `rsd` are not defined at this point (they
        ## only exist inside the elements of dat$enc); taking this branch
        ## would raise "object 'hof' not found".
        hof <- hof[, idx, drop=F]
        rsd <- rsd[, idx, drop=F]
        xmx <- xmx[, idx, drop=F]
    }

    ## link function, distribution family (name evaluated in .families)
    fam <- substitute(fam)
    fam <- if(is.null(fam)) gau else fam
    fam <- eval(fam, .families)

    ## multiple populations: .pp holds the per-population sampling weights
    .pp <- if(is.null(pop)) 1 else pop
    if(length(.pp) == 1)
        .pp <- rep(1, .pp)
    ## assign every subject to one randomly drawn population
    .i <- cbind(1:ssz, sample.int(length(.pp), ssz, T, .pp))
    ## per-population genetic effects; matrix-index with .i picks each
    ## subject's effect from its own population's column
    xb <- sapply(1:length(.pp), function(.)
    {
        x <- gem(mdl, dsg, ...)         # terms
        b <- rnorm(ncol(x))             # coefs
        xb <- x %*% b                   # effects
        drop(xb)
    })[.i]
    ## simulate the response with the requested effect size
    y <- fam$ign(xb, r2=r2)

    ## * -------- U sta and P val -------- *
    wgt.dsg <- .wct(.hwu.GUS(t(dsg)))
    wgt.xmx <- .wct(.hwu.GUS(t(xmx)))
    wgt.ibs <- .wct(.hwu.IBS(t(dsg)))
    pvl.dsg <- hwu.dg2(y, wgt.dsg)
    pvl.xmx <- hwu.dg2(y, wgt.xmx)
    pvl.ibs <- hwu.dg2(y, wgt.ibs)
    ## p-values for every encoding in the archive
    ret <- sapply(enc, function(e)
    {
        wgt.hof <- .wct(.hwu.GUS(t(e$hof)))
        wgt.rsd <- .wct(.hwu.GUS(t(e$rsd)))
        c(nhf=e$nhf,
          pvl.hof=hwu.dg2(y, wgt.hof),
          pvl.rsd=hwu.dg2(y, wgt.rsd))
    }, USE.NAMES=F)
    ret <- t(ret)

    fam <- fam$tag
    if(is.null(pop))
        rm(pop)
    else
        pop <- paste(pop, collapse=',')
    ## progress line: family tag plus the two main p-values
    .m <- sprintf('%s %3.1e %3.1e', fam, pvl.dsg, pvl.xmx)
    cat(.m, '\n')
    mdl <- mdl.str(mdl)
    ngv <- nrow(gmx)
    ## NOTE(review): pvl.ibs, mdl, ngv and seq are computed but not used
    ## directly below; presumably .record(...) captures them from the
    ## calling frame -- verify in its definition.
    ret <- cbind(do.call(data.frame, .record(...)), ret)
    ret
}
## Run `rep` simulations over a directory (or single file) of genotype
## archives and optionally save the collected result table.
##
## fr  : directory containing .tgz archives, or the path of one archive
## out : output file for the result table (suffix normalized to .rds);
##       NULL skips saving
## r2, mdl, ... : forwarded to sim()
## rep : number of repetitions; archives are drawn with replacement
##
## Returns the combined result table, invisibly.
main <- function(fr='sim/GMX/TE2', out=NULL, r2=.05, mdl=~g, rep=10, ...)
{
    if(file.exists(fr) && file.info(fr)$isdir)
        ## FIX: dir() expects a regular expression; the former pattern
        ## '*.tgz' was a shell glob (and matched too loosely as a regex).
        ## Also spell out TRUE instead of the reassignable alias T.
        fns <- dir(fr, '\\.tgz$', full.names=TRUE)
    else
        fns <- fr

    ## correct file suffix if the bare name was given
    fns[file.exists(paste0(fns, '.tgz'))] <- paste0(fns, '.tgz')
    fns[file.exists(paste0(fns, '.tar.gz'))] <- paste0(fns, '.tar.gz')

    ## draw `rep` archives with replacement
    fns <- sample(fns, rep, replace=TRUE)

    ## run the simulations and merge the per-archive result tables
    ret <- lapply(fns, sim, r2=r2, mdl=mdl, ...)
    ret <- lol2tab(ret)

    if(!is.null(out))
    {
        out <- sub('[.].*$', '.rds', out)
        saveRDS(ret, out)
    }
    invisible(ret)
}
|
6e91cdc116a6a9c76fb4abf629a2cc4f37e56e68
|
d07115488889d09ff1f5c61b5b469c78fb7473a6
|
/man-roxygen/all.R
|
ba1db6a107de54b335f8a9e7f00c59bed6bce266
|
[
"MIT"
] |
permissive
|
arturochian/gistr
|
68bd45c9937130d2b17d19ade5f5594942307db0
|
a033c72d36ed3c5042e2e4f627730652af1292ea
|
refs/heads/master
| 2021-01-21T18:17:58.513212
| 2014-10-14T15:34:18
| 2014-10-14T15:34:18
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 114
|
r
|
all.R
|
#' @param verbose Print informative messages (default TRUE)
#' @param callopts Curl options passed on to httr::GET
|
9c601e4a104fd5b4a3f48fdb586acd75439e1223
|
e89eb909b5c8920bdb9cb99041632e5d10807f6c
|
/man/impute.zscore.Rd
|
125a12a51d12106665cbeb809d777d5a44443193
|
[] |
no_license
|
drveera/metaxcanr
|
6ac732b3096f433ecdc52662d5d64aacc1f3f5b6
|
3c62801378f95f834c604b021abe79771798b3da
|
refs/heads/master
| 2021-01-25T12:13:33.402032
| 2019-04-05T09:16:21
| 2019-04-05T09:16:21
| 123,457,984
| 3
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 456
|
rd
|
impute.zscore.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/impute.zscore.R
\name{impute.zscore}
\alias{impute.zscore}
\title{impute the z score}
\usage{
impute.zscore(gene.name, gwas, db, snpcov, snpinfo)
}
\arguments{
\item{gene.name}{gene name}
\item{gwas}{gwas data frame}
\item{db}{data frame containing the prediction models}
\item{snpcov}{data frame containing the pairwise snp covariances}

\item{snpinfo}{data frame containing snp information (listed in the usage above but previously undocumented)}
}
\description{
impute the z score
}
|
620bdeacb4b2fee7330f2e099eb04bd0fd3522d7
|
48ff203020c29a310e0ff766497aa00abca12c52
|
/public/archivo/assignments/data/analisis-casencovid.R
|
b33a9307d4459738c1768c4f4a4b4c9ebd4466d4
|
[] |
no_license
|
juancarloscastillo/multivariada
|
027686435be0dc780a76c4adf6cfc64534499ce1
|
c30ee79f80c5d49487f0d428964c21983edba3b7
|
refs/heads/master
| 2022-08-02T04:55:54.364039
| 2022-07-20T08:52:49
| 2022-07-20T08:52:49
| 249,482,382
| 4
| 3
| null | 2022-06-29T13:11:35
| 2020-03-23T16:20:23
|
HTML
|
UTF-8
|
R
| false
| false
| 2,653
|
r
|
analisis-casencovid.R
|
### Analysis code ------
## Practical 5 - Simple regression ----
# Assignment 1 -----

# 1. Load libraries -----
pacman::p_load(dplyr, # Data manipulation
               sjmisc, # Tables
               summarytools, # Tables
               sjPlot, # Correlations
               ggplot2, # Plots
               webshot) # To save tables as images

# 2. Set working directory
setwd("C:/Users/Valentina Andrade/Dropbox/3. Docencia/Estadistica Multivariada/05-code")

# 3. Load the data set
load(file = "CASEN-COVID19.RData")
casen_covid19 <- dat01;remove(dat01)

# 4. Variable descriptives
# 4.1 Summary table of all variables, rendered to HTML then saved as PNG
view(dfSummary(casen_covid19, headings=FALSE), file = "tabla1.html")
webshot::install_phantomjs( force = T)
webshot("tabla1.html","tabla1.png")

# Scatterplot
# 1. Build scatterplot "g" with ggplot:
#    y = dependent variable (t_contagio), x = independent variable (hacinamiento)
g=ggplot(casen_covid19, aes(x=hacinamiento, y=t_contagio)) +
  geom_point()

# 2. Save the plot (file name first, then the plot object)
ggsave("grafico1.png",
       g)

# 5. Correlation
# Compute the correlation matrix; drop the character columns first because
# correlations are only defined between numeric variables
casen_covid19 <-
  casen_covid19 %>% select(-nombre_comuna, -nombre_region)
sjPlot::sjt.corr(casen_covid19,
                 triangle = "lower", file = "tabla2.html")
webshot("tabla2.html","tabla2.png")

# 6. Regression model
reg1 <-lm(t_contagio ~ hacinamiento, data = casen_covid19)
sjPlot::tab_model(reg1, show.ci=FALSE, file = "reg1_tab.html")
webshot("reg1_tab.html","reg1_tab.png")

# 7. Residuals
# Predicted values from the fitted coefficients
casen_covid19$estimado<- (6.441 + casen_covid19$hacinamiento*2158.467)
# Residual = observed - predicted
casen_covid19$residuo <- casen_covid19$t_contagio - casen_covid19$estimado

g2 <- ggplot(casen_covid19, aes(x=hacinamiento, y=t_contagio))+
  geom_smooth(method="lm", se=FALSE, color="lightgrey") + # Regression slope
  geom_segment(aes(xend=hacinamiento, yend=estimado), alpha = .2) + # Segments from fitted to observed values
  geom_point(aes(color = abs(residuo), size = abs(residuo))) +
  scale_color_continuous(low = "black", high = "red") +
  guides(color = FALSE, size = FALSE) +
  geom_point(aes(y=estimado), shape =1) + labs(title = "Gráfico 2. Residuos modelo de regresión", caption = "Fuente: Elaboración propia en base a CASEN-COVID19 (2020)", y = "tasa de contagio", x = "hacinamiento del hogar (media)") +
  # BUG FIX: theme_bw() used to sit on its own line without a preceding "+",
  # so its result was silently discarded; attach it to the plot chain.
  theme_bw()
g2

# 2. Save the plot (file name first, then the plot object)
ggsave("grafico2.png",
       g2)
|
cc2d3dd0ba6dc18fa960254c8f583f136b7f6676
|
977d534f6842f5483c25a3b2a8760ac61e746dd2
|
/edgelist.for.supplement.R
|
f084a65fd13b0671d398e8e4a00aa12ccb439ce0
|
[] |
no_license
|
ErinGorsich/Swine-networks
|
27efbf4c19142cc50142e110bba0442ab80ab25d
|
9b78ec6f55ea323db9cd28a7384112755473dd69
|
refs/heads/master
| 2021-01-10T17:19:53.801571
| 2020-08-03T11:15:38
| 2020-08-03T11:15:38
| 47,015,959
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,767
|
r
|
edgelist.for.supplement.R
|
# Build state-level edge lists (origin -> destination) of swine shipments
# from CVI records, one per year (2010, 2011), and write them out for the
# paper's supplement.
setwd("/Users/u1774615/Dropbox/Swine")

# Read in data; drop records with missing/zero swine counts, missing sample
# year, or missing origin/destination FIPS codes.
# FIX: removed a duplicated `NUM_SWINE > 0` filter from the original.
data.cvi<-read.csv("Swine_cvi_final.csv")
data.cvi <- data.cvi[!is.na(data.cvi$NUM_SWINE),]
data.cvi <- data.cvi[data.cvi$NUM_SWINE>0,]
data.cvi <- data.cvi[!is.na(data.cvi$SAMPLE_YEAR2),]
data.cvi <-data.cvi[!is.na(data.cvi$O_FIPS),]
data.cvi <-data.cvi[!is.na(data.cvi$D_FIPS),]
data.cvi$MOVE <- 1 # every record counts as one shipment
colnames(data.cvi)

# Subset cvi data by year.
data10 <- data.cvi[data.cvi$SAMPLE_YEAR2=="2010",]
data11 <- data.cvi[data.cvi$SAMPLE_YEAR2=="2011",]

# remove origin state FIPS 31 and 40 from the 2010 data
# NOTE(review): the reason for excluding these states is not stated here.
data10 <- data10[data10$O_ST_FIPS != 31, ]
data10 <- data10[data10$O_ST_FIPS != 40, ]

# Treat state FIPS codes and state names as character, not factors
cols <- c('O_ST_FIPS', "D_ST_FIPS", 'O_STATE', 'D_STATE')
data10[cols] <- sapply(data10[cols], as.character)
data11[cols] <- sapply(data11[cols], as.character)

# Unique (origin, destination) state pairs observed in 2010
el <- unique(cbind(data10[,"O_ST_FIPS"], data10[,"D_ST_FIPS"]))
edgelist.st.2010 <- data.frame(
    origin.state = rep("", nrow(el)),
    destination.state = rep("", nrow(el)),
    origin.fips = el[, 1],
    destination.fips = el[ , 2],
    number.shipments = rep(0, nrow(el)),
    number.swine = rep(0, nrow(el)))
# Fill in state names by matching on the FIPS codes
edgelist.st.2010$origin.state <- data10$O_STATE[match(edgelist.st.2010$origin.fips, data10$O_ST_FIPS)]
edgelist.st.2010$destination.state <- data10$D_STATE[match(edgelist.st.2010$destination.fips, data10$D_ST_FIPS)]
# Sanity check: FIPS counts and state-name counts should agree
table(edgelist.st.2010$origin.fips); table(edgelist.st.2010$origin.state)

# Same construction for 2011
el <- unique(cbind(data11[,"O_ST_FIPS"], data11[,"D_ST_FIPS"]))
edgelist.st.2011 <- data.frame(
    origin.state = rep("", nrow(el)),
    destination.state = rep("", nrow(el)),
    origin.fips = el[, 1],
    destination.fips = el[ , 2],
    number.shipments = rep(0, nrow(el)),
    number.swine = rep(0, nrow(el)))
edgelist.st.2011$origin.state <- data11$O_STATE[match(edgelist.st.2011$origin.fips, data11$O_ST_FIPS)]
edgelist.st.2011$destination.state <- data11$D_STATE[match(edgelist.st.2011$destination.fips, data11$D_ST_FIPS)]

# Aggregate shipment and swine counts onto every 2010 edge
# (seq_len() instead of 1:length(...) to be safe for zero rows)
for (i in seq_len(nrow(edgelist.st.2010))) {
    temp <- data10[c(data10$O_ST_FIPS == edgelist.st.2010$origin.fips[i] & data10$D_ST_FIPS == edgelist.st.2010$destination.fips[i]), ]
    edgelist.st.2010$number.shipments[i] <- sum(temp$MOVE)
    edgelist.st.2010$number.swine[i] <- sum(temp$NUM_SWINE)
    rm(temp)
}

# Aggregate shipment and swine counts onto every 2011 edge
for (i in seq_len(nrow(edgelist.st.2011))) {
    temp <- data11[c(data11$O_ST_FIPS == edgelist.st.2011$origin.fips[i] & data11$D_ST_FIPS == edgelist.st.2011$destination.fips[i]), ]
    edgelist.st.2011$number.shipments[i] <- sum(temp$MOVE)
    edgelist.st.2011$number.swine[i] <- sum(temp$NUM_SWINE)
    rm(temp)
}

write.csv(edgelist.st.2010, "Gorsich_Miller_et_al_state_edgelist_2010.csv")
write.csv(edgelist.st.2011, "Gorsich_Miller_et_al_state_edgelist_2011.csv")
|
7f3f62563115535273074fc91b0058c2d1aa17e9
|
0ca78ef5a8670fbdab55409eecda579cec2baf68
|
/DM/bias_computer.R
|
6308347cec8e39f1010a8a0829b03b0e9e67e378
|
[] |
no_license
|
zhurui1351/RSTOCK_TRAIL
|
ab83fdef790778a1e792d08a876522ef13a872e6
|
2396c512c8df81a931ea3ca0c925c151363a2652
|
refs/heads/master
| 2021-01-23T09:01:36.814253
| 2019-05-17T15:26:19
| 2019-05-17T15:26:19
| 23,482,375
| 9
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 987
|
r
|
bias_computer.R
|
# Compute the fee for a usage contract, given the contracted amount and the
# actual consumption.
#
# The fee is a weighted price on the actual usage plus an adjustment service
# fee on the part of the deviation outside the tolerated band:
#   deviation ratio > +1%  -> surcharge on the excess above contract*(1+1%)
#   deviation ratio < -2%  -> surcharge on the shortfall below contract*(1-2%)
#   otherwise              -> plain weighted price, no adjustment
#
# contract_usage: contracted amount
# actual_usage:   actually consumed amount
# Returns the fee as a numeric scalar.
getfee = function(contract_usage = 3000,actual_usage = 3500)
{
  bias = actual_usage - contract_usage
  bias_ratio = bias / contract_usage

  uplimit = 0.01               # upper tolerance of the deviation ratio
  up_adjust_serive_fee = 0.75  # service-fee rate for over-consumption
  up_b = 0.1                   # scaling factor of the over-consumption fee
  neglimit = -0.02             # lower tolerance of the deviation ratio
  neg_adjust_serive_fee = 0.75 # service-fee rate for under-consumption
  neg_b = 0.1                  # scaling factor of the under-consumption fee
  weighted_average = 0.75      # weighted average price per unit

  # BUG FIX: the original used three independent strict comparisons
  # (> uplimit; > neglimit && < uplimit; < neglimit), so a deviation ratio
  # exactly equal to a limit matched no branch and `y` was never assigned
  # ("object 'y' not found", e.g. actual 3030 vs contract 3000). The if/else
  # chain assigns the boundary cases to the no-adjustment band and leaves
  # every other input unchanged.
  if (bias_ratio > uplimit) {
    y = weighted_average * actual_usage + (actual_usage - contract_usage*(1+uplimit)) * up_adjust_serive_fee * up_b
  } else if (bias_ratio < neglimit) {
    y = weighted_average * actual_usage + (contract_usage * (1+neglimit) - actual_usage) * neg_adjust_serive_fee * neg_b
  } else {
    y = weighted_average * actual_usage
  }
  return(y)
}
# Evaluate the fee schedule over a range of actual usages and plot the curve.
actual_usage <- 0:6000
contract_usage <- 3000
# Vectorize getfee() over all usage values. FIX: the original grew the
# result with `actual_fee = c(actual_fee, fee)` inside a loop (O(n^2)
# copying); vapply preallocates and also pins the result type.
# NOTE(review): usages whose deviation ratio hits exactly +1% or -2%
# (e.g. 3030) make getfee() fail unless its boundary handling covers them.
actual_fee <- vapply(
  actual_usage,
  function(a) getfee(contract_usage = contract_usage, actual_usage = a),
  numeric(1)
)
plot(actual_usage, actual_fee)
|
24fbcd7f10b3a30f75a4c2c6cbaa26d96b767643
|
5d6156ff9113920d9cb1163d540c15548b31f0fd
|
/Module8/GDP_Year/GDPVisualization.R
|
e3394a2ccff240efac82c5dcaccc76ef6fde2db1
|
[] |
no_license
|
Karagul/PowerBI_With_R
|
a2b98632bcf4b1db0d72b6891f0cc8e461094887
|
0bf7f30cd5f84261b0b3184c588fb1b91d661a13
|
refs/heads/master
| 2020-07-23T18:29:18.752803
| 2018-06-17T20:59:46
| 2018-06-17T20:59:46
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,026
|
r
|
GDPVisualization.R
|
# Change the theme and center the chart title
# Bubble chart of a GDP statistic per country.
# NOTE(review): `GDPData` is expected to be supplied by the host environment
# (e.g. a Power BI R visual binds it as a data frame); it is not defined in
# this file.
library(ggplot2)
library(RColorBrewer)
library(ggthemes)
library(ggrepel)
library(dplyr)

# Keep a single year / single statistic for plotting
dataset <-
  GDPData %>%
  mutate(Year = as.numeric(Year)) %>%
  filter(Year == 2017 & Stat == "GDP")

# Draw the chart only when exactly one year and one statistic remain;
# otherwise fall through to an intentionally blank canvas (else branch)
if (length(unique(dataset$Year)) == 1 & length(unique(dataset$Stat)) == 1) {
  # Outline colors keyed by economy classification
  ec.colors <- c(
    "Emerging/Developing" = "#FFD700", # Hex value for gold
    "Advanced" = "#FF0000" # Hex value for red
  )

  # Human-readable chart title for the selected statistic
  chart.title = switch(
    as.character(dataset$Stat[1])
    , "GDP" = "Gross Domestic Product"
    , "GDPPPP" = "Gross Domestic Product (Purchase Power Parity)"
    , "GDPPC" = "Gross Domestic Product Per Capita"
    , "GDPPPPPC" = "Gross Domestic Product Per Capita (Purchase Power Parity)"
  )

  # Legend title matching the selected statistic's units
  gdp.legend.title = switch(
    as.character(dataset$Stat[1])
    , "GDP" = "GDP ($Billions)"
    , "GDPPPP" = "GDP (PPP) ($Billions)"
    , "GDPPC" = "GDP Per Capita ($)"
    , "GDPPPPPC" = "GDP Per Capita (PPP) ($)"
  )

  # Bubble chart: x = scaled population, y = scaled land area, bubble size =
  # statistic value, fill = scaled HDI, outline = economy classification,
  # with non-overlapping country labels
  p <- ggplot(dataset, aes(Scaled.Population, Scaled.Area, size = `Value`)) +
    geom_point(
      shape = 21
      , stroke = 1
      , aes(fill = Scaled.HDI, color = `Economy Classification`)
    ) +
    geom_label_repel(
      aes(label = Country)
      , show.legend = FALSE
      , size = 4
    ) +
    scale_fill_continuous(
      guide =
        guide_colorbar(
          title = "2015 Human Development Index"
          , size = 18
        )
    ) +
    scale_color_manual(
      values = ec.colors
      , guide = guide_legend(title = "2017 Economy Classification")
    ) +
    scale_size_continuous(
      guide = guide_legend(title = gdp.legend.title)
    ) +
    labs(x = "Population", y = "Land Area") +
    ggtitle(chart.title) +
    theme_classic() +
    theme(plot.title = element_text(hjust = 0.5, size = 20))
  p
} else {
  # Fallback: an empty plot with no axis ticks or labels
  p <- ggplot(dataset, aes(x = Scaled.Population, y = Scaled.Area)) +
    geom_blank() +
    scale_x_continuous(
      labels = NULL
      , breaks = NULL
    ) +
    scale_y_continuous(
      labels = NULL
      , breaks = NULL
    ) +
    labs(x = NULL, y = NULL) +
    theme_classic()
  p
}
|
b27ce9c91b55d1e45fd43b75f0c6ef0996efc0d7
|
d33f3de15441549a8dbf1ef9ec3beccd02ec173a
|
/plot1.R
|
833055921589fdbada2182cecfc6a308335de55a
|
[] |
no_license
|
jvidyad/ExData_Plotting1
|
85addbc82d8d1c79441c2111cdcbd9b36f9b97b5
|
8130d4845787bb9640db3fa4218e4bf570409a32
|
refs/heads/master
| 2021-01-11T15:20:54.555394
| 2017-01-29T12:26:39
| 2017-01-29T12:26:39
| 80,336,272
| 0
| 0
| null | 2017-01-29T08:34:16
| 2017-01-29T08:34:15
| null |
UTF-8
|
R
| false
| false
| 552
|
r
|
plot1.R
|
# Plot 1: histogram of household global active power for 2007-02-01/02,
# read straight out of the downloaded zip archive and saved as plot1.png.

# Location of the zipped raw data and the file name inside the archive
archive_path <- "data/household_power_consumption.zip"
inner_file <- "household_power_consumption.txt"

# Semicolon-separated measurements; "?" marks missing values
power <- read.table(unz(archive_path, inner_file), header=TRUE, sep=";",
                    na.strings="?",
                    stringsAsFactors=FALSE)

# Keep only the two days of interest (dates are still d/m/Y strings here)
power <- subset(power, Date=="1/2/2007"|Date=="2/2/2007")

# Combine date and time text before converting the Date column, then parse
datetime_text <- paste(power$Date, power$Time)
power$Date <- as.Date(power$Date, format="%d/%m/%Y")
power$Time <- strptime(datetime_text, "%d/%m/%Y %H:%M:%S")

# Render the histogram to a PNG file
png("plot1.png")
hist(power$Global_active_power, xlab="Global Active Power (kilowatts)",
     col="red", main="Global Active Power")
dev.off()
|
804fa0d7cb487f99fada0c8c80f888d61159afd9
|
9faa3cfb92ff2cb58db8739fefffe4fd248bcf48
|
/lib/R/sma.R
|
b8647992b56b3dc9ab9f5a49b5a1396a0e3328ae
|
[
"MIT"
] |
permissive
|
joshterrell805-historic/StockPrediction
|
fd135e9b0d6f8033207511c2c5b6b2ba24cf150b
|
15795889d21421b2ab59c3a4669c36e0956a888d
|
refs/heads/master
| 2021-01-10T09:20:24.380517
| 2017-07-04T19:00:20
| 2017-07-04T19:00:20
| 49,034,347
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 344
|
r
|
sma.R
|
# Simple (trailing) moving average over one column of `data`.
#
# data:   data frame or matrix with at least `count` rows
# column: name or index of the column to average
# count:  window length; positions with fewer than `count` preceding
#         observations yield NA
# Returns a vector with one value per row of `data`.
sma <- function(data, column, count) {
  window_mean <- function(pos) {
    if (pos < count) {
      NA
    } else {
      # trailing window ending at `pos`
      first <- pos - count + 1
      sum(data[first:pos, column] / count)
    }
  }
  sapply(seq_len(nrow(data)), window_mean)
}
|
a3184cdea893110fd065f69e63910262ccaa0411
|
a38d76df0dd29c5f8494a7750f62acac76062389
|
/tests/testthat.R
|
e9f43fcfaab9f7d89ae7b9b84e2b5d87d1661fb9
|
[
"MIT"
] |
permissive
|
explore-n-learn/rtrek
|
d98a68bb160caff445eaa7af66bede6554060a2a
|
8199dfeb26602f9d6910f83621bf00b6a6812717
|
refs/heads/master
| 2023-05-17T03:15:43.273023
| 2021-05-29T18:55:52
| 2021-05-29T18:55:52
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 54
|
r
|
testthat.R
|
# Standard testthat entry point: load the test framework and the package
# under test, then run every test under tests/testthat/ (this file is
# executed by R CMD check).
library(testthat)
library(rtrek)

test_check("rtrek")
|
69864c139d4c9dcd6bd7fd71323c4067733060ba
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/medicalrisk/examples/charlson_weights_orig.Rd.R
|
0c72b9ab346ccc582ef495b88c4a1ccf293bb16e
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 225
|
r
|
charlson_weights_orig.Rd.R
|
# Auto-generated example script (extracted from the Rd documentation) for
# the medicalrisk dataset `charlson_weights_orig`.
library(medicalrisk)
### Name: charlson_weights_orig
### Title: Map of Charlson comorbidity categories to weights
### Aliases: charlson_weights_orig
### Keywords: datasets
### ** Examples
# Look up the original Charlson weight assigned to the "aids" category
charlson_weights_orig["aids"]
|
218b3734c95d9851162214f53cdc128c7f5d6564
|
fc400478eb86e39e640edf56a50d42e06651a03b
|
/Droping duplicates exploration.R
|
9f59f80f0bc64185c1cdbb155e1cf907f7a065e5
|
[] |
no_license
|
d2squared/NCAA-March-Madness-Scrape
|
7a606983529b8e1dff27f4c6acaef904fc7dbe39
|
11e51a03dd4508404a121fec558c899201a92fe9
|
refs/heads/master
| 2022-04-19T06:35:15.404753
| 2020-04-01T03:29:27
| 2020-04-01T03:29:27
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 569
|
r
|
Droping duplicates exploration.R
|
# Exploration of how to drop duplicate game rows when the same game can
# appear with the two teams in either column order.
# NOTE(review): relies on `all_info_2002` and an attached dplyr from an
# earlier session -- neither is created/loaded in this chunk.
# Collect all Villanova games, whichever side the team appears on
villanova1 <- all_info_2002 %>%
filter(Team_1 == "Villanova")
villanova2 <- all_info_2002 %>%
filter(Team_2 == "Villanova")
villanova_all <- rbind(villanova1, villanova2)
# Narrow down to the Villanova-Bucknell matchup (both orderings)
test_1 <- villanova_all %>%
filter(Team_1 == "Bucknell")
test_2 <- villanova_all %>%
filter(Team_2 == "Bucknell")
test3 <- rbind(test_1, test_2)
test4 <- test3[,1:20]
# Build candidate de-duplication keys: date + teams in original order,
# date + teams reversed, and both concatenated together
test4$Drop_1 <- paste(test4$Date_Number, test4$Team_1, test4$Team_2, sep="_")
test4$Drop_2 <- paste(test4$Date_Number, test4$Team_2, test4$Team_1, sep="_")
test4$Drop_final <- paste(test4$Drop_1, test4$Drop_2, sep="_")
|
1bcc1d29ef8dbf00d659adbc9659ee5b86dd21c7
|
259b21039f27e16d00161a233808283c3a0b99cd
|
/Neural Network (Concrete data).R
|
56d29e8e63bcb990c8669e67fb1e9b068e8c6d18
|
[] |
no_license
|
vikasbevoor/Neural-Network-with-R
|
96c48b6c00ce980504d0899cab9fe05e0c4fab20
|
9e351a6e67e2b4009d65d8f2199054d6fb85dd04
|
refs/heads/main
| 2022-12-20T05:11:31.234209
| 2020-10-13T07:49:28
| 2020-10-13T07:49:28
| 303,627,711
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,816
|
r
|
Neural Network (Concrete data).R
|
# Neural-network regression on the concrete compressive strength data set.
# Workflow: load -> explore -> min-max normalise predictors -> train/test
# split -> fit neuralnet models of increasing complexity -> evaluate the
# predictions against the held-out test set via correlation.

concrete <- read.csv("D:/Data science videos/R Codes/Assignments docs/Neural Networks/concrete (4).csv")
View(concrete)
# NOTE(review): attach() is discouraged; the bare column references below rely on it.
attach(concrete)

install.packages("moments")
library(moments)
library(caTools)
library(caret)

# Data exploration
summary(concrete)
str(concrete)

# Graphical exploration of the target and two key predictors
hist(strength)
summary(strength)
skewness(strength)
kurtosis(strength)
boxplot(strength)

hist(cement)
summary(cement)
skewness(cement)
kurtosis(cement)
boxplot(cement)

hist(water)
summary(water)
skewness(water)
kurtosis(water)
boxplot(water)

# Custom min-max normalisation to [0, 1]; neural nets train poorly on raw scales.
normalize <- function(x) {
  return((x - min(x)) / (max(x) - min(x)))
}

# Normalise all predictors (column 9 is the target `strength`), then
# re-attach the unscaled target for the regression formula.
# (A no-op `conc_norm <- conc_norm[-10]` was dropped: the frame has only
# 9 columns, so the negative out-of-range index silently did nothing.)
conc_norm <- as.data.frame(lapply(concrete[-9], normalize))
View(conc_norm)
conc_norm <- cbind.data.frame(conc_norm, strength)

# Train/test split: first 750 rows train, remaining 280 rows test
conc_train <- conc_norm[1:750, ]
conc_test <- conc_norm[751:1030, ]

## Training a model on the data
install.packages("neuralnet")
library(neuralnet)
colnames(conc_norm)

# Simple ANN with only a single hidden neuron
conc_model <- neuralnet(formula = strength ~ ., data = conc_train, hidden = 1)
# visualize the network topology
plot(conc_model)

## Evaluating model performance
results_model <- compute(conc_model, conc_test[1:8])
str(results_model)
predicted_strength <- results_model$net.result
head(predicted_strength)
# BUG FIX: the test frame has no `Profit` column (copy-paste from another
# exercise); correlate the predictions with the actual `strength` values.
cor(predicted_strength, conc_test$strength)

# Improving model performance: 10 hidden neurons
conc_model2 <- neuralnet(formula = strength ~ ., data = conc_train, hidden = 10, stepmax = 1e+06, threshold = 0.1)
plot(conc_model2)
model_results2 <- compute(conc_model2, conc_test[1:8])
predicted_strength2 <- model_results2$net.result
# BUG FIX: compare against `strength`, not the nonexistent `Profit` column.
cor(predicted_strength2, conc_test$strength)

# A middle-ground topology: 5 hidden neurons with a looser threshold.
# BUG FIX: the original "model3" block was copy-pasted from a start-up
# profit exercise (Profit ~ R.D.Spend + ..., start_train) and referenced
# objects that do not exist here; replaced with a concrete-data fit.
conc_model3 <- neuralnet(formula = strength ~ ., data = conc_train, hidden = 5, stepmax = 1e+06, threshold = 0.2)
plot(conc_model3)
model_results3 <- compute(conc_model3, conc_test[1:8])
predicted_strength3 <- model_results3$net.result
cor(predicted_strength3, conc_test$strength)
|
7296b1d15f5533c95fb9b508b224543651ba0b75
|
931eced08131bf4d96c0722b6d8f90e1f7f38c95
|
/Portfolio_Optimization.R
|
2d0c5eea91f0ef766c3f283a50364f24b5a9cf53
|
[] |
no_license
|
pekova13/SPL_MeanVar_ThreeFund
|
e82939812ad0597fd3107445719d666c5dbedd6b
|
1cae4a940c8a66551fd771cb78e07897b68fccac
|
refs/heads/master
| 2021-06-25T21:07:43.843234
| 2020-12-04T15:56:39
| 2020-12-04T15:56:39
| 176,032,450
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 25,570
|
r
|
Portfolio_Optimization.R
|
# set the working directory
# NOTE(review): machine-specific absolute path with mixed separators;
# this only works on the original author's Windows machine.
setwd("C:\\Users/TO/Desktop")
# load the dataset (semicolon-separated; Ten/Roberto-Wessels returns data)
data=read.csv("ten-roberto-wessels.csv",sep=";",header=TRUE)
# Install all necessary packages
# NOTE(review): unconditional install.packages() on every run is slow;
# consider guarding with requireNamespace() instead.
install.packages("ggplot2")
install.packages("dygraphs")
install.packages("tidyverse")
install.packages("lubridate")
install.packages("zoo")
install.packages("xts")
install.packages("reshape2")
install.packages("plotly")
install.packages("PerformanceAnalytics")
install.packages("corrplot")
# Fix the RNG seed so any stochastic steps are reproducible
set.seed(123)
# Load the packages
library(ggplot2)
library(dygraphs)
library(tidyverse)
library(lubridate)
library(zoo)
library(xts)
library(reshape2)
library(plotly)
library(PerformanceAnalytics)
library(corrplot)
###################################################################################################################
# STRATEGY-INDEPENDENT FUNCTIONS
# Column means of every column/asset in the dataset.
#
# @param data A numeric matrix or data frame of returns (one column per asset).
# @return A named numeric vector with one mean per column.
get_means_vector=function(data){
  # colMeans() is the idiomatic, faster equivalent of apply(data, 2, mean)
  return(colMeans(data))
}
# Column-wise standard deviation of every column/asset in the dataset.
#
# @param data A numeric matrix or data frame of returns (one column per asset).
# @return A named numeric vector with one SD per column.
get_sd_vector=function(data){
  return(apply(data, MARGIN = 2, FUN = sd))
}
# Normalise a vector of absolute portfolio weights into relative weights
# by dividing through the absolute value of their total.
#
# NOTE: this divides by |sum(weights)| -- the absolute value of the SUM,
# not the sum of absolute values -- matching the original normalisation.
#
# @param weights_vector Numeric vector of absolute asset weights.
# @return Numeric vector of relative asset weights.
get_rel_weights_vector=function(weights_vector){
  return(weights_vector / abs(sum(weights_vector)))
}
# In-sample Sharpe ratio OR in-sample portfolio returns.
#
# @param data A data frame of (excess) returns, one row per period.
# @param absolute_weights_vector Absolute asset weights; relative weights
#        are derived internally via get_rel_weights_vector().
# @param sharperatio If TRUE, return mean(returns)/sd(returns)
#        (DeMiguel et al., p. 1928); otherwise return the per-period returns.
get_insample_sharperatio_returns = function (data, absolute_weights_vector, sharperatio){
  # Hoist the (loop-invariant) relative-weight computation out of the loop
  rel_weights = get_rel_weights_vector(absolute_weights_vector)
  n_periods = nrow(data)
  # BUG FIX: the original `c(length = n)` builds a length-1 vector named
  # "length" and relies on index assignment to grow it; preallocate properly.
  pf_rtr_in_sample = numeric(n_periods)
  for (i in seq_len(n_periods)) {
    # Weighted sum of the period-i returns across assets
    pf_rtr_in_sample[i] = unlist(data[i, ]) %*% rel_weights
  }
  if (sharperatio == TRUE) { # in-sample Sharpe ratio (DeMiguel p. 1928)
    return (mean(pf_rtr_in_sample) / sd(pf_rtr_in_sample))
  } else { # in-sample portfolio returns
    return (pf_rtr_in_sample)
  }
}
# Out-of-sample Sharpe ratio OR out-of-sample portfolio returns.
#
# Rolls a window of M periods over `data`: for each window, estimates the
# absolute weights on rows i..(M+i-1) -- via the mean-variance strategy when
# strategy == "mv", otherwise via the Kan-and-Zhou three-fund strategy --
# and applies the relative weights to the returns of the NEXT row (M+i).
#
# @param M rolling-window length (number of periods)
# @param data data frame of per-period returns, one column per asset
# @param sharperatio TRUE -> mean/sd of the out-of-sample returns;
#        FALSE -> the vector of out-of-sample portfolio returns
# @param strategy "mv" (mean-variance) or anything else (Kan & Zhou)
get_outofsample_sharperatio_returns = function (M, data, sharperatio, strategy) {
# set the rolling window:
rolling_window=M
# Calculate length of the new vector with the portfolio returns:
len_portfolio_returns=length(data[,1])-rolling_window
# Create the vector with the respective length:
# NOTE(review): c(length = n) actually builds a length-1 vector named
# "length"; the loop below silently grows it to the intended size.
portfolio_returns_outofsample=c(length=len_portfolio_returns)
# Calculate the (in this case 264 - 120) excess returns and add each value in the portfolio_returns vector:
for(i in 1:len_portfolio_returns){
# Set the start index for each iteration:
start_window=i
# Set the end index for each iteration:
end_window=rolling_window+i-1
# Create a new "time"-matrix, which contains the data for one M-period window
# with the start index and the end index (rowise):
time_matrix=data[start_window:end_window,]
# Create the covariance matrix of the "time"-matrix:
cov_time_matrix=cov(time_matrix)
# Calculate the absolute weights of the assets in row end_window + 1 based on the last M rows:
# an if-else construct to differentiate between both strategies, since these require different functions
if (strategy == "mv") { # mean-variance strategy
weights_vct=get_weights_vector(get_means_vector(time_matrix),cov_time_matrix)
} else { # Kan and Zhou three-fund portfolio strategy
weights_vct=get_weights(time_matrix, length(time_matrix[,1]), length(time_matrix[1,]))
}
# Calculate the portfolio return using the excess returns in row
# end_window + 1 of the initial data and the computed relative weights:
single_pf_return=unlist(data[end_window+1,])%*%get_rel_weights_vector(weights_vct)
# Add each value in the vector portfolio returns:
portfolio_returns_outofsample[start_window]=single_pf_return
}
portfolio_returns_outofsample = c(t(portfolio_returns_outofsample))
# an if-else construct to determine whether the out-of-sample sharpe ratio or the out-of-sample portfolio returns must be returned
if (sharperatio == TRUE) { # compute the out-of-sample sharperatio
sharpe_ratio_out_of_sample=mean(portfolio_returns_outofsample)/sd(portfolio_returns_outofsample)
return (sharpe_ratio_out_of_sample)
} else { # return the out-of-sample portfolio returns
return (portfolio_returns_outofsample)
}
}
# Dynamics of the relative asset weights over all T - M periods
# (T = total observations, M = rolling window), as an interactive plot.
#
# Parameters:
# 1. M: numeric -> length of the rolling window
# 2. data: data frame -> the relevant returns data
# 3. assets: numeric -> number of assets considered
# 4. cov_matrix: boolean -> strategy selector:
#    TRUE  -> mean-variance weights (needs a covariance matrix per window)
#    FALSE -> Kan and Zhou three-fund weights (no explicit cov matrix needed)
# Returns: an interactive plotly scatter of relative weights per period.
get_weights_dynamics = function(M, data, assets, cov_matrix) {
# create an empty matrix which should collect the relative weights vector for each period considered
collector = matrix(, nrow = assets, ncol = (length(data[,1])-M))
colnames(collector) = c(1:(length(data[,1])-M))
rownames(collector) = c(colnames(data))
for(i in 1:(length(data[,1])-M)) {
# Set the start index for each iteration:
start_window=i
# Set the end index for each iteration:
end_window=M+i-1
# Create a new "time"-matrix with the start index and the end index (rowise) to collect the absolute r.w. difference in t:
time_matrix=data[start_window:end_window,]
# an if-else construct to distinguish between the mean-variance and the Kan and Zhou strategy, since the computation of
# the absolute weights vectors is different for both strategies
if (cov_matrix == TRUE) { # mean-variance strategy
# Create the covariance matrix of the time matrix
cov_time_matrix=cov(time_matrix)
# Calculate the absolute weights of the assets over time (always based on the previous M rows)
weights_vct=get_weights_vector(get_means_vector(time_matrix), cov_time_matrix)
} else { # Kan and Zhou three-fund portfolio strategy
weights_vct=get_weights(time_matrix, M, assets)
}
# Calculate the relative weights (the function for the computation of the relative weights is not strategy-dependent)
rel_weights_vct=get_rel_weights_vector(weights_vct)
# Collect the relative weights vectors for each period in the collector-matrix
collector[,i] = rel_weights_vct
}
# transpose the collector matrix: rows become periods, columns become assets
weight_matrix = t(collector)
# plot a basic ggplot (melt() -> long form: Var1 = period, Var2 = asset)
p1= ggplot(melt(weight_matrix), aes(x=Var1, y=value, col=Var2))+geom_point()+ggtitle("Dynamics of Weights")+ylab("Relative weights")+xlab("Periods")
# make the basic ggplot interactive
dynamics_return = ggplotly(p1)
return(dynamics_return)
}
# Plot rolling annualised SD (sd = TRUE) or rolling annualised returns
# (sd = FALSE) of a portfolio over a moving window of `width` months.
#
# Parameters:
# 1. portfolio_returns: in-sample or out-of-sample portfolio returns over time
# 2. width: the rolling interval in months (e.g. 3 or 6)
# 3. sd: TRUE -> SD dynamics; FALSE -> return dynamics
# 4. dates_seq: sequence of dates used as the time index of the series
get_sd_dynamics_or_devreturn = function(portfolio_returns, width, sd, dates_seq) {
  # Build a date-indexed xts series from the raw returns
  ts_returns = data.frame(portfolio_returns)
  rownames(ts_returns) = dates_seq
  ts_returns = as.xts(ts_returns, dateFormat = "Date")
  # Select the rolling statistic once, then draw a single chart
  rolling_fun = if (sd == TRUE) "StdDev.annualized" else "Return.annualized"
  return(chart.RollingPerformance(R = ts_returns, width = width, FUN = rolling_fun))
}
# Benchmark comparison: plot the weighted portfolio's returns against the
# returns of the benchmark portfolio over time as an interactive line chart.
#
# @param benchmark matrix/data frame whose columns are the weighted
#        portfolio's returns and the benchmark returns, one row per period
# @return an interactive plotly object
bm_comparison = function (benchmark) {
bm_df = data.frame(benchmark)
#turn the benchmark object into a matrix with appropriate length, width and names
bm_mat = matrix(benchmark, nrow = (length(bm_df[,1])), ncol = (length(bm_df[1,])))
rownames(bm_mat) = c(1:(length(bm_df[,1])))
colnames(bm_mat) = c(colnames(benchmark))
#plot a basic ggplot (melt() -> long form: Var1 = period, Var2 = series)
bm= ggplot(melt(bm_mat), aes(x=Var1, y=value, col=Var2))+geom_line(alpha = 0.7)+ggtitle("Benchmark comparison")+ylab("Returns")+xlab("Periods")
#make the basic ggplot interactive
plot = ggplotly(bm)
return(plot)
}
# Summary table of the benchmark-comparison returns: one column per series,
# row "means_vector" holding the column means and row "sd_vector" holding
# the column standard deviations.
#
# @param benchmark_c matrix/data frame of portfolio and benchmark returns
# @return a 2 x k numeric matrix (rows: means_vector, sd_vector)
get_means_and_sd_vector = function(benchmark_c) {
  df = data.frame(benchmark_c)
  # rbind() the two summary vectors directly instead of filling a
  # preallocated matrix column-by-column and transposing it afterwards.
  return(rbind(means_vector = get_means_vector(df),
               sd_vector = get_sd_vector(df)))
}
#####################################################################################################################
# MEAN VARIANCE STRATEGY SPECIFIC FUNCTIONS
# Absolute mean-variance weights x_t = Sigma^{-1} mu
# (DeMiguel, Garlappi, Uppal; page 1922).
#
# @param means_vector Vector of mean (excess) returns, mu.
# @param cov_matrix Covariance matrix of the returns, Sigma.
# @return Numeric vector of absolute asset weights.
get_weights_vector= function(means_vector,cov_matrix){
  # Solve the linear system Sigma x = mu directly instead of forming the
  # explicit inverse: solve(A, b) is numerically more stable and cheaper
  # than solve(A) %*% b.
  x_t = solve(cov_matrix, means_vector)
  return (c(x_t))
}
####################################################################################################################
# KAN AND ZHOU THREE FUND STRATEGY PORTFOLIO SPECIFIC FUNCTIONS
# mu_g parameter (Kan & Zhou, p. 643): expected return of the global
# minimum-variance portfolio, (mu' S^-1 1) / (1' S^-1 1).
# `dimensions` is the number of assets in the dataset.
get_mu_g = function(data, dimensions){
  # inverse of the sample covariance matrix
  inv_sigma = solve(cov(data))
  # n-dimensional vector of ones
  ones = rep(1, dimensions)
  mu = get_means_vector(data)
  # ratio of the two quadratic forms, collapsed from 1x1 matrices to a scalar
  mu_g = (t(mu) %*% inv_sigma %*% ones) / (t(ones) %*% inv_sigma %*% ones)
  return(as.vector(mu_g))
}
# psi^2 parameter (Kan & Zhou, p. 643): the quadratic form
# (mu - mu_g 1)' S^-1 (mu - mu_g 1) of the mean returns centred at mu_g.
get_psi_square = function(data, mu_g_object, dimensions ){
  # inverse of the sample covariance matrix
  inv_sigma = solve(cov(data))
  # centre the mean returns at mu_g (using an n-dimensional ones vector)
  centered_mu = get_means_vector(data) - mu_g_object * rep(1, dimensions)
  # quadratic form, collapsed from a 1x1 matrix to a scalar
  return(as.vector(t(centered_mu) %*% inv_sigma %*% centered_mu))
}
# c3 shrinkage constant (Kan & Zhou, p. 636):
# ((T - N - 4) / T) * ((T - N - 1) / (T - 2)),
# where T = observations and N = dimensions (number of assets).
get_c3 = function(observations, dimensions){
  return(((observations - dimensions - 4) / observations) *
           ((observations - dimensions - 1) / (observations - 2)))
}
# Absolute weights of a Kan and Zhou three-fund portfolio
# (Kan & Zhou paper, page 642): a c3-scaled combination of the sample
# tangency portfolio and the global minimum-variance portfolio, mixed
# according to psi^2 and the N/T ratio.
get_weights = function(data, observations, dimensions) {
# get the c3 parameter
c3_object = get_c3(observations, dimensions)
# get the mu_g parameter
mu_g_object =get_mu_g(data, dimensions)
# get the psi^2 parameter
psi_square_object = get_psi_square(data, mu_g_object, dimensions)
# compute the inverse matrix of the covariance matrix of the data
inverse_matrix = solve(cov(data))
# create a n-dimensional 1-vector
ma = matrix(1, 1, dimensions)
i_vector = c(ma)
# get the means vector of the data
means_vector = get_means_vector(data)
# compute the first and the second term for the final multiplication:
# first term  -> tangency-portfolio direction, scaled by psi^2/(psi^2 + N/T)
# second term -> minimum-variance direction, scaled by (N/T)/(psi^2 + N/T)
first_term = (psi_square_object/(psi_square_object+(dimensions/observations))) * inverse_matrix %*%means_vector
second_term = ((dimensions/observations) / (psi_square_object + (dimensions/observations))) * mu_g_object * inverse_matrix %*% i_vector
# get the absolute weights vector (returned as an n x 1 matrix)
weights = c3_object * (first_term + second_term)
return(weights)
}
#####################################################################################################################
#0) Subsetting and predefinition of constants
# a subset without the date-column
data.red=data[,-1]
# a subset, containing the excess returns (after subtracting the risk-free rate,
# column 12 of data.red, for each period -> column 13 in the original dataset);
# the returns in the initial dataset still contain a risk free rate
#data.new=data.red[,-12]-data.red[,12]
# NOTE(review): the 264 x 11 dimensions are hard-coded to this dataset
data.new=matrix(,nrow=264, ncol=11)
colnames(data.new)=c(colnames(data[,2:12]))
for (i in 1:11){
data.new[,i]=data.red[,i]-data.red[,12]
}
data.new=data.frame(data.new)
# create a subset, containing the non-excessive returns (the original data)
data.probe = data.red[,-12]
# create a subset, containing the benchmark SP500-portfolio (excess returns)
bm_SP500 = data.new$S.P500
# determine the amount of assets considered
assets = length(data.new[1,])
# determine the amount of observations considered
observations = length(data[,1])
#####################################################################################################################
# APPLYING THE FUNCTIONS TO THE MEAN VARIANCE STRATEGY
# (section numbers jump: #2 and #5 appear to have been removed in an earlier revision)
#1) Calculate the Sharpe ratio -> excess returns needed => use the data.new subset
#1.1) out-of-sample
sharpe_ratio_out_of_sample_mv=get_outofsample_sharperatio_returns(120, data.new, TRUE, "mv")
round(sharpe_ratio_out_of_sample_mv, digits=4)
#1.2) in-sample
# 1. alternative
sharpe_ratio_in_sample_mv=get_insample_sharperatio_returns(data.new, get_weights_vector((get_means_vector(data.new)), cov(data.new)), TRUE)
round(sharpe_ratio_in_sample_mv, digits=4)
# 2. alternative: apply the integrated SharpeRatio-function in R as a check
dates=seq(as.Date("1981/01/30"), as.Date("2002/12/31"), by = "1 month",tzone="GMT")-1
rownames(data.new)=dates
time_series=as.xts(data.new,dateFormat="Date")
# compute the relative weights vector: needed for the weights-parameter of the integrated SharpeRatio function in R
aw = get_weights_vector((get_means_vector(data.new)), cov(data.new)) # absolute
rw = get_rel_weights_vector(aw) # relative
SharpeRatio(R = time_series, Rf = 0, p = 0.95, FUN = c("StdDev"),weights = rw, annualize = FALSE)
#3) calculate the certainty-equivalent -> unadjusted returns needed => use the data.probe subset
#3.1) out-of-sample
# compute the out-of-sample portfolio returns
returns_out= get_outofsample_sharperatio_returns(120, data.probe, FALSE, "mv")
# compute the out-of-sample certainty-equivalent:
certainty_equivalent_out_of_sample_mv=mean(returns_out) - ((1/2)*(var(returns_out)))
round(certainty_equivalent_out_of_sample_mv,digits=4)
#3.2) in-sample
# compute the absolute weights vector of the returns: needed as a parameter for the following function
absolute_weights_in = get_weights_vector((get_means_vector(data.probe)), cov(data.probe))
# compute the in-sample portfolio returns
returns_in = get_insample_sharperatio_returns(data.probe, absolute_weights_in, FALSE )
certainty_equivalent_in_sample_mv=mean(returns_in) - (1/2)*(var(returns_in))
round(certainty_equivalent_in_sample_mv,digits=4)
#4) Plotting the dynamics of a portfolio returns' SD: the plot is based on a 3-month interval
# adjusted returns => data.new subset
#4.1) in-sample portfolio returns
# compute the absolute weights vector of the adjusted returns: needed as a parameter for the following function
absolute_weights_adj = get_weights_vector((get_means_vector(data.new)), cov(data.new))
# compute the adjusted in-sample portfolio returns
adjusted_returns_in = get_insample_sharperatio_returns(data.new, absolute_weights_adj, FALSE )
# create a sequence of the dates of the observations: needed as a parameter for the following function
dates_in=seq(as.Date("1981/01/30"), as.Date("2002/12/31"), by = "1 month",tzone="GMT")-1
# plot SD dynamics of the in-sample portfolio returns
get_sd_dynamics_or_devreturn(adjusted_returns_in, 3, TRUE, dates_in)
#4.2) out-of-sample portfolio returns
# compute the adjusted out-of-sample portfolio returns
adjusted_returns_out = get_outofsample_sharperatio_returns(120, data.new, FALSE, "mv")
# create a sequence of the dates of the observations: needed as a parameter for the following function
# (the two lines below are interactive inspection of the window boundaries)
data.new[121,]
data.new[264,]
dates_out=seq(as.Date("1991/01/29"), as.Date("2002/12/29"), by = "1 month",tzone="GMT")-1
# plot SD dynamics of the out-of-sample portfolio returns
get_sd_dynamics_or_devreturn(adjusted_returns_out, 3, TRUE, dates_out)
#6) Plotting the dynamics of weights of all assets
# based on unadjusted portfolio returns
get_weights_dynamics(120, data.probe, 11, TRUE)
# based on adjusted portfolio returns
get_weights_dynamics(120, data.new, 11, TRUE)
#7) Plotting the dynamics of a portfolio's returns: the plot is based on a 6-month interval
# based on unadjusted in-sample portfolio returns
get_sd_dynamics_or_devreturn(returns_in, 6, FALSE, dates_in)
# based on unadjusted out-of-sample portfolio returns
get_sd_dynamics_or_devreturn(returns_out, 6, FALSE, dates_out)
# based on adjusted in-sample portfolio returns
get_sd_dynamics_or_devreturn(adjusted_returns_in, 6, FALSE, dates_in)
# based on adjusted out-of-sample portfolio returns
get_sd_dynamics_or_devreturn(adjusted_returns_out, 6, FALSE, dates_out)
#8) Benchmark comparison in sample
benchmark_in_sample = cbind(adjusted_returns_in, bm_SP500)
bm_comparison(benchmark_in_sample)
get_means_and_sd_vector(benchmark_in_sample)
#9) benchmark comparison out of sample
# W:L selects the benchmark rows matching the 144 out-of-sample periods
W = 120 + 1
L = length(data[,1])
benchmark_out = cbind(adjusted_returns_out, bm_SP500[W:L])
bm_comparison(benchmark_out)
get_means_and_sd_vector(benchmark_out)
#####################################################################################################################
# APPLYING THE FUNCTIONS TO THE KAN AND ZHOU THREE FUND PORTFOLIO STRATEGY
# (mirrors the mean-variance section above; strategy flag "kz" / cov_matrix = FALSE)
#1) Calculate the Sharpe ratio -> excess returns needed => use the data.new subset
#1.1) out-of-sample
sharpe_ratio_out_of_sample_kz=get_outofsample_sharperatio_returns(120, data.new, TRUE, "kz")
round(sharpe_ratio_out_of_sample_kz, digits=4)
#1.2) in-sample
# 1. alternative
sharpe_ratio_in_sample_kz=get_insample_sharperatio_returns(data.new, get_weights(data.new, observations, assets), TRUE)
round(sharpe_ratio_in_sample_kz, digits=4)
# 2. alternative: apply the integrated SharpeRatio-function in R as a check
dates_kz=seq(as.Date("1981/01/30"), as.Date("2002/12/31"), by = "1 month",tzone="GMT")-1
rownames(data.new)=dates_kz
time_series_kz=as.xts(data.new,dateFormat="Date")
# compute the relative weights vector: needed for the weights-parameter of the integrated SharpeRatio function in R
aw_kz = as.vector(get_weights(data.new, observations, assets)) # absolute
rw_kz = get_rel_weights_vector(aw_kz) # relative
SharpeRatio(R = time_series_kz, Rf = 0, p = 0.95, FUN = c("StdDev"),weights = rw_kz, annualize = FALSE)
#3) calculate the certainty-equivalent -> unadjusted returns needed => use the data.probe subset
#3.1) out-of-sample (checked)
# compute the out-of-sample portfolio returns
returns_out_kz= get_outofsample_sharperatio_returns(120, data.probe, FALSE, "kz")
# compute the out-of-sample certainty-equivalent
certainty_equivalent_out_of_sample_kz=mean(returns_out_kz) - ((1/2)*(var(returns_out_kz)))
round(certainty_equivalent_out_of_sample_kz,digits=4)
#3.2) in-sample
# compute the absolute weights vector of the returns: needed as a parameter for the following function
absolute_weights_kz = get_weights(data.probe, observations, assets)
# compute the in-sample portfolio returns
returns_in_kz = get_insample_sharperatio_returns(data.probe, absolute_weights_kz, FALSE )
certainty_equivalent_in_sample_kz=mean(returns_in_kz) - (1/2)*(var(returns_in_kz))
round(certainty_equivalent_in_sample_kz,digits=4)
#4) Plotting the dynamics of a portfolio returns' SD: the plot is based on a 3-month interval
# adjusted returns => data.new subset
#4.1) in-sample portfolio returns
# compute the absolute weights vector of the adjusted returns: needed as a parameter for the following function
absolute_weights_adj_kz = get_weights(data.new, observations, assets)
# compute the adjusted in-sample portfolio returns
adjusted_returns_in_kz = get_insample_sharperatio_returns(data.new, absolute_weights_adj_kz, FALSE )
# create a sequence of the dates of the observations: needed as a parameter for the following function
dates_in_kz=seq(as.Date("1981/01/30"), as.Date("2002/12/31"), by = "1 month",tzone="GMT")-1
# plot SD dynamics of the in-sample portfolio returns
get_sd_dynamics_or_devreturn(adjusted_returns_in_kz, 3, TRUE, dates_in_kz)
#4.2) out-of-sample portfolio returns
# compute the adjusted out-of-sample portfolio returns
adjusted_returns_out_kz = get_outofsample_sharperatio_returns(120, data.new, FALSE, "kz")
# create a sequence of the dates of the observations: needed as a parameter for the following function
# (the two lines below are interactive inspection of the window boundaries)
data.new[121,]
data.new[264,]
dates_out_kz=seq(as.Date("1991/01/29"), as.Date("2002/12/29"), by = "1 month",tzone="GMT")-1
# plot SD dynamics of the out-of-sample portfolio returns
get_sd_dynamics_or_devreturn(adjusted_returns_out_kz, 3, TRUE, dates_out_kz)
#6) Plotting the dynamics of weights of all assets
# based on unadjusted portfolio returns
get_weights_dynamics(120, data.probe, 11, FALSE)
# based on adjusted portfolio returns
get_weights_dynamics(120, data.new, 11, FALSE)
#7) Plotting the dynamics of a portfolio's returns: the plot is based on a 6-month interval
# based on unadjusted in-sample portfolio returns
get_sd_dynamics_or_devreturn(returns_in_kz, 6, FALSE, dates_in_kz)
# based on unadjusted out-of-sample portfolio returns
get_sd_dynamics_or_devreturn(returns_out_kz, 6, FALSE, dates_out_kz)
# based on adjusted in-sample portfolio returns
get_sd_dynamics_or_devreturn(adjusted_returns_in_kz, 6, FALSE, dates_in_kz)
# based on adjusted out-of-sample portfolio returns
get_sd_dynamics_or_devreturn(adjusted_returns_out_kz, 6, FALSE, dates_out_kz)
#8) Benchmark comparison in sample
benchmark_in_sample_kz = cbind(adjusted_returns_in_kz, bm_SP500)
bm_comparison(benchmark_in_sample_kz)
get_means_and_sd_vector(benchmark_in_sample_kz)
#9) benchmark comparison out of sample
# W:L selects the benchmark rows matching the 144 out-of-sample periods
W = 120 + 1
L = length(data[,1])
benchmark_out_kz = cbind(adjusted_returns_out_kz, bm_SP500[W:L])
bm_comparison(benchmark_out_kz)
get_means_and_sd_vector(benchmark_out_kz)
#10) correlation matrix of the 11 assets + the one benchmark
corr_matrix = cor(data.new)
corrplot(corr_matrix, method="number", number.cex = 0.5)
|
54fb92702fd636b2789c52f6904383c3142f2e72
|
cecced4835b4f960141b85e25eabd8756f1702ea
|
/man/sc_atac_plot_features_per_cell_ordered.Rd
|
3e18556700c45d0a1112718a8a395e8197c3d11e
|
[] |
no_license
|
LuyiTian/scPipe
|
13dab9bea3b424d1a196ff2fba39dec8788c2ea8
|
d90f45117bf85e4a738e19adc3354e6d88d67426
|
refs/heads/master
| 2023-06-23T01:44:20.197982
| 2023-04-17T13:26:42
| 2023-04-17T13:26:42
| 71,699,710
| 61
| 26
| null | 2023-06-12T11:04:49
| 2016-10-23T11:53:40
|
HTML
|
UTF-8
|
R
| false
| true
| 547
|
rd
|
sc_atac_plot_features_per_cell_ordered.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sc_atac_subfunctions.R
\name{sc_atac_plot_features_per_cell_ordered}
\alias{sc_atac_plot_features_per_cell_ordered}
\title{Plot showing the number of features per cell in ascending order}
\usage{
sc_atac_plot_features_per_cell_ordered(sce)
}
\arguments{
\item{sce}{The SingleExperimentObject produced by the sc_atac_create_sce function at the end of the pipeline}
}
\value{
returns NULL
}
\description{
Plot showing the number of features per cell in ascending order
}
|
e60a4837d29ce557301cbb09570a04a9b2112fca
|
49961ac17375792c2f0e4a6bab7775becb543a98
|
/R/number.elements.R
|
554e8f30e9d13f73ce982c2d1593c6912ca2b41b
|
[] |
no_license
|
cran/InfNet
|
bfd88e01df0ef46c8bda702326c4957804871ea8
|
23d4738311e84597ed10af4d6a111329b225b081
|
refs/heads/master
| 2021-01-11T05:29:14.343589
| 2006-07-20T00:00:00
| 2006-07-20T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 88
|
r
|
number.elements.R
|
# Count the elements in column `col` of `mat` that equal `x`.
#
# @param mat A matrix (or data frame) to inspect.
# @param col Column index (or name) to scan.
# @param x   Value to count.
# @return Integer count of matching entries.
#
# BUG FIX: the original counted via length(mat[mat[, col] == x, col]),
# which also counted NA entries (NA == x is NA, and an NA subscript
# still selects an element). sum(..., na.rm = TRUE) counts true matches only.
"number.elements" <-
function(mat,col, x){
a<-sum(mat[,col]==x, na.rm=TRUE)
return(a)
}
|
226c0e007f2e99989b6a37db451ed31c540212c7
|
5247d313d1637170b6bbc5e367aba46c88725efd
|
/R/package-doc.R
|
4df722e43d0f9f90e061f12d8cb3115eaacc1fa5
|
[] |
no_license
|
fentonmartin/twitterreport
|
dac5c512eea0831d1a84bef8d2f849eab2b12373
|
5ddb467b8650289322ae83e0525b4ff01fba0d1d
|
refs/heads/master
| 2021-08-22T04:25:01.834103
| 2017-11-29T07:47:43
| 2017-11-29T07:47:43
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 162
|
r
|
package-doc.R
|
#' twitterreport
#'
#' Out-of-the-Box Analysis and Reporting Tools for Twitter
#'
#' @docType package
#' @name twitterreport
#' @author George G. Vega Yon
NULL
|
8103ee64f1b343c54bab57052ec6da8e5c503374
|
88d9c0d58c72ba565d403a21de37f9169ac282a0
|
/data-raw/03_seer-pop-us-standard.R
|
3c1ff426000c051091db45ee2a6d8b64bdba03e1
|
[
"MIT"
] |
permissive
|
GerkeLab/fcds
|
01191bc32e4b73a857ae7ab7ef39e29c6c2713c4
|
7d6cbc89726418629d9c3cd54b10414eb7cab028
|
refs/heads/master
| 2021-07-08T02:48:46.061128
| 2020-07-30T18:45:01
| 2020-07-30T19:04:25
| 167,439,089
| 3
| 1
|
NOASSERTION
| 2020-07-30T19:04:26
| 2019-01-24T21:16:41
|
R
|
UTF-8
|
R
| false
| false
| 4,455
|
r
|
03_seer-pop-us-standard.R
|
library(dplyr)
fcds:::requires_package(c("readr", "stringr", "purrr", "here"), "seer_pop_us-standard.R")
library(readr)
library(stringr)
library(purrr)
# SEER Standard Ages ----
# https://seer.cancer.gov/stdpopulations/
# Standard Populations - 18 Age Groups (0-4, 5-9, 10-14, ..., 85+)
# https://seer.cancer.gov/stdpopulations/stdpop.18ages.txt
# Download the raw fixed-width standard-population files into data-raw/ and
# read them as plain character lines (parsed later by seer_tibble()).
download.file("https://seer.cancer.gov/stdpopulations/stdpop.18ages.txt",
here::here("data-raw", "seer_stdpop-18ages.txt"))
seer_std_age_18 <- read_lines(here::here("data-raw", "seer_stdpop-18ages.txt"))
# Standard Populations - Single Ages to 84 and then 85+ (TXT, 4 KB) (2000 U.S.,
# World (WHO 2000-2025), and Canadian 2011 standards only)
# https://seer.cancer.gov/stdpopulations/stdpop.singleagesthru84.txt
download.file("https://seer.cancer.gov/stdpopulations/stdpop.singleagesthru84.txt",
here::here("data-raw", "seer_stdpop-singleagesthru84.txt"))
seer_std_ages <- read_lines(here::here("data-raw", "seer_stdpop-singleagesthru84.txt"))
# Map an index 0..18 to its SEER 18-age-group label:
#   0 -> "0", 1 -> "0 - 4", 18 -> "85+", otherwise "<lo> - <hi>" in
#   5-year bins (e.g. 2 -> "5 - 9").
age_18_groups_by_index <- function(i) {
  lower_bound <- (i - 1) * 5
  if (i == 0) {
    "0"
  } else if (i == 1) {
    "0 - 4"
  } else if (lower_bound == 85) {
    "85+"
  } else {
    paste(lower_bound, i * 5 - 1, sep = " - ")
  }
}
# Lookup table mapping the 3-digit SEER standard-population code (as it
# appears in the raw files) to its human-readable description.
seer_standard_dictionary <- c(
  "006" = "World (Segi 1960) Std Million (19 age groups)",
  "007" = "1991 Canadian Std Million (19 age groups)",
  "005" = "European (Scandinavian 1960) Std Million (19 age groups)",
  "008" = "1996 Canadian Std Million (19 age groups)",
  "010" = "World (WHO 2000-2025) Std Million (19 age groups)",
  "014" = "European (EU-27 plus EFTA 2011-2030) Std Million (19 age groups)",
  "016" = "2011 Canadian Standard Population (19 age groups)",
  "141" = "1940 U.S. Std Million (19 age groups)",
  "151" = "1950 U.S. Std Million (19 age groups)",
  "161" = "1960 U.S. Std Million (19 age groups)",
  "171" = "1970 U.S. Std Million (19 age groups)",
  "181" = "1980 U.S. Std Million (19 age groups)",
  "191" = "1990 U.S. Std Million (19 age groups)",
  "201" = "2000 U.S. Std Million (19 age groups)",
  "203" = "2000 U.S. Std Population (19 age groups - Census P25-1130)",
  "017" = "2011 Canadian Standard Population (single age to 84)",
  "202" = "2000 U.S. Std Population (single ages to 84 - Census P25-1130)",
  "205" = "2000 U.S. Std Population (single ages to 99 - Census P25-1130)",
  "011" = "World (WHO 2000-2025) Std Million (single ages to 84)",
  "012" = "World (WHO 2000-2025) Std Million (single ages to 99)",
  "001" = "World (Segi 1960) Std Million (18 age groups)",
  "002" = "1991 Canadian Std Million (18 age groups)",
  "003" = "European (Scandinavian 1960) Std Million (18 age groups)",
  "013" = "European (EU-27 plus EFTA 2011-2030) Std Million (18 age groups)",
  "004" = "1996 Canadian Std Million (18 age groups)",
  "015" = "2011 Canadian Standard Population (18 age groups)",
  "009" = "World (WHO 2000-2025) Std Million (18 age groups)",
  "140" = "1940 U.S. Std Million (18 age groups)",
  "150" = "1950 U.S. Std Million (18 age groups)",
  "160" = "1960 U.S. Std Million (18 age groups)",
  "170" = "1970 U.S. Std Million (18 age groups)",
  "180" = "1980 U.S. Std Million (18 age groups)",
  "190" = "1990 U.S. Std Million (18 age groups)",
  "200" = "2000 U.S. Std Million (18 age groups)",
  "204" = "2000 U.S. Std Population (18 age groups - Census P25-1130)"
)
# Parse raw SEER standard-population lines into a tibble. Each line encodes a
# 3-digit standard code, a 3-digit age code, and an 8-digit population count;
# the result has character columns raw / standard / age / std_pop.
seer_tibble <- function(x) {
  matched <- str_match(x, "(\\d{3})(\\d{3})(\\d{8})")
  columns <- purrr::array_tree(matched, margin = 2)
  columns <- set_names(columns, c("raw", "standard", "age", "std_pop"))
  as_tibble(columns)
}
# Named vector "000".."018" -> age-group label, used to translate the raw
# 3-digit age codes from the SEER files.
age_groups_18 <- map_chr(0:18, age_18_groups_by_index)
names(age_groups_18) <- sprintf("%03d", 0:18)
# 18-age-group file: decode standard/age codes and make std_pop numeric.
seer_std_age_18 <-
seer_std_age_18 %>%
seer_tibble() %>%
select(-raw) %>%
mutate(
standard = seer_standard_dictionary[standard],
age_group = age_groups_18[age],
std_pop = as.numeric(std_pop)
)
# Single-age file: bin single ages into the 18 five-year groups
# (age %/% 5 + 1 indexes into age_groups_18) and sum populations per group.
seer_std_ages <-
seer_std_ages %>%
seer_tibble() %>%
select(-raw) %>%
mutate(
standard_name = seer_standard_dictionary[standard],
age = as.integer(age),
std_pop = as.numeric(std_pop),
age_group = sprintf("%03d", age %/% 5 + 1),
age_group = age_groups_18[age_group]
) %>%
group_by(standard, standard_name, age_group) %>%
summarize(std_pop = sum(std_pop))
# Keep only standard 202 (2000 U.S. Std Population, single ages to 84) and
# export the packaged dataset.
seer_std_ages <-
seer_std_ages %>%
filter(standard == "202") %>%
ungroup() %>%
fcds::standardize_age_groups() %>%
dplyr::arrange(age_group)
usethis::use_data(seer_std_ages, compress = "xz", overwrite = TRUE)
|
120a88a4977d602610529427faf8b44f25c84d5f
|
3ab868b8eeef4547e97d511aede2fb21ab924e86
|
/man/bayesclust-package.Rd
|
70aa3efdde3e10b8384335c8281ba27ee6e9bfb4
|
[] |
no_license
|
cran/bayesclust
|
e8341b359081a89b084ec1e6aef60afc531e1268
|
8338e3782b0e88f24f588337e20fdb72d8b542d6
|
refs/heads/master
| 2021-01-16T19:20:43.969809
| 2012-05-15T00:00:00
| 2012-05-15T00:00:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,814
|
rd
|
bayesclust-package.Rd
|
\name{bayesclust-package}
\alias{bayesclust-package}
\alias{bayesclust}
\docType{package}
\title{
Testing and Searching for Clusters in A Hierarchical Bayes Model
}
\description{
This package contains a suite of functions that allow
the user to carry out the following hypothesis test on genetic data:
\tabular{l}{
\eqn{H_0} : No clusters \cr
\eqn{H_1} : 2, 3 or 4 clusters \cr
}
}
\details{
The hypothesis test is formulated as a model selection problem,
where the aim is to identify the model with the highest posterior probability. A Hierarchical Bayes
model is assumed for the data. Note that firstly, the null hypothesis is equivalent to saying that
the population consists of just one cluster. Secondly, since the functions here only allow the alternative
hypothesis to be either 2, 3 or 4 at any one time, the package allows the user
to test multiple hypotheses while controlling the False Discovery Rate (FDR).
This is a brief summary of the test procedure:
\enumerate{
\item
For a given dataset, compute the empirical posterior probability (EPP) of the null hypothesis
using \code{cluster.test}. EPP will serve as the test statistic in this hypothesis test.
\item
Monitor the convergence of EPP by running \code{plot} on the object returned in Step 1.
\item
Generate the distribution of EPP under the null hypothesis using \code{nulldensity}. This can be
done concurrent to Steps 1 and 2. Be sure to use the same parameters for Steps 1 and 3 though.
\item
Estimate the \eqn{p}-value of the EPP for this dataset
using \code{emp2pval}. This function takes the objects returned in
Steps 1 and 3 as input.
\item
Run \code{cluster.optimal} on significant datasets to pick out optimal clusters.
\item
Run \code{plot} on the object returned in Step 5 to view the optimal clustering/partition of the
data.
}
For full details on the distributional assumptions, please refer to the papers listed in the references
section. For further details on the individual functions, please refer to their respective help
pages and the examples.
}
\author{
George Casella \email{casella@stat.ufl.edu} and Claudio Fuentes \email{cfuentes@stat.ufl.edu}
and Vik Gopal \email{viknesh@stat.ufl.edu}
Maintainer: Vik Gopal <viknesh@stat.ufl.edu>
}
\references{
Fuentes, C. and Casella, G. (2009) Testing for the Existence of Clusters.
\emph{SORT} \bold{33}(2)
Gopal, V. and Fuentes, C. and Casella, G. (2012) bayesclust: An R package
for Testing and Searching for Significant Clusters.
\emph{Journal of Statistical Software} \bold{47}(14), 1-21.
\url{http://www.jstatsoft.org/v47/i14/}
}
\keyword{ package }
\keyword{ htest }
\keyword{ multivariate }
\keyword{ cluster }
\seealso{
\code{\link{cluster.test}}, \code{\link{cluster.optimal}},
\code{\link{emp2pval}}, \code{\link{nulldensity}}
}
|
0d92f2a46316ad80c5eca91ce8970e3090e2c927
|
b3fe239c87958a522c3e96de2486d545476e3b27
|
/4.1Network_analysis.R
|
313c7d530dcfec4f1bdd0c1062ac0904e44b7042
|
[] |
no_license
|
Zefeng-Wu/Arabidopsis_Functional_Network
|
db055c96a88bb5c72e338a972c01c3a303549a1c
|
18ca3769e4b4dcadd647404c9a1fdf764320a513
|
refs/heads/master
| 2022-11-16T04:33:07.665397
| 2020-06-11T11:49:41
| 2020-06-11T11:49:41
| 220,196,926
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 182,061
|
r
|
4.1Network_analysis.R
|
#! /usr/bin/R
set.seed(1000)
options(digits = 5)
# Read a tab-separated edge-list file (with header) and return it as a
# simplified undirected igraph object: multi-edges and self-loops removed.
#
# df: path to the TSV file; the first two columns are taken as the edge
#     endpoints by graph.data.frame().
df2network <- function(df) {
  edge_table <- read.table(df, header = TRUE, stringsAsFactors = FALSE, sep = "\t")
  library(igraph)  # attached here as in the original script flow
  net <- graph.data.frame(d = edge_table, directed = FALSE)
  simplify(net, remove.multiple = TRUE, remove.loops = TRUE)
}
#### read network data with different cufoff
g<-df2network("myresearch/network/1network_cutoff10.txt")
### degree kept network
# Degree-preserving rewiring: random background network with the same
# degree sequence as g.
gg<-rewire(g,with = keeping_degseq(niter = vcount(g) * 100))
## network attributions calculation
#0.0 network diameter
#farthest.nodes(g,weights = NA) # AT4G04200 AT5G67580; 807.44
# network betweeness
system.time(AGFN_betweenness<-betweenness(g,directed = FALSE,weights = NA,normalized = TRUE))
# NOTE(review): no file= argument, so this prints the table to the console
# rather than saving it -- presumably a filename is missing; confirm intent.
write.table(AGFN_betweenness)
### Fit power-law
# Empirical degree distribution (zero-probability degrees dropped), then a
# discrete power-law fit via the poweRlaw package.
data <- degree(g)
data.dist <- data.frame(k=0:max(data),p_k=degree_distribution(g))
data.dist <- data.dist[data.dist$p_k>0,]
library(ggplot2)
ggplot(data.dist) + geom_point(aes(x=k, y=p_k)) + # log(k)
theme_bw()+
theme(text=element_text(size = 20)) + ylab("Probability of degree")+xlab("Degree")
library(poweRlaw)
m_pl <- displ$new(data)
est_pl <- estimate_xmin(m_pl)
m_pl$setXmin(est_pl)
plot.data <- plot(m_pl, draw = F)
fit.data <- lines(m_pl, draw = F)
ggplot(plot.data) + geom_point(aes(x=log(x), y=log(y))) +
labs(x="log(k)", y="log(CDF)") + theme_bw() +
geom_line(data=fit.data, aes(x=log(x), y=log(y)), colour="red")+
theme(text=element_text(size=20))
### network closeness
system.time(AGFN_closeness<-closeness(g,directed = FALSE,weights = NA,normalized = TRUE))
###1.1 network validation using tair interaction data
# For every gene with experimental TAIR interactions, compare its 1-step
# neighbourhood in the predicted network g with the experimental links and
# append one summary row per gene to out_file.
tair_inter<-read.table("/home/wuzefeng/MyResearch/networks/PPI/Arabidopsis/TairProteinInteraction.20090527.txt",header = TRUE,stringsAsFactors = FALSE,sep="\t")
focused_genes_list <-unique(c(tair_inter$Locus_name,tair_inter$InteractorLocus_name))
out_file <- "4network_analysis_result/1network_validation/tair_interaction_validation"
for (x in focused_genes_list){
focused_genes<-x
if (x %in% names(V(g))){
# predicted partners: direct neighbours of the focal gene in g
sub_g<-induced_subgraph(g, ego(g, 1, focused_genes)[[1]])
# experimental links involving the focal gene, de-duplicated as unordered
# pairs, self-interactions removed
focus_links<-subset(tair_inter,tair_inter$Locus_name==focused_genes | tair_inter$InteractorLocus_name==focused_genes )
focus_links<-focus_links[,c(1,3,5,6)]
focus_links<-unique(as.data.frame(t(apply(focus_links[,c(1,2)],1,sort)),stringsAsFactors = FALSE))
colnames(focus_links)<-c("V1","V2")
focus_links<-subset(focus_links,focus_links$V1!=focus_links$V2)
message(c(x,";","experiment links number is: ",dim(focus_links)[1],";","of these the predicted links is: ",length(intersect(names(V(sub_g)),unique(as.vector(t(as.matrix(focus_links))))))-1))
# BUGFIX: the header flag used to test file.exists("out"), which never
# exists, so a header row was appended on EVERY iteration. Test the actual
# output file instead (header only on first write), matching the
# permutation loop below.
write.table(file=out_file,data.frame(gene=x,experiment=dim(focus_links)[1],predicted=length(intersect(names(V(sub_g)),unique(as.vector(t(as.matrix(focus_links))))))-1),sep="\t",append=TRUE,col.names = !file.exists(out_file),row.names = FALSE,quote = FALSE)
focus_links<-subset(focus_links,focus_links$V2%in%names(V(sub_g))&focus_links$V1%in%names(V(sub_g))) # common from exper and predicte
}
}
## result plot
# Predictive accuracy per gene = validated predicted links / experimental
# links (genes with no experimental links are excluded).
tair<-read.table("4network_analysis_result/1network_validation/tair_interaction_validation",header = TRUE)
tair<-subset(tair,tair$experiment>0)
tair$accuaracy<-tair$predicted/tair$experiment
### compare with permute g for 20 times
# Repeat the TAIR validation on 20 degree-preserving rewired networks to get
# a null distribution of predictive accuracy.
for(m in 1:20){
message(m)
permute_g<-rewire(g,with = keeping_degseq(niter = vcount(g) * 100))
tair_inter<-read.table("/home/wuzefeng/MyResearch/networks/PPI/Arabidopsis/TairProteinInteraction.20090527.txt",header = TRUE,stringsAsFactors = FALSE,sep="\t")
focused_genes_list <-unique(c(tair_inter$Locus_name,tair_inter$InteractorLocus_name))
for (x in focused_genes_list){
focused_genes<-x
if (x %in% names(V(permute_g))){
sub_g<-induced_subgraph(permute_g, ego(permute_g, 1, focused_genes)[[1]])
focus_links<-subset(tair_inter,tair_inter$Locus_name==focused_genes | tair_inter$InteractorLocus_name==focused_genes )
focus_links<-focus_links[,c(1,3,5,6)]
focus_links<-unique(as.data.frame(t(apply(focus_links[,c(1,2)],1,sort)),stringsAsFactors = FALSE))
colnames(focus_links)<-c("V1","V2")
focus_links<-subset(focus_links,focus_links$V1!=focus_links$V2)
message(c(x,";","experiment links number is: ",dim(focus_links)[1],";","of these the predicted links is: ",length(intersect(names(V(sub_g)),unique(as.vector(t(as.matrix(focus_links))))))-1))
write.table(file=paste("4network_analysis_result/1network_validation/TIP_validation_20_random/tair_interaction_validation.random",m,sep=""),
data.frame(gene=x,experiment=dim(focus_links)[1],
predicted=length(intersect(names(V(sub_g)),unique(as.vector(t(as.matrix(focus_links))))))-1),
sep="\t",append=TRUE,
col.names = !file.exists(paste("4network_analysis_result/1network_validation/TIP_validation_20_random/tair_interaction_validation.random",m,sep="")),
row.names = FALSE,quote = FALSE)
#focus_links<-subset(focus_links,focus_links$V2%in%names(V(sub_g))&focus_links$V1%in%names(V(sub_g)))
}
}
}
## plot
# Collect per-gene accuracy from the 20 permutation runs (one column each)
# and average across runs, then plot original vs permuted accuracy.
random <- c()
# create directory names
random_dir <- dir("/home/wuzefeng/MyResearch/networks/2network_prediction/4network_analysis_result/1network_validation/TIP_validation_20_random",full.names = TRUE)
# loop through all directories and grab fpkm columns
for( i in 1:length(random_dir) ){
fname <- random_dir[i]
x <- read.table(file=fname, sep="\t", header=T, as.is=T)
random <- cbind(random, x[,"predicted"]/x[,"experiment"])
}
# name the columns
colnames(random) <- stringr::str_split_fixed(basename(random_dir),pattern = "\\.",n = 2)[,2]
# name the rows, they're all in the same order
rownames(random) <- x[,1]
tair_random<-data.frame(apply(random,1,mean))
colnames(tair_random)<-"random_accuaracy"
tair_random<-subset(tair_random,tair_random$random_accuaracy>=0)
dd1<-data.frame(accuaracy=tair$accuaracy,class="Original")
dd2<-data.frame(accuaracy=tair_random$random_accuaracy,class="Permutation")
dd<-rbind(dd1,dd2)
ggplot(dd,aes(x=accuaracy,fill=class))+geom_histogram(position = "dodge")+
theme(text = element_text(size = 20),legend.position=c(0.85,0.9))+
ylab("Count")+xlab("Predictive accuracy")+
scale_fill_manual("Network type",values=c('#9ED2F0','#E6A429'))
#### 1.2 individule target gene validation and plot
# Same validation as above, but for one focal gene; focus_links is then
# restricted to pairs whose both ends are in the predicted neighbourhood
# (used by the plotting helper below).
tair_inter<-read.table("/home/wuzefeng/MyResearch/networks/PPI/Arabidopsis/TairProteinInteraction.20090527.txt",header = TRUE,stringsAsFactors = FALSE,sep="\t")
focused_genes<-"AT2G18790" # AT1G02340 #AT2G18790
sub_g<-induced_subgraph(g, ego(g, 1, focused_genes)[[1]])
focus_links<-subset(tair_inter,tair_inter$Locus_name==focused_genes | tair_inter$InteractorLocus_name==focused_genes)
focus_links<-focus_links[,c(1,3,5,6)]
focus_links<-unique(as.data.frame(t(apply(focus_links[,c(1,2)],1,sort)),stringsAsFactors = FALSE))
focus_links<-subset(focus_links,focus_links$V1!=focus_links$V2)
message(c("experiment links number is: ",dim(focus_links)[1],";","of these the predicted links is: ",length(intersect(names(V(sub_g)),unique(as.vector(t(as.matrix(focus_links))))))-1))
focus_links<-subset(focus_links,focus_links$V2%in%names(V(sub_g))&focus_links$V1%in%names(V(sub_g)))
### plot attribution
# Plot the 1-step neighbourhood of a focal gene, drawing experimentally
# validated links (purple, thick) on top of predicted links (gray).
#
# graph_object: igraph object to draw.
# focused_list: vertex names to colour red (the focal gene(s)).
#
# BUGFIX: the original body styled and plotted the global `sub_g`, ignoring
# the `graph_object` parameter entirely (harmless at the existing call site,
# which passes sub_g, but wrong for any other argument). It now uses the
# parameter. NOTE(review): it still reads the global `focus_links` for the
# validated edge list -- consider passing that in as well.
plot_personal_graph<-function(graph_object,focused_list){
V(graph_object)$color<-ifelse(names(V(graph_object))%in%focused_list,"red","steelblue") # vertex color
V(graph_object)$size <- 8 # vertex size
V(graph_object)$label.cex <- 1 # vertex label size
V(graph_object)$label.color<-"brown"
E(graph_object)$color <- "gray" # edge color
E(graph_object)$width=2
# validated (experimental) links highlighted in purple and drawn thicker
E(graph_object,P=as.vector(t(as.matrix(focus_links))))$color<-"purple"
E(graph_object,P=as.vector(t(as.matrix(focus_links))))$width<-5
plot.igraph(graph_object,layout=layout.fruchterman.reingold)
legend('topleft',
legend=c("Predicted","Validation"),
pch=95, #shape
box.lty=2, #
lty=1:2,
pt.cex= 3, #lines size
cex=1, #box size
col=c("gray","purple"))
}
plot_personal_graph(graph_object = sub_g,focused_list = focused_genes)
## 2.kegg validation
###2.1 network validation using kegg pathway
# For every pair of KEGG pathways, compare the number of predicted edges
# within pathways vs between pathways against the possible counts, and
# score enrichment with a one-sided Fisher exact test.
# NOTE(review): require() is used for loading here; library() would fail
# loudly if org.At.tair.db is missing.
require(org.At.tair.db)
xx<- as.list(org.At.tairPATH2TAIR)
combins<-combn(seq(1:length(xx)),2)
for (col in 1:ncol(combins)){
gene_list1<-xx[[combins[,col][1]]]
gene_list2<-xx[[combins[,col][2]]]
# possible within-pathway pairs (shared genes counted once)
group_in_possible<-choose(length(gene_list1),2)+choose(length(gene_list2),2)-choose(length(intersect(gene_list1,gene_list2)),2)
group_in_prediction <- ecount(induced_subgraph(g,intersect(gene_list1,names(V(g)))) %u% induced_subgraph(g,intersect(gene_list2,names(V(g)))))
between_group_possible <-length(gene_list1)*length(gene_list2)-length(intersect(gene_list1,gene_list2))^2+choose(length(intersect(gene_list1,gene_list2)),2)
between_group_prediction <-ecount(induced_subgraph(g,intersect(unique(c(gene_list1,gene_list2)),names(V(g)))))-group_in_prediction
print(c(col, group_in_possible,group_in_prediction,between_group_possible,between_group_prediction))
message(fisher.test(matrix(c(group_in_prediction,group_in_possible,between_group_prediction,between_group_possible),nrow=2),alternative="greater")$p.value)
write.table(file="KEGG_validation",data.frame(pathway=paste(names(xx)[combins[,col][1]],names(xx)[combins[,col][2]],sep = "_"),
predicted_in_pathway = group_in_prediction,
in_pathway = group_in_possible,
prediction_between_pathway = between_group_prediction,
between_pathway = between_group_possible,
p_value = fisher.test(matrix(c(group_in_prediction,group_in_possible,between_group_prediction,between_group_possible),nrow=2),alternative="greater")$p.value),
sep="\t",append=TRUE,col.names = !file.exists("KEGG_validation"),
row.names = FALSE,quote = FALSE)
}
####2.2 reanalysis kegg data with permutations network
# Same pathway-pair Fisher test, repeated on 20 degree-preserving rewired
# networks to build the null distribution of p-values.
for (m in 1:20){
message(m)
permute_g<-rewire(g,with = keeping_degseq(niter = vcount(g) * 100))
for (col in 1:ncol(combins)){
gene_list1<-xx[[combins[,col][1]]]
gene_list2<-xx[[combins[,col][2]]]
group_in_possible<-choose(length(gene_list1),2)+choose(length(gene_list2),2)-choose(length(intersect(gene_list1,gene_list2)),2)
group_in_prediction <- ecount(induced_subgraph(permute_g,intersect(gene_list1,names(V(permute_g)))) %u% induced_subgraph(permute_g,intersect(gene_list2,names(V(permute_g)))))
between_group_possible <-length(gene_list1)*length(gene_list2)-length(intersect(gene_list1,gene_list2))^2+choose(length(intersect(gene_list1,gene_list2)),2)
between_group_prediction <-ecount(induced_subgraph(permute_g,intersect(unique(c(gene_list1,gene_list2)),names(V(permute_g)))))-group_in_prediction
print(c(col, group_in_possible,group_in_prediction,between_group_possible,between_group_prediction))
message(fisher.test(matrix(c(group_in_prediction,group_in_possible,between_group_prediction,between_group_possible),nrow=2),alternative="greater")$p.value)
write.table(file=paste("4network_analysis_result/1network_validation/KEGG_validation_20_random/KEGG_validation.random",m,sep=""),data.frame(pathway=paste(names(xx)[combins[,col][1]],names(xx)[combins[,col][2]],sep = "_"),
predicted_in_pathway = group_in_prediction,
in_pathway = group_in_possible,
prediction_between_pathway = between_group_prediction,
between_pathway = between_group_possible,
p_value = fisher.test(matrix(c(group_in_prediction,group_in_possible,between_group_prediction,between_group_possible),nrow=2),alternative="greater")$p.value),
sep="\t",append=TRUE,col.names = !file.exists(paste("4network_analysis_result/1network_validation/KEGG_validation_20_random/KEGG_validation.random",m,sep="")),
row.names = FALSE,quote = FALSE)
}
}
##ggplot
# Compare the original KEGG p-value distribution against the mean p-value
# per pathway pair over the 20 permutation runs.
data1<-read.table("4network_analysis_result/1network_validation/KEGG_validation",stringsAsFactors = FALSE,header = TRUE)
data1$qvalue<-p.adjust(data1$p_value)
###
random <- c()
# create directory names
random_dir <- dir("/home/wuzefeng/MyResearch/networks/2network_prediction/4network_analysis_result/1network_validation/KEGG_validation_20_random",full.names = TRUE)
# loop through all directories and grab fpkm columns
for( i in 1:length(random_dir) ){
fname <- random_dir[i]
x <- read.table(file=fname, sep="\t", header=T, as.is=T)
random <- cbind(random, x[,"p_value"])
}
# name the columns
colnames(random) <- stringr::str_split_fixed(basename(random_dir),pattern = "\\.",n = 2)[,2]
# name the rows, they're all in the same order
rownames(random) <- x[,1]
tair_random<-data.frame(apply(random,1,mean))
colnames(tair_random)<-"p_value"
p_values<-rbind(data.frame(Pvalue=data1$p_value,class="original",stringsAsFactors = FALSE),data.frame(Pvalue=tair_random$p_value,class="permutation",stringsAsFactors = FALSE))
library(ggplot2)
p<-ggplot(p_values,aes(x=Pvalue,fill=class))+geom_histogram(position = "dodge")+
theme(text = element_text(size=20),legend.position=c(0.85,0.9))+
ylab("Frequency")+xlab("P-value")+
scale_fill_manual("Network type",values=c('#9ED2F0','#E6A429')) #legend modifications
#####
####3 compare wit aranet2
# Edge overlap between the predicted network g and two published networks
# (AraNet2 and AtPIN), via igraph's %s% (graph intersection).
ara_net2 <-read.table("other_sourse/AraNet.txt",header = FALSE,stringsAsFactors = FALSE)
colnames(ara_net2)<-c("X1","X2","weight")
ara_net2<-ara_net2[!duplicated(ara_net2[c(1,2)]),]
library(igraph)
ara_net_g<-graph.data.frame(d = ara_net2, directed = FALSE)
ara_net_g<-simplify(ara_net_g,remove.multiple = TRUE,remove.loops = TRUE) #22894 *895000
intersect_g<-ara_net_g%s%g #14112 common edges
intersect_permute_g<-permute_g%s%ara_net_g #199
### 3.5 compare with AtPIN
AtPIN <-read.table("other_sourse/AtPIN_PPI.txt",header = FALSE,stringsAsFactors = FALSE,sep="\t")
AtPIN<-AtPIN[,-3]
colnames(AtPIN)<-c("X1","X2")
AtPIN<-AtPIN[!duplicated(AtPIN[c(1,2)]),]
library(igraph)
AtPIN_g<-graph.data.frame(d = AtPIN, directed = FALSE)
AtPIN_g<-simplify(AtPIN_g,remove.multiple = TRUE,remove.loops = TRUE) #15163 * 95043
# NOTE(review): the two lines below intersect ara_net_g again, not AtPIN_g,
# despite the "compare with AtPIN" heading (and the inline counts disagree
# with the identical expressions above) -- looks like a copy-paste slip;
# presumably AtPIN_g %s% g was intended. Confirm before relying on these.
intersect_g<-ara_net_g%s%g # 14112
intersect_permute_g<-permute_g%s%ara_net_g #1824
###4 analysis flowering geneset
##nalysis flowering geneset
# Are known flowering-time genes closer together in g than random gene sets?
flowering_genes<-read.csv("other_resources/Flowering Interactive Database FLOR-ID - Flowering time.csv",header = TRUE,stringsAsFactors = FALSE)
commen_flower_genes<- intersect(flowering_genes$Gene.details,names(V(g)))
#short distence among these flowering genes
d_flower<-shortest.paths(g,commen_flower_genes, commen_flower_genes,weights = NA)
hist(d_flower)
#random sampled genes
# Null model: mean finite shortest path among 285 randomly sampled vertices.
# NOTE(review): `x` is an unused dummy (mclapply index) and 285 is hardcoded
# -- presumably it matches length(commen_flower_genes); confirm.
fun <- function(x){
require(igraph)
rand_genes<-sample(names(V(g)),285,replace = FALSE)
d_random<-shortest.paths(g,rand_genes, rand_genes,weights = NA)
return(mean(as.numeric(d_random)[is.finite(as.numeric(d_random))]))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 7))
simulations<-unlist(simulations)
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue")+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean shortest path")+
annotate("segment", x=2.95, xend=2.95, y=25, yend=0, color="black", size=2, arrow=arrow())
###4.5 flc genes
####4.5 flc genes
# Neighbourhood of FLC (AT5G10140): orange = FLC itself, green = neighbours
# with no TAIR annotation, blue = annotated neighbours.
sub_g<-induced_subgraph(g, ego(g, 1, "AT5G10140")[[1]])
tair_no_annotated <-c("AT1G02840","AT1G32320","AT1G54440","AT2G20050","AT2G26330","AT2G35110","AT3G26790","AT3G46520","AT5G60410","AT5G45830")
V(sub_g)$size <- 8 # vertex size
V(sub_g)$label.cex <- 1 # vertex label size
V(sub_g)$label.color<-"brown"
E(sub_g)$color <- "gray" # edge color
V(sub_g)$color<-"steelblue"
V(sub_g)$color[names(V(sub_g))=="AT5G10140"]<-"orange"
V(sub_g)$color[names(V(sub_g))%in%tair_no_annotated]<-"lightgreen"
plot(sub_g,vertex.frame.color= "white")
legend('topleft',
legend=c("FLC","Known","Unknown"),
pch=19, #shape
box.lty=2, #
pt.cex= 3, #lines size
cex=1, #box size
col=c("orange","steelblue","lightgreen"),
y.intersp=1.5
)
## gene degree and dn/ds
# Correlate vertex degree with dN/dS (vs A. lyrata orthologs); per gene the
# ortholog pair with the smallest dS is kept.
genes_degree<-degree(g)
dnds<-read.table("myresearch/network/data/TAIR2lyrata.ds.mart_export.txt",sep="\t",header = TRUE,stringsAsFactors = FALSE)
dnds<-dnds[complete.cases(dnds),]
dnds<-unique(dnds)
# NOTE(review): uses dplyr verbs (%>%, group_by, slice) without a visible
# library(dplyr) in this script -- presumably attached elsewhere; confirm.
dnds<-dnds %>% group_by(Gene.stable.ID) %>% slice(which.min(dS.with.Arabidopsis.lyrata)) # select minumum ds for same genes
dnds$dnds<-dnds$dN.with.Arabidopsis.lyrata/dnds$dS.with.Arabidopsis.lyrata
# NOTE(review): after group_by/slice, dnds is a (grouped) tibble; tibbles do
# not support row names, so the rownames assignment and the character
# row-indexing on the next line may silently misalign genes -- verify, e.g.
# by matching on Gene.stable.ID instead.
rownames(dnds)<-dnds$Gene.stable.ID
genes_dnds<-dnds[names(degree(g)),]$dnds
df<-data.frame(degree=degree(g),dnds=genes_dnds)
df<-na.omit(df)
df<-df[is.finite(df$dnds),]
df<-df[df$dnds<5,]
cor.test(df$degree,df$dnds) # pcc = -0.12
##7 imprinted genes analysis
###7.0 import imprinted genes
# Parse an imprinted-gene list file into gene sets split by parent of origin.
#
# ara_imprinted_genes_file: whitespace-separated file whose first column is
#   the gene ID and whose LAST column is the parent-of-origin tag
#   ("f" = paternally expressed / PEG, "m" = maternally expressed / MEG).
#   Using the last column covers both the 2- and 3-column layouts the
#   original handled, and generalizes to wider ones.
#
# Returns a list with IG/PEG/MEG (all genes) and *_in_networks variants
# restricted to vertices of the network. NOTE(review): depends on the global
# igraph object `g` being defined before this is called.
#
# BUGFIX: with a column count other than 2 or 3 the original fell through
# both `if` blocks and later crashed on the undefined `paternal_imprint`;
# it now fails fast with a clear error for files lacking a tag column.
Arabidospis_imprinted_genes_parse<-function(ara_imprinted_genes_file){
imprinted_data <-read.table(ara_imprinted_genes_file,stringsAsFactors = FALSE)
if (ncol(imprinted_data) < 2) {
stop("imprinted gene file needs a gene-ID column and a parent-of-origin column")
}
imprinted_genes<-unique(imprinted_data$V1)
imprinted_genes_in_network<- intersect(imprinted_genes,names(V(g)))
origin_tag <- imprinted_data[[ncol(imprinted_data)]]
paternal_imprint <- unique(imprinted_data$V1[origin_tag=="f"])
maternal_imprint <- unique(imprinted_data$V1[origin_tag=="m"])
IGs<-list()
IGs$IG <- imprinted_genes
IGs$PEG <- paternal_imprint
IGs$MEG <- maternal_imprint
IGs$IG_in_networks <- imprinted_genes_in_network
IGs$PEG_in_networks <- intersect(paternal_imprint,names(V(g)))
IGs$MEG_in_networks <- intersect(maternal_imprint,names(V(g)))
message("Imprinted genes number in Arabidopsis is: ",length(IGs$IG))
return(IGs)
}
imprinted_data <-Arabidospis_imprinted_genes_parse("myresearch/network/data/imp2+.list")
#imprinted_data <-Arabidospis_imprinted_genes_parse("/home/wuzefeng/MyResearch/Imprinting_prediction/imprint_gene_list/TAIR10/6imprinted.list") #526
imprinted_genes<-imprinted_data$IG
imprinted_genes_in_network<-imprinted_data$IG_in_networks ### 82|470
maternal_imprint<-imprinted_data$MEG_in_networks ### #376
paternal_imprint<-imprinted_data$PEG_in_networks ### #94
# Subgraph induced by the imprinted genes; MEGs tomato, PEGs steelblue.
sub_g<-induced_subgraph(g, imprinted_genes_in_network)
V(sub_g)$color[names(V(sub_g))%in%maternal_imprint]<-"tomato" # vertex color
V(sub_g)$color[names(V(sub_g))%in%paternal_imprint]<-"steelblue"
# NOTE(review): `componet_IGs` and `iso2comp_min_dis` are only defined much
# later in this script (section 7.x at the bottom) -- running the file
# top-to-bottom fails here unless those were computed in a previous session.
V(sub_g)$distance <- ifelse(names(V(sub_g))%in%componet_IGs,3,iso2comp_min_dis$Freq[match(names(V(sub_g)),iso2comp_min_dis$Var1)]) # vertex size
V(sub_g)$label.cex <- 0.8 # vertex label size
V(sub_g)$label.color<-"black"
E(sub_g)$color <- "gray" # edge color
E(sub_g)$width=3
V(sub_g)$class<-ifelse(names(V(sub_g))%in%maternal_imprint,"MEG","PEG")
## plot
library(intergraph)
library(ggnetwork)
dat <- ggnetwork(sub_g, layout="fruchtermanreingold", arrow.gap=0, cell.jitter=0)
ggplot(dat,aes(x=x, y=y, xend=xend, yend=yend)) +
geom_edges(color="grey50", curvature=0.1, size=0.5) +
geom_nodes(aes(size=distance,color=class)) +
geom_nodetext(aes(label=vertex.names),size=3, color="#8856a7") +
theme_blank()+scale_color_brewer(palette = "Set2")+
scale_size_area(max_size = 9)
## 7.0.1imprinted and neighbors netwoerk
# Subgraph of imprinted genes plus their direct neighbours.
sub_g<-induced_subgraph(g, unique(names(unlist(ego(g,order = 1,imprinted_genes_in_network)))))
# NOTE(review): this REDEFINES plot_personal_graph from section 1.2 with a
# different signature, and like the first version it ignores its
# `graph_object` parameter: it styles/plots the global `sub_g` and reads the
# globals maternal_imprint / paternal_imprint / imprinted_genes_in_network.
# Works only because sub_g is passed at the call below.
plot_personal_graph<-function(graph_object){
V(sub_g)$color[names(V(sub_g))%in%maternal_imprint]<-"tomato" # vertex color
V(sub_g)$color[names(V(sub_g))%in%paternal_imprint]<-"steelblue"
V(sub_g)$color[!names(V(sub_g))%in%imprinted_genes_in_network]<-"black"
V(sub_g)$size <- 2 # vertex size
V(sub_g)$label.cex <- 0.8 # vertex label size
V(sub_g)$label.color<-"black"
E(sub_g)$color <- "gray" # edge color
E(sub_g)$width=1
plot.igraph(sub_g,layout=layout.fruchterman.reingold,vertex.frame.color= "white",vertex.label=NA)
legend('topleft',
legend=c("Maternal","Paternal","Partners"),
pch=19, #shape
box.lty=2, #
pt.cex= 3, #lines size
cex=1, #box size
col=c("tomato","steelblue","black"),
y.intersp=1.5)
}
plot_personal_graph(graph_object = sub_g)
### perform network module analysis
# Export the imprinted-neighbourhood edge list in FANMOD input format:
# numeric vertex ids plus a colour code per endpoint
# (0 = not imprinted, 1 = maternal, 2 = paternal).
nd <- as.data.frame(get.edgelist(sub_g),stringsAsFactors = FALSE)
colnames(nd)<-c("X1","X2")
gene_list<-unique(c(nd$X1,nd$X2))
## make gene2num table
gene_list_numbers <-seq(0,length(gene_list)-1)
gene_name2number<-data.frame(gene_list,gene_list_numbers,stringsAsFactors = FALSE)
## assign network vetex to number
nd$g1<-gene_name2number$gene_list_numbers[match(nd$X1,gene_name2number$gene_list)]
nd$g2<-gene_name2number$gene_list_numbers[match(nd$X2,gene_name2number$gene_list)]
## import imprinted genes
# NOTE(review): this clobbers the earlier `imprinted_genes` character vector
# with a data frame of the same name.
imprinted_genes<-read.table("~/MyResearch/Imprinting_prediction/imprint_gene_list/TAIR10/ara_imp_by_paper_num/imp2+.list",stringsAsFactors = FALSE)
imprinted_genes$color<-ifelse(imprinted_genes$V2=="m",1,2)
###
nd$c1<- imprinted_genes$color[match(nd$X1,imprinted_genes$V1)]
nd$c2<- imprinted_genes$color[match(nd$X2,imprinted_genes$V1)]
nd$c1[is.na(nd$c1)]=0
nd$c2[is.na(nd$c2)]=0
fanmond_input<-nd[,c(3,4,5,6)]
write.table(fanmond_input,"fanmod_inout.txt",row.names = FALSE,col.names = FALSE,quote = FALSE,sep = "\t")
# NOTE(review): the two lines below look like leftover duplicates -- the
# re-read table has no $color column yet, so nd$c1 becomes all NA here;
# presumably they should be deleted.
imprinted_genes<-read.table("~/MyResearch/Imprinting_prediction/imprint_gene_list/TAIR10/ara_imp_by_paper_num/imp2+.list",stringsAsFactors = FALSE)
nd$c1<- imprinted_genes$color[match(nd$X1,imprinted_genes$V1)]
## 7.1 imp-imp distence
# Pairwise shortest paths among imprinted genes (upper triangle only, so
# each unordered pair is counted once).
imp_distance <-shortest.paths(g,imprinted_genes_in_network, imprinted_genes_in_network,weights = NA)
imp_distance <- imp_distance[upper.tri(imp_distance)]
## 7.1.5 random simulation
# Null model: mean finite pairwise shortest path among an equally sized
# random vertex sample. `x` is an unused dummy index from mclapply.
fun <- function(x){
require(igraph)
rand_genes<-sample(names(V(g)),length(imprinted_genes_in_network),replace = FALSE)
d_random<-shortest.paths(g,rand_genes, rand_genes,weights = NA)
d_random <-d_random[upper.tri(d_random)]
d_random <-d_random[is.finite(d_random)]
return(mean(d_random))
}
library(parallel)
# NOTE(review): mc.cores = 60 assumes a large server; adjust for the host.
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 60))
simulations<-unlist(simulations)
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins = 200)+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean shortest path")+
annotate("segment", x=mean(imp_distance[is.finite(imp_distance)]), xend=mean(imp_distance[is.finite(imp_distance)]), y=5, yend=0, color="black", size=2, arrow=arrow())+
annotate("segment", x=median(imp_distance[is.finite(imp_distance)]), xend=median(imp_distance[is.finite(imp_distance)]), y=5, yend=0, color="red", size=2, arrow=arrow())+
annotate("text", x = median(imp_distance[is.finite(imp_distance)]), y = 6, label = "Median")+
annotate("text", x = mean(imp_distance[is.finite(imp_distance)]), y = 6, label = "Mean")
### paternal and maternal distance
# Shortest-path distributions within MEGs, within PEGs, and between the two
# sets; distances are capped at 5 for plotting.
maternal_imp_distance <-shortest.paths(g,maternal_imprint, maternal_imprint,weights = NA)
maternal_imp_distance<-maternal_imp_distance[upper.tri(maternal_imp_distance)]
maternal_imp_distance<-maternal_imp_distance[is.finite(maternal_imp_distance)]
paternal_imp_distance <-shortest.paths(g,paternal_imprint, paternal_imprint,weights = NA)
paternal_imp_distance<-paternal_imp_distance[upper.tri(paternal_imp_distance)]
paternal_imp_distance<-paternal_imp_distance[is.finite(paternal_imp_distance)]
maternal_paternal_distance <- shortest.paths(g,paternal_imprint, maternal_imprint,weights = NA)
# NOTE(review): this PEG x MEG matrix is NOT square and has no redundant
# half, so taking upper.tri discards valid pairs asymmetrically -- the
# whole matrix (as.vector) was probably intended; verify.
maternal_paternal_distance<-maternal_paternal_distance[upper.tri(maternal_paternal_distance)]
maternal_paternal_distance<-maternal_paternal_distance[is.finite(maternal_paternal_distance)]
dis_maternal<-data.frame(shortest.paths=maternal_imp_distance,class="MEG-MEG")
dis_paternal<-data.frame(shortest.paths=paternal_imp_distance,class="PEG-PEG")
dis_maternal2paternal<-data.frame(shortest.paths=maternal_paternal_distance,class="MEG-PEG")
df<-rbind(dis_maternal,dis_paternal,dis_maternal2paternal)
df$shortest.paths<-ifelse(df$shortest.paths>5,5,df$shortest.paths)
# Violin plot of shortest-path distributions per gene-pair class, with
# pairwise one-sided Wilcoxon tests drawn by ggsignif::geom_signif.
# BUGFIX: `comparisons` used labels ("MEG", "PEG", "MEG->PEG") that do not
# occur in df$class (the classes built above are "MEG-MEG", "PEG-PEG",
# "MEG-PEG"), so the brackets could not match any group; also
# map_signif_level was the string "FALSE" instead of the logical FALSE.
ggplot(df,aes(y=shortest.paths,x=class,fill=class))+
geom_violin()+
theme_bw(base_size = 20)+
geom_signif(comparisons = list(c("MEG-MEG", "PEG-PEG"),c("MEG-MEG","MEG-PEG"),c("MEG-PEG","PEG-PEG")),
test = "wilcox.test",map_signif_level = FALSE,
test.args = list(alternative = "greater"),
step_increase = 0.05,
tip_length = 0.01)+
scale_fill_manual(values = c("tomato","steelblue", "orange")) +
theme(legend.position = "none",
axis.title.x = element_blank(),
plot.title = element_text(hjust = 0.5, size = 12, face = "bold"))+
ylab("Shortest path") +
stat_summary(fun.y = mean, geom = "point", size = 2, color = "red") # fun.y kept for older ggplot2; newer versions prefer `fun`
### optional plot
# Stacked-percentage view of the same data. NOTE: this overwrites df with
# per-class proportions, so draw the violin plot above first.
df<-df%>%group_by(class,shortest.paths)%>%summarise(n=n())%>%mutate(y=n/sum(n))
ggplot(df, aes(fill=as.factor(shortest.paths), y=y, x=class))+
theme_bw()+
theme(text=element_text(size=20))+
geom_bar( stat="identity", position="fill")+
scale_fill_brewer(name="Shortest path",palette = "Set3",labels=c(c(1,2,3,4),expression(phantom(x)>=5)))+
geom_text(aes(label=paste(round(y,2)*100,"%",sep="")),color="black", size=3.5,position=position_fill(0.5))+
ylab("Percentage")+xlab("")
#scale_fill_manual(name="Shortest path", values=c("#458B74", "#CDAA7D", "#8968CD", "#CD5555", "#1874CD", "#EE7600"))
### Permutation test: is the largest connected component of the imprinted-gene-
### only network (IGON) bigger than expected for a random gene set of the same
### size drawn from g? (degree structure of g itself is unchanged)
IGON<-induced_subgraph(g, imprinted_genes_in_network)
max_component_size_of_IGON<-max(components(IGON)$csize)
## 1000 random draws; sizes of the largest component of each induced subgraph
max_component_size_of_random<-c()
for (m in (seq(1,1000))){
sub_gg<-induced_subgraph(g,vids = sample(names(V(g)),length(imprinted_genes_in_network)))
max_component_size_of_sub_gg<-max(components(sub_gg)$csize)
max_component_size_of_random<-c(max_component_size_of_random,max_component_size_of_sub_gg)
}
## null distribution of largest-component size, observed IGON value arrowed
ggplot(data.frame(Max_component_size = max_component_size_of_random),aes(x=Max_component_size))+
geom_histogram(bins = 200,fill="steelblue")+
theme_bw(base_size = 20)+
xlab("Maximal component size")+ylab("Count")+
#xlim(0,45)+
annotate("segment",x=max_component_size_of_IGON,xend = max_component_size_of_IGON,yend=0,y=5,size=2, arrow=arrow(),color="red")+
annotate("text", x = max_component_size_of_IGON, y = 6, label = "Imprinted genes")
### calulate isolated genes to component genes distance
## NOTE(review): membership == 1 is assumed to be the big IGON component --
## components() does not guarantee component 1 is the largest; confirm.
iso_IGs <-names(components(IGON)$membership)[components(IGON)$membership!=1] #48 IGs
componet_IGs <- names(components(IGON)$membership)[components(IGON)$membership==1] #34
## minimal distance in g from each isolated IG to any component IG
iso2comp_dis<- shortest.paths(g,v = iso_IGs,to = componet_IGs,weights = NA)
iso2comp_dis_df<-as.data.frame(as.table(iso2comp_dis))
iso2comp_min_dis<-data.frame(iso2comp_dis_df%>%group_by(Var1)%>%summarise_at("Freq",min))
## fraction of isolated IGs at distance 2, i.e. one intermediate gene away
distance1_ratio<-sum(iso2comp_min_dis$Freq==2)/nrow(iso2comp_min_dis) #60.4
#Min. 1st Qu. Median Mean 3rd Qu. Max.
#2.00 2.00 2.00 2.42 3.00 4.00
### Null model: mean minimal distance from random gene sets (same size as the
### isolated IG set) to the IG component genes (nort significant)
## fun(x): one replicate; x is the (unused) replicate index from mclapply
fun <- function(x){
require(igraph)
random_genes<-sample(names(V(g))[!names(V(g))%in%componet_IGs],length(iso_IGs))
iso2comp_dis<- shortest.paths(g,v = random_genes,to = componet_IGs,weights = NA)
iso2comp_dis_df<-as.data.frame(as.table(iso2comp_dis))
## minimal distance of each random gene to the component, then its mean
a<-data.frame(iso2comp_dis_df%>%group_by(Var1)%>%summarise_at("Freq",min))
return(mean(a$Freq))
}
library(parallel)
## 1000 replicates in parallel (60 cores)
system.time(random2comp_dis<-mclapply(1:1000,fun,mc.cores = 60))
random2comp_dis<-unlist(random2comp_dis)
## Null distribution of mean random-gene-to-component distance, with the
## observed isolated-IG mean arrowed.
ggplot(data.frame(shortest.path = random2comp_dis[is.finite(random2comp_dis)]),aes(x=shortest.path))+
geom_histogram(fill="steelblue")+
theme_bw(base_size = 20)+
## NOTE(review): axis label says "Maximal component size" but the x variable
## is a mean shortest path -- looks copy-pasted from the plot above; confirm.
xlab("Maximal component size")+ylab("Count")+
#xlim(0,45)+
annotate("segment",x=mean(iso2comp_min_dis$Freq),xend = mean(iso2comp_min_dis$Freq),yend=0,y=25,size=2, arrow=arrow(),color="red")+
annotate("text", x = mean(iso2comp_min_dis$Freq), y = 30, label = "Imprinted genes")
##### the ratio of non-component genes to IG component genes distance is 1 in random network
## fun(x): one replicate of the fraction of random genes at distance 2
## (one intermediate) from the IG component. Redefines the fun above.
fun<-function(x){
random_genes<-sample(names(V(g))[!names(V(g))%in%componet_IGs],length(iso_IGs))
iso2comp_dis<- shortest.paths(g,v = random_genes,to = componet_IGs,weights = NA)
iso2comp_dis_df<-as.data.frame(as.table(iso2comp_dis))
a<-data.frame(iso2comp_dis_df%>%group_by(Var1)%>%summarise_at("Freq",min))
return(sum(a$Freq==2)/nrow(a))
}
system.time(random2comp_dis1_ratio<-mclapply(1:1000,fun,mc.cores = 60))
random2comp_dis1_ratio<-unlist(random2comp_dis1_ratio)
## null distribution of that fraction, observed IGON value arrowed
ggplot(data.frame(Max_component_size = random2comp_dis1_ratio),aes(x=Max_component_size))+
geom_histogram(fill="steelblue",bins=200)+
theme_bw(base_size = 14)+
xlab("Pecentage of genes seperated by one genes to component")+ylab("Count")+
#xlim(0,45)+
annotate("segment",x=distance1_ratio,xend = distance1_ratio,yend=0,y=5,size=2, arrow=arrow(),color="red")+
annotate("text", x = distance1_ratio, y = 6, label = "IGON")
### Same "distance-2 fraction" statistic, but for random subnetworks: take the
### largest component of each random induced subgraph and measure how its
### non-component genes connect back to it. Redefines fun again.
fun<-function(x){
sub_gg<-induced_subgraph(g,vids = sample(names(V(g)),length(imprinted_genes_in_network)))
componet_Gs <-names(V(sub_gg)[which.max(components(sub_gg)$csize)==components(sub_gg)$membership]) #48 IGs
iso_Gs <- names(V(sub_gg)[which.max(components(sub_gg)$csize)!=components(sub_gg)$membership])
iso2comp_dis<- shortest.paths(g,v = iso_Gs,to = componet_Gs,weights = NA)
iso2comp_dis_df<-as.data.frame(as.table(iso2comp_dis))
b<-data.frame(iso2comp_dis_df%>%group_by(Var1)%>%summarise_at("Freq",min))
ratio_distance1<-sum(b$Freq==2)/nrow(b)
return(ratio_distance1)
}
system.time(noncomp2comp_distance1_ratio<-mclapply(1:1000,fun,mc.cores = 60))
noncomp2comp_distance1_ratio<-unlist(noncomp2comp_distance1_ratio)
ggplot(data.frame(Max_component_size = noncomp2comp_distance1_ratio),aes(x=Max_component_size))+
geom_histogram(fill="steelblue",bins=200)+
theme_bw(base_size = 20)+
xlab("Pecentage of isolated genes in subnetwork")+ylab("Count")+
#xlim(0,45)+
annotate("segment",x=distance1_ratio,xend = distance1_ratio,yend=0,y=6,size=2, arrow=arrow(),color="red")+
annotate("text", x = distance1_ratio, y = 5, label = "IGON")
### test whether non-connected or iso_IGs can link to the IG componnet by permissive imprinted genes
## For each isolated IG, collect the middle gene of every length-2 shortest
## path (3 nodes) to the IG component; these are candidate "bridge" genes.
intermediate_nodes <-c()
iso_IG_can_reach_component_via_1_step<-c()
for (node in iso_IGs){
temp_shortest_path <- get.shortest.paths(graph = g,from = node,to = componet_IGs,weights = NA)
print(listLen(temp_shortest_path$vpath))
if (min(listLen(temp_shortest_path$vpath))>3){message("Shortest path with more than 3 nodes, not consider!")}
if (min(listLen(temp_shortest_path$vpath))==3){
for (path in temp_shortest_path$vpath){
if (length(path)==3){
#print (path)
iso_IG_can_reach_component_via_1_step<-unique(c(iso_IG_can_reach_component_via_1_step,node))
## path[2] is the single intermediate node on a 3-node path
intermediate_nodes<-c(intermediate_nodes,names(path[2]))
}
}
}
}
intermediate_nodes <-unique(intermediate_nodes) # 90 permissive
## intersect with permissive IGs
## Permissive imprinted-gene list; Arabidospis_imprinted_genes_parse() is
## defined elsewhere in the project.
permissive_imprinted_data <-Arabidospis_imprinted_genes_parse("myresearch/network/data/6imprinted.list") #528
## permissive IGs that are NOT in the strict set analysed so far
permissive_imp_not_in_strict <- permissive_imprinted_data$IG_in_networks[!permissive_imprinted_data$IG_in_networks%in% imprinted_genes_in_network] # 470
## observed overlap between bridge genes and permissive-only IGs
PINIS_intermediate<-length(intersect(intermediate_nodes,permissive_imp_not_in_strict))
## Null model: overlap between the bridge genes and random gene sets of the
## same size as the permissive-only IG set (1000 draws), vs the observed
## overlap (PINIS_intermediate).
## vapply replaces the original grow-with-c() loop (preallocated, type-stable).
random_ratio <- vapply(seq_len(1000), function(m) {
  random_genes <- sample(names(V(g))[!names(V(g)) %in% imprinted_genes_in_network],
                         length(permissive_imp_not_in_strict))
  length(intersect(intermediate_nodes, random_genes))
}, numeric(1))
## Histogram of the null counts with the observed value arrowed.
## Fixes vs original: aes() now maps the data-frame column "Number" (the
## original mapped the global vector, silently bypassing the data argument);
## the `bins` argument is dropped because geom_histogram(stat = "count")
## ignores it with a warning; "Obseved" label typo corrected.
ggplot(data.frame(Number = random_ratio), aes(x = Number)) +
  geom_histogram(fill = "steelblue", stat = "count") +
  theme_bw(base_size = 20) +
  xlab("Number of intermediated genes") + ylab("Frequency") +
  annotate("segment", x = PINIS_intermediate, xend = PINIS_intermediate,
           yend = 0, y = 30, size = 2, arrow = arrow(), color = "red") +
  annotate("text", x = PINIS_intermediate, y = 35, label = "Observed") +
  scale_x_discrete(limits = seq(0, 10, 2))
### 7.1.6 connectivity
## Edge density of a simple undirected graph: 2E / (V * (V - 1)).
## Fix: the original computed ecount*2 / (vcount*vcount - 1), i.e. 2E/(V^2-1),
## because the subtraction was not parenthesised. The corrected form matches
## igraph::edge_density() for simple undirected graphs.
connectivity<-function(igraph_object){
  n_nodes <- vcount(igraph_object)
  ecount(igraph_object) * 2 / (n_nodes * (n_nodes - 1))
}
## Density (connectivity) of: the full network (AGFN), the imprinted-gene
## partner network (IGPN = IGs + their direct neighbours), the IG-only network
## (IGON), and IGPN with the imprinted genes removed.
AGFN_connectivity<-connectivity(g) #0.0021
IGPN<-induced_subgraph(g, unique(names(unlist(ego(g,order = 1,imprinted_genes_in_network)))))
IGPN_connnectivity<-connectivity(IGPN) # 0.018
IGON<-induced_subgraph(g, imprinted_genes_in_network)
IGON_connectivity<-connectivity(IGON) #0.02 / 0.0082
IGPN_sub_imp<-induced_subgraph(g,unique(names(unlist(ego(g,order = 1,imprinted_genes_in_network))))[!unique(names(unlist(ego(g,order = 1,imprinted_genes_in_network))))%in%imprinted_genes_in_network])
IGPN_sub_imp_conectivity <-connectivity(IGPN_sub_imp) #0.018 / 0.0085
## null distribution: density of 1000 random same-size induced subgraphs
random_connectivity <-c()
for (m in 1:1000){
message(m)
sample_g<-induced_subgraph(graph = g,vids = sample(names(V(g)),length(imprinted_genes_in_network)))
random_connectivity<-c(random_connectivity,connectivity(sample_g))
}
library(ggplot2)
## histogram of null densities with IGPN and IGON densities arrowed
ggplot(data.frame(Connectivity=random_connectivity),
aes(x=Connectivity))+
geom_histogram(bins=200,fill="steelblue")+
theme_bw()+
theme(text = element_text(size=20))+ylab("Frequency")+
annotate("segment",x=IGPN_connnectivity,xend = IGPN_connnectivity,yend=0,y=10,size=2, arrow=arrow(),color="steelblue")+
annotate("segment",x=IGON_connectivity,xend = IGON_connectivity, yend=0,y=10,size=2, arrow=arrow(),color="orange")+
annotate("text", x = IGPN_connnectivity, y = 12, label = "IGPN")+
annotate("text", x = IGON_connectivity, y = 12, label = "IGON")
### 7.1.6 cluster coefficent (transitivity): global and mean-local clustering
### coefficients of the full, partner, IG-only and permuted networks.
## NOTE(review): gg and permute_g are permuted/rewired graphs built earlier in
## the file (not visible in this chunk) -- confirm their construction.
AGFN_transitivity<-transitivity(g) # 0.24
IGPN_transitivity<-transitivity(IGPN) # 0.38
IGON_transitivity<-transitivity(IGON) # 0.31 #
IGPN_sub_imp_transitivity<- transitivity(IGPN_sub_imp) #0.38
AGFN_permute_transitivity <-transitivity(gg)
## local clustering coefficients averaged over nodes (NA for degree < 2 nodes)
AGFN_transitivity_local<-mean(transitivity(g,type = "local"),na.rm = TRUE) # 0.25
IGPN_transitivity_local<-mean(transitivity(IGPN,type = "local"),na.rm = TRUE) # 0.40
IGON_transitivity_local<-mean(transitivity(IGON,type = "local"),na.rm = TRUE) # 0.35
IGPN_sub_imp_transitivity_local <-mean(transitivity(IGPN_sub_imp,type = "local"),na.rm = TRUE)
AGFN_permute_transitivity_local <-mean(transitivity(gg,type = "local"),na.rm = TRUE)
permute_g_transitivity<- transitivity(permute_g) #0.24
## summary table (ggpubr::ggtexttable) of global vs local coefficients
ggtexttable(rbind(data.frame(Data="AGFN",Clust.Coef=AGFN_transitivity, mode = "global"),
data.frame(Data="IGPN",Clust.Coef=IGPN_transitivity, mode = "global") ,
data.frame(Data="IGON",Clust.Coef=IGON_transitivity, mode = "global"),
data.frame(Data="IGPN-imprinted",Clust.Coef = IGPN_sub_imp_transitivity, mode = "global"),
data.frame(Data="AGFN_permute",Clust.Coef = AGFN_permute_transitivity, mode = "global"),
data.frame(Data="IGFN",Clust.Coef=AGFN_transitivity_local, mode = "local"),
data.frame(Data="IGPN",Clust.Coef=IGPN_transitivity_local, mode = "local"),
data.frame(Data="IGON",Clust.Coef=IGON_transitivity_local, mode = "local"),
data.frame(Data="IGPN-imprinted",Clust.Coef = IGPN_sub_imp_transitivity_local, mode = "local"),
data.frame(Data="AGFN_permute",Clust.Coef = AGFN_permute_transitivity_local, mode = "local")),
theme = ttheme("mOrange",base_size = 15))
#### Partner gene-ontology enrichment (topGO helper sourced from a local
#### script): test the non-imprinted IGPN partners and keep terms p < 0.001.
partners<- names(V(IGPN))[!names(V(IGPN))%in% imprinted_genes]
source("/home/wuzefeng/R/topGO.R")
aa<-GO_enrichemnts(gene2go_file = "/home/wuzefeng/MyResearch/Imprinting_prediction/imprint_gene_list/TAIR10/imp_go_enrich/1gene2go.out",interested_genes = partners)
## Fix: the original used apply(aa, 1, function(x) x[7] < 0.001), but apply()
## coerces the data frame to a character matrix, so the p-value in column 7
## was compared as a string; index the column directly to keep it numeric.
aa <- aa[aa[[7]] < 0.001, ]
## Fix: write.csv() does not accept col.names (it warns "attempt to set
## 'col.names' ignored"); the header row is written by default.
write.csv(aa, file = "partners.go.csv", quote = FALSE, row.names = FALSE)
### plant GSEA using enricher by clusterProfiler for non-imprited partener and all partner
## Load PlantGSEA gene-set tables (term id, term name, gene id columns) for
## several annotation classes. Paths are project-local.
library(clusterProfiler)
tf_target <- read.table("myresearch/network/data/1reformat/Ara_TFT.txt_out.txt",sep="\t",stringsAsFactors = FALSE)
Gfam <- read.table("myresearch/network/data/1reformat/Ara_GFam.txt_out.txt",sep="\t",stringsAsFactors = FALSE)
Kegg <- read.table("myresearch/network/data/1reformat/Ara_KEGG.txt_out.txt",sep="\t",stringsAsFactors = FALSE)
Lit <-read.table("myresearch/network/data/1reformat/Ara_LIT.txt_out.txt",sep="\t",stringsAsFactors = FALSE,quote = "")
MiR <- read.table("myresearch/network/data/1reformat/Ara_MIR.txt_out.txt",sep="\t",stringsAsFactors = FALSE,quote = "'")
PO <- read.table("myresearch/network/data/1reformat/Ara_PO.txt_out.txt",sep="\t",stringsAsFactors = FALSE)
Cyc <- read.table("myresearch/network/data/1reformat/Ara_Cyc.txt_out.txt",sep="\t",stringsAsFactors = FALSE)
## total number of distinct terms across the used annotation classes
length(unique(tf_target$V1))+length(unique(Gfam$V1))+length(unique(Kegg$V1))+length(unique(MiR$V1))+length(unique(PO$V1))+length(unique(Cyc$V1))
## Keep only annotation terms (grouped by term id, column V1) annotated to
## more than `size` genes; returns the grouped, filtered data frame.
Term_size_filter <- function(df, size = 5) {
  df %>%
    group_by(V1) %>%
    filter(n() > size)
}
## Annotation classes to test (literature sets commented out).
Term_list = list(TF_target = tf_target,
Gene_family = Gfam,
KEGG_pathway = Kegg,
#Liture = Lit,
Micro_RNA = MiR,
Plant_ontology = PO,
Mata_pathway=Cyc)
## Over-representation analysis of the NON-imprinted partners (IGPN_sub_imp)
## against each annotation class, at three minimum term sizes; significant
## terms (p.adjust < 0.05, qvalue < 0.2) are appended to a result file.
## NOTE(review): output file name says "permissive" although the gene set is
## the non-imprinted partner set -- confirm intent.
for (name in names(Term_list)){
message(name, "-Orignal term number: ",length(unique(Term_list[[name]]$V1)))
temp_type <- Term_list[[name]]
for (term_size in c(5,10,15)){
type <- Term_size_filter(df = temp_type,size = term_size)
message("...After filtering the term number: ",length(unique(type$V1)))
## columns: 1 = term id, 2 = term name, 3 = gene id
enrich<-enricher(gene = names(V(IGPN_sub_imp)),
pvalueCutoff = 0.05,
pAdjustMethod = "BH",
universe = names(V(g)),
qvalueCutoff = 0.2,
TERM2GENE = type[,c(1,3)],
TERM2NAME = unique(type[,c(1,2)]))
enrich_df<-enrich@result[enrich@result$qvalue<0.2&enrich@result$p.adjust<0.05,] # filter
enrich_df<-enrich_df%>%arrange(p.adjust) # sort by adjusted p-value
message("...Enrich term number:",nrow(enrich_df))
### keep at most the top 10 (or top 5) terms (optional)
if (nrow(enrich_df)>=10){
enrich_df<-enrich_df[1:10,]
}
if (nrow(enrich_df)>=5&nrow(enrich_df)<10){
enrich_df<- enrich_df[1:5,]
}
### append significant terms; header only on first write
if (nrow(enrich_df)>0){
out_df<- data.frame(term_class = name, min_term_size = term_size, Description = enrich_df$Description, p.adjust = enrich_df$p.adjust)
#write.table(out_df,file = "myresearch/network/result_data/PlantGSEA_enrich.txt",sep="\t", quote = FALSE,append = TRUE,col.names = !file.exists("myresearch/network/result_data/PlantGSEA_enrich.txt"),row.names = FALSE)
write.table(out_df,file = "myresearch/network/result_data/PlantGSEA_permissive_enrich.txt",sep="\t", quote = FALSE,append = TRUE,col.names = !file.exists("myresearch/network/result_data/PlantGSEA_permissive_enrich.txt"),row.names = FALSE)
#print(dotplot(enrich,showCategory=Enrichmed_term_number))
}
else{message("....NO enriched terms!")}
}
}
## heatmap of -log(p.adjust) per term, faceted by annotation class
#dd<-read.table("myresearch/network/result_data/PlantGSEA_enrich.txt",sep="\t",header = TRUE)
dd<-read.table("myresearch/network/result_data/PlantGSEA_permissive_enrich.txt",sep="\t",header = TRUE)
ggplot(data = dd,aes(as.factor(min_term_size),Description,fill=-log(p.adjust),10))+
geom_tile(color="black")+
facet_grid(term_class~.,scales="free_y",space="free")+
scale_fill_gradient(low = "red",high = "green")+
theme_bw()+
xlab("Minimum term size")
##### PlantGSEA over-representation analysis (clusterProfiler::enricher) for
##### ALL partners of imprinted genes (IGPN, imprinted genes included), at
##### three minimum term sizes; significant terms appended to a result table.
for (name in names(Term_list)){
message(name, "-Orignal term number: ",length(unique(Term_list[[name]]$V1)))
temp_type <- Term_list[[name]]
for (term_size in c(5,10,15)){
type <- Term_size_filter(df = temp_type,size = term_size)
message("...After filtering the term number: ",length(unique(type$V1)))
## columns: 1 = term id, 2 = term name, 3 = gene id
enrich<-enricher(gene = names(V(IGPN)),
pvalueCutoff = 0.05,
pAdjustMethod = "BH",
universe = names(V(g)),
qvalueCutoff = 0.2,
TERM2GENE = type[,c(1,3)],
TERM2NAME = unique(type[,c(1,2)]))
enrich_df<-enrich@result[enrich@result$qvalue<0.2&enrich@result$p.adjust<0.05,] # filter
enrich_df<-enrich_df%>%arrange(p.adjust) # sort by adjusted p-value
### keep at most the top 10 (or top 5) terms (optional)
if (nrow(enrich_df)>=10){
enrich_df<-enrich_df[1:10,]
}
if (nrow(enrich_df)>=5&nrow(enrich_df)<10){
enrich_df<- enrich_df[1:5,]
}
### append significant terms; header only on first write
if (nrow(enrich_df)>0){
out_df<- data.frame(term_class = name, min_term_size = term_size, Description = enrich_df$Description, p.adjust = enrich_df$p.adjust)
write.table(out_df,file = "myresearch/network/result_data/PlantGSEA_enrich_add_imp.txt",sep="\t", quote = FALSE,append = TRUE,
col.names = !file.exists("myresearch/network/result_data/PlantGSEA_enrich_add_imp.txt"),row.names = FALSE)
## Fix: the original called message(..., Enrichmed_term_number) with an
## undefined variable, raising an error at runtime; report nrow(enrich_df)
## as the sibling loops do.
message("...Enrich term number:",nrow(enrich_df))
#print(dotplot(enrich,showCategory=nrow(enrich_df)))
}
else{message("....NO enriched terms!")}
}
}
## Heatmap of enriched terms for the all-partner run (IGPN incl. IGs).
dd<-read.table("myresearch/network/result_data/PlantGSEA_enrich_add_imp.txt",sep="\t",header = TRUE)
ggplot(data = dd,aes(as.factor(min_term_size),Description,fill=-log(p.adjust),10))+
geom_tile(color="black")+
facet_grid(term_class~.,scales="free_y",space="free")+
scale_fill_gradient(low = "red",high = "green")+
theme_bw()+
xlab("Minimum term size")
################ merge enrich out with or without imprinted genes
## Side-by-side heatmap comparing partner enrichment with vs without the
## imprinted genes included in the query set.
dd1<-read.table("myresearch/network/result_data/PlantGSEA_enrich.txt",sep="\t",header = TRUE)
dd1$type<-"Partners Without IGs"
dd2<-read.table("myresearch/network/result_data/PlantGSEA_enrich_add_imp.txt",sep="\t",header = TRUE)
dd2$type<-"Partners With IGs"
dd<-rbind(dd1,dd2)
ggplot(data = dd,aes(as.factor(min_term_size),Description,fill=-log(p.adjust),10))+
geom_tile(color="black")+
facet_grid(term_class~type,scales="free_y",space="free")+
scale_fill_gradient(low = "red",high = "green")+
theme_bw()+
theme(plot.margin = unit(c(1,1,1,1), "cm"),
panel.spacing.x = unit(0, "lines"),
panel.spacing.y = unit(0, "lines"))+
xlab("Minimum term size")
## GO enrichment for non imprinted partners
## Four near-identical enricher runs: {GO slim, PlantGSEA GO} x {non-imprinted
## partners (IGPN_sub_imp), all partners (IGPN)}, each over BP/MF/CC and
## minimum term sizes 5/10/15, appending significant terms to result files.
## (GO slim)
library(clusterProfiler)
GO_slim_BP <- read.table("myresearch/network/data/1reform_GO/GO_slim/biological.go",sep="\t",stringsAsFactors = FALSE)
GO_slim_MF <- read.table("myresearch/network/data/1reform_GO/GO_slim/molecular_function.go",sep="\t",stringsAsFactors = FALSE)
GO_slim_CC <- read.table("myresearch/network/data/1reform_GO/GO_slim/cell_component.GO",sep="\t",stringsAsFactors = FALSE)
Term_list = list(BP = GO_slim_BP,MF = GO_slim_MF,CC = GO_slim_CC)
## -- GO slim, non-imprinted partners --> GO_slim_enrich.txt
for (name in names(Term_list)){
message(name, "-Orignal term number: ",length(unique(Term_list[[name]]$V1)))
temp_type <- Term_list[[name]]
for (term_size in c(5,10,15)){
type <- Term_size_filter(df = temp_type,size = term_size)
message("...After filtering the term number: ",length(unique(type$V1)))
enrich<-enricher(gene = names(V(IGPN_sub_imp)),
pvalueCutoff = 0.05,
pAdjustMethod = "BH",
universe = names(V(g)),
qvalueCutoff = 0.2,
TERM2GENE = type[,c(1,3)],
TERM2NAME = unique(type[,c(1,2)]))
enrich_df<-enrich@result[enrich@result$qvalue<0.2&enrich@result$p.adjust<0.05,] # filter
enrich_df<-enrich_df%>%arrange(p.adjust) # sort by adjusted p-value
message("...Enrich term number:",nrow(enrich_df))
### keep at most the top 10 (or top 5) terms (optional)
if (nrow(enrich_df)>=10){
enrich_df<-enrich_df[1:10,]
}
if (nrow(enrich_df)>=5&nrow(enrich_df)<10){
enrich_df<- enrich_df[1:5,]
}
### append significant terms; header only on first write
if (nrow(enrich_df)>0){
out_df<- data.frame(term_class = name, min_term_size = term_size, Description = enrich_df$Description, p.adjust = enrich_df$p.adjust)
#write.table(out_df,file = "myresearch/network/result_data/PlantGSEA_enrich.txt",sep="\t", quote = FALSE,append = TRUE,col.names = !file.exists("myresearch/network/result_data/PlantGSEA_enrich.txt"),row.names = FALSE)
write.table(out_df,file = "myresearch/network/result_data/GO/GO_slim_enrich.txt",sep="\t", quote = FALSE,append = TRUE,
col.names = !file.exists("myresearch/network/result_data/GO/GO_slim_enrich.txt"),row.names = FALSE)
#print(dotplot(enrich,showCategory=Enrichmed_term_number))
}
else{message("....NO enriched terms!")}
}
} #non-imprinted partners
## -- GO slim, all partners (IGPN) --> GO_slim_all_enrich.txt
for (name in names(Term_list)){
message(name, "-Orignal term number: ",length(unique(Term_list[[name]]$V1)))
temp_type <- Term_list[[name]]
for (term_size in c(5,10,15)){
type <- Term_size_filter(df = temp_type,size = term_size)
message("...After filtering the term number: ",length(unique(type$V1)))
enrich<-enricher(gene = names(V(IGPN)),
pvalueCutoff = 0.05,
pAdjustMethod = "BH",
universe = names(V(g)),
qvalueCutoff = 0.2,
TERM2GENE = type[,c(1,3)],
TERM2NAME = unique(type[,c(1,2)]))
enrich_df<-enrich@result[enrich@result$qvalue<0.2&enrich@result$p.adjust<0.05,] # filter
enrich_df<-enrich_df%>%arrange(p.adjust) # sort by adjusted p-value
message("...Enrich term number:",nrow(enrich_df))
### keep at most the top 10 (or top 5) terms (optional)
if (nrow(enrich_df)>=10){
enrich_df<-enrich_df[1:10,]
}
if (nrow(enrich_df)>=5&nrow(enrich_df)<10){
enrich_df<- enrich_df[1:5,]
}
### append significant terms; header only on first write
if (nrow(enrich_df)>0){
out_df<- data.frame(term_class = name, min_term_size = term_size, Description = enrich_df$Description, p.adjust = enrich_df$p.adjust)
#write.table(out_df,file = "myresearch/network/result_data/PlantGSEA_enrich.txt",sep="\t", quote = FALSE,append = TRUE,col.names = !file.exists("myresearch/network/result_data/PlantGSEA_enrich.txt"),row.names = FALSE)
write.table(out_df,file = "myresearch/network/result_data/GO/GO_slim_all_enrich.txt",sep="\t", quote = FALSE,append = TRUE,
col.names = !file.exists("myresearch/network/result_data/GO/GO_slim_all_enrich.txt"),row.names = FALSE)
#print(dotplot(enrich,showCategory=Enrichmed_term_number))
}
else{message("....NO enriched terms!")}
}
} # all patners
## (plantgsea)
## Same two runs with the (larger) PlantGSEA GO annotation instead of GO slim.
library(clusterProfiler)
GO_plantgsea_BP <- read.table("myresearch/network/data/1reform_GO/PlantGSEA/BP.GO",sep="\t",stringsAsFactors = FALSE)
GO_plantgsea_MF <- read.table("myresearch/network/data/1reform_GO/PlantGSEA/MF.GO",sep="\t",stringsAsFactors = FALSE)
GO_plantgsea_CC <- read.table("myresearch/network/data/1reform_GO/PlantGSEA/CC.GO",sep="\t",stringsAsFactors = FALSE)
Term_list = list(BP = GO_plantgsea_BP,MF = GO_plantgsea_MF,CC = GO_plantgsea_CC)
## -- PlantGSEA GO, non-imprinted partners --> GO_plantgsea_enrich.txt
for (name in names(Term_list)){
message(name, "-Orignal term number: ",length(unique(Term_list[[name]]$V1)))
temp_type <- Term_list[[name]]
for (term_size in c(5,10,15)){
type <- Term_size_filter(df = temp_type,size = term_size)
message("...After filtering the term number: ",length(unique(type$V1)))
enrich<-enricher(gene = names(V(IGPN_sub_imp)),
pvalueCutoff = 0.05,
pAdjustMethod = "BH",
universe = names(V(g)),
qvalueCutoff = 0.2,
TERM2GENE = type[,c(1,3)],
TERM2NAME = unique(type[,c(1,2)]))
enrich_df<-enrich@result[enrich@result$qvalue<0.2&enrich@result$p.adjust<0.05,] # filter
enrich_df<-enrich_df%>%arrange(p.adjust) # sort by adjusted p-value
message("...Enrich term number:",nrow(enrich_df))
### keep at most the top 10 (or top 5) terms (optional)
if (nrow(enrich_df)>=10){
enrich_df<-enrich_df[1:10,]
}
if (nrow(enrich_df)>=5&nrow(enrich_df)<10){
enrich_df<- enrich_df[1:5,]
}
### append significant terms; header only on first write
if (nrow(enrich_df)>0){
out_df<- data.frame(term_class = name, min_term_size = term_size, Description = enrich_df$Description, p.adjust = enrich_df$p.adjust)
#write.table(out_df,file = "myresearch/network/result_data/PlantGSEA_enrich.txt",sep="\t", quote = FALSE,append = TRUE,col.names = !file.exists("myresearch/network/result_data/PlantGSEA_enrich.txt"),row.names = FALSE)
write.table(out_df,file = "myresearch/network/result_data/GO/GO_plantgsea_enrich.txt",sep="\t", quote = FALSE,append = TRUE,
col.names = !file.exists("myresearch/network/result_data/GO/GO_plantgsea_enrich.txt"),row.names = FALSE)
#print(dotplot(enrich,showCategory=Enrichmed_term_number))
}
else{message("....NO enriched terms!")}
}
} # for mon-impritned partners
## -- PlantGSEA GO, all partners (IGPN) --> GO_plantgsea_all_enrich.txt
for (name in names(Term_list)){
message(name, "-Orignal term number: ",length(unique(Term_list[[name]]$V1)))
temp_type <- Term_list[[name]]
for (term_size in c(5,10,15)){
type <- Term_size_filter(df = temp_type,size = term_size)
message("...After filtering the term number: ",length(unique(type$V1)))
enrich<-enricher(gene = names(V(IGPN)),
pvalueCutoff = 0.05,
pAdjustMethod = "BH",
universe = names(V(g)),
qvalueCutoff = 0.2,
TERM2GENE = type[,c(1,3)],
TERM2NAME = unique(type[,c(1,2)]))
enrich_df<-enrich@result[enrich@result$qvalue<0.2&enrich@result$p.adjust<0.05,] # filter
enrich_df<-enrich_df%>%arrange(p.adjust) # sort by adjusted p-value
message("...Enrich term number:",nrow(enrich_df))
### keep at most the top 10 (or top 5) terms (optional)
if (nrow(enrich_df)>=10){
enrich_df<-enrich_df[1:10,]
}
if (nrow(enrich_df)>=5&nrow(enrich_df)<10){
enrich_df<- enrich_df[1:5,]
}
### append significant terms; header only on first write
if (nrow(enrich_df)>0){
out_df<- data.frame(term_class = name, min_term_size = term_size, Description = enrich_df$Description, p.adjust = enrich_df$p.adjust)
#write.table(out_df,file = "myresearch/network/result_data/PlantGSEA_enrich.txt",sep="\t", quote = FALSE,append = TRUE,col.names = !file.exists("myresearch/network/result_data/PlantGSEA_enrich.txt"),row.names = FALSE)
write.table(out_df,file = "myresearch/network/result_data/GO/GO_plantgsea_all_enrich.txt",sep="\t", quote = FALSE,append = TRUE,
col.names = !file.exists("myresearch/network/result_data/GO/GO_plantgsea_all_enrich.txt"),row.names = FALSE)
#print(dotplot(enrich,showCategory=Enrichmed_term_number))
}
else{message("....NO enriched terms!")}
}
} # for all partners
### plot: side-by-side heatmap of PlantGSEA GO enrichment with vs without
### imprinted genes in the query set, faceted by GO domain.
dd1<-read.table("myresearch/network/result_data/GO/GO_plantgsea_enrich.txt",sep="\t",header = TRUE)
dd1$type<-"Partners Without IGs"
dd2<-read.table("myresearch/network/result_data/GO/GO_plantgsea_all_enrich.txt",sep="\t",header = TRUE)
dd2$type<-"Partners With IGs"
dd<-rbind(dd1,dd2)
ggplot(data = dd,aes(as.factor(min_term_size),Description,fill=-log(p.adjust),10))+
geom_tile(color="black")+
facet_grid(term_class~type,scales="free_y",space="free")+
scale_fill_gradient(low = "red",high = "green")+
theme_bw()+
theme(plot.margin = unit(c(1,1,1,1), "cm"),
panel.spacing.x = unit(0, "lines"),
panel.spacing.y = unit(0, "lines"))+
xlab("Minimum term size")
###7.3 imprinted genes degree: degree of MEGs, PEGs, all IGs, their partners,
### and the non-imprinted background in the full network g.
maternal_degree <-degree(g,maternal_imprint)
paternal_degree <-degree(g,paternal_imprint)
imprint_degree <-degree(g,imprinted_genes_in_network)
partner_degree<-degree(g,partners)
## quick base-graphics overview (background capped at degree 200 for scale)
boxplot(list(as.numeric(degree(g,imprinted_genes_in_network)),as.numeric(maternal_degree),as.numeric(paternal_degree),as.numeric(degree(g))[as.numeric(degree(g))<=200]),
names = c("Imprinted","Maternal","Paternal","All"),
col=c("purple","red","blue","green"),ylab='Degree',
cex.lab=2,cex.axis=1.5)
library(ggplot2)
## long data frame for the ggplot version (one row per gene, class label)
df1<-data.frame(Degree=maternal_degree,class="MEG")
df2<-data.frame(Degree=paternal_degree,class="PEG")
df3<-data.frame(Degree=imprint_degree,class='IGs')
df4<-data.frame(Degree=partner_degree,class="Partners")
df5<-data.frame(Degree=as.numeric(degree(g,v = names(V(g))[!names(V(g))%in%imprinted_genes_in_network])),class="Non-IGs")
df<-rbind(df1,df2,df3,df4,df5)
## Violin + box plot of log2 degree per gene class, with one-sided Wilcoxon
## tests of each class against the Non-IGs background.
## Fix: map_signif_level expects a logical (or a function/named vector), not
## the string "TRUE".
ggplot(df, aes(y = log(Degree, 2), x = class, col = class)) +
  geom_violin(trim = FALSE, aes(fill = class), alpha = 0.5, col = "white") +
  theme_bw(base_size = 20) +
  geom_boxplot(width = 0.2) +
  geom_signif(comparisons = list(c("MEG", "Non-IGs"), c("PEG", "Non-IGs"), c("IGs", "Non-IGs"), c("Partners", "Non-IGs")),
              test = "wilcox.test", map_signif_level = TRUE,
              test.args = list(alternative = "greater"),
              step_increase = 0.06,
              tip_length = 0.01) +
  #scale_fill_manual(values = c("tomato","steelblue", "orange")) +
  theme(legend.position = "none",
        axis.title.x = element_blank(),
        plot.title = element_text(hjust = 0.5, size = 12, face = "bold")) +
  ylab("Degree (log2)")
#### 7.4.1 imprinted genes betweeness: precomputed normalised betweenness is
#### read from file and split by gene class, then compared with one-sided
#### Wilcoxon tests.
IGFN_betweenness<-read.table("myresearch/network/data/0.1AFGN_betweenness_normalized.txt",header = TRUE,stringsAsFactors = FALSE)
imprinted_betweenness <-IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%imprinted_genes_in_network]
maternal_betweenness <-IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%maternal_imprint]
paternal_betweenness <-IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%paternal_imprint]
IGPN_betweenness <- IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%names(V(IGPN))]
IGPN_betweenness_remov_imp <-IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%names(V(IGPN))& !IGFN_betweenness$genes%in% imprinted_genes_in_network]
## long data frame of log2 betweenness per class
df1<-data.frame(Betweenness = log(IGFN_betweenness$betweenness[!IGFN_betweenness$genes%in%imprinted_genes_in_network],2), class="Non_IGs")
df2<-data.frame(Betweenness = log(imprinted_betweenness,2),class="IGs")
df3<-data.frame(Betweenness = log(maternal_betweenness,2), class="MEG")
df4<-data.frame(Betweenness = log(paternal_betweenness,2), class="PEG")
df5<-data.frame(Betweenness = log(IGPN_betweenness,2), class = "IGPN")
df6<-data.frame(Betweenness = log(IGPN_betweenness_remov_imp,2),class="Non_IG_IGPN")
df<-rbind(df1,df2,df3,df4,df5,df6)
df$class<-factor(x = df$class,levels = c("IGs","MEG","PEG","Non_IGs","IGPN","Non_IG_IGPN"))
ggplot(df,aes(y=Betweenness,col=class,x=class))+
geom_violin(trim = FALSE,aes(fill=class),alpha=0.5,col="white")+
geom_boxplot(width=0.2)+
theme_bw()+
theme(text = element_text(size = 20),legend.position = "none")+
ylab("Betweenness (log2)")+
xlab("")+
geom_signif(comparisons = list(c("IGs","Non_IGs"),c("MEG","Non_IGs"),c("PEG","Non_IGs"),c("IGPN","IGs")),
map_signif_level = TRUE,
step_increase = 0.08,
tip_length = 0.01,
test.args = list(alternative = "greater"))
### 7.4.5 imprinted genes closenness: precomputed normalised closeness read
### from file, split by gene class, compared with one-sided Wilcoxon tests.
IGFN_closeness<-read.table("myresearch/network/data/0.2closeness_normlized.txt",header = TRUE,stringsAsFactors = FALSE)
imprinted_closeness <-IGFN_closeness$closesness[IGFN_closeness$gene_name%in%imprinted_genes_in_network]
maternal_closeness <-IGFN_closeness$closesness[IGFN_closeness$gene_name%in%maternal_imprint]
paternal_closeness <-IGFN_closeness$closesness[IGFN_closeness$gene_name%in%paternal_imprint]
IGPN_closeness <- IGFN_closeness$closesness[IGFN_closeness$gene_name%in%names(V(IGPN))]
IGPN_closeness_remov_imp <-IGFN_closeness$closesness[IGFN_closeness$gene_name%in%names(V(IGPN))& !IGFN_closeness$gene_name%in% imprinted_genes_in_network]
df1<-data.frame(Closeness=IGFN_closeness$closesness[!IGFN_closeness$gene_name%in%imprinted_genes_in_network], class="Non_IGs")
df2<-data.frame(Closeness=imprinted_closeness,class="IGs")
df3<-data.frame(Closeness=maternal_closeness, class="MEG")
df4<-data.frame(Closeness=paternal_closeness, class="PEG")
df5<-data.frame(Closeness=IGPN_closeness, class="IGPN")
df6<-data.frame(Closeness=IGPN_closeness_remov_imp,class="Non_IGs_IGPN")
df<-rbind(df1,df2,df3,df4,df5,df6)
## drop near-zero closeness values (disconnected / peripheral nodes)
df<-df[df$Closeness>0.001,]
df$class<-factor(x = df$class,levels = c("IGs","MEG","PEG","Non_IGs","IGPN","Non_IGs_IGPN"))
ggplot(df,aes(y=Closeness,col=class,x=class))+
geom_violin(trim = FALSE,aes(fill=class),alpha=0.5,col="white")+
geom_boxplot(width=0.2)+
theme_bw()+
theme(text = element_text(size = 20),legend.position = "none")+
ylab("Closeness")+
xlab("")+
geom_signif(comparisons = list(c("IGs","Non_IGs"),c("MEG","Non_IGs"),c("PEG","Non_IGs"),c("IGPN","IGs")),
map_signif_level = FALSE,
step_increase = 0.08,
tip_length = 0.01,
test.args = list(alternative = "greater"))
### Closeness boxplot: IGPN vs IGs vs non-imprinted background.
## Fix: the background rows (df1) carry class "Non_IGs", but the original
## factor levels and geom_signif comparisons used the nonexistent level "All",
## turning the background class into NA and referencing a missing group in the
## significance brackets.
dff <- rbind(df5, df2, df1)
dff <- dff[dff$Closeness > 0.001, ]
dff$class <- factor(x = dff$class, levels = c("IGPN", "IGs", "Non_IGs"))
ggplot(dff, aes(y = Closeness, fill = class, x = class)) + geom_boxplot() +
  theme_bw() +
  theme(text = element_text(size = 20), legend.position = "none") +
  ylab("Closeness") +
  xlab("") +
  geom_signif(comparisons = list(c("IGPN", "Non_IGs"), c("IGPN", "IGs"), c("IGs", "Non_IGs")),
              map_signif_level = FALSE,
              step_increase = 0.08, tip_length = 0.01)
### Same boxplot with the imprinted genes removed from the IGPN side.
## Fix: the classes in dff here are "Non_IGs_IGPN" (df6), "IGs" (df2) and
## "Non_IGs" (df1); the original levels c("IGPN","IGs","All") matched only
## "IGs", so the other two classes became NA and the "All" comparison level
## did not exist.
dff <- rbind(df6, df2, df1)
dff <- dff[dff$Closeness > 0.001, ]
dff$class <- factor(x = dff$class, levels = c("Non_IGs_IGPN", "IGs", "Non_IGs"))
ggplot(dff, aes(y = Closeness, fill = class, x = class)) + geom_boxplot() +
  theme_bw() +
  theme(text = element_text(size = 20), legend.position = "none") +
  ylab("Closeness") +
  xlab("") +
  geom_signif(comparisons = list(c("Non_IGs_IGPN", "Non_IGs"), c("Non_IGs_IGPN", "IGs")),
              map_signif_level = FALSE,
              step_increase = 0.08, tip_length = 0.01)
### 7.5 imporinted genes eigenvector centrality, MEG vs PEG.
## NOTE(review): maternal_imprint_in_network / paternal_imprint_in_network are
## defined earlier in the file (not in this chunk) -- confirm they exist.
eigns<-eigen_centrality(graph = g,directed = FALSE,weights = NA)
maternal_eigns<-eigns$vector[maternal_imprint_in_network]
paternal_eigns<-eigns$vector[paternal_imprint_in_network]
boxplot(list(log(maternal_eigns,2),log(paternal_eigns,2)),col=c("red","steelblue"),
names = c("Maternal","Paternal"),ylab='Eigenvector (log2)',
cex.lab=2,cex.axis=2)
######### overlap% impritned genes with hub genes (seting degree) (optional )
## For a sliding hub-degree threshold, the fraction of IG partners, IGs, and a
## fixed random gene set that qualify as hubs.
IGPN_sub_imp_ratio<-c()
imprinted_genes_imp_ratio <-c()
random_genes_ratio<-c()
## one fixed random draw, same size as IGON, used across all thresholds
random_genes<-sample(x = names(V(g)),size = length(V(IGON)),replace = FALSE)
for (r in seq(0,800,10)){
hub_genes<- names(V(g))[degree(g)>=r]
IGPN_sub_imp_ratio<-c(IGPN_sub_imp_ratio,(length(intersect(names(V(IGPN_sub_imp)),hub_genes))/vcount(IGPN_sub_imp)))
imprinted_genes_imp_ratio<-c(imprinted_genes_imp_ratio,length(intersect(imprinted_genes_in_network,hub_genes))/length(imprinted_genes_in_network))
random_genes_ratio<-c(random_genes_ratio,length(intersect(random_genes,hub_genes))/length(random_genes))
}
## 81 = length(seq(0, 800, 10)); p-value annotations were computed elsewhere
overlap_hub_ratio<-data.frame(threshold=rep(seq(0,800,10),3),ratio=c(IGPN_sub_imp_ratio,imprinted_genes_imp_ratio,random_genes_ratio),class=c(rep("IGPs",81),rep("IGs",81),rep("Random",81)))
ggplot(overlap_hub_ratio,aes(x=threshold,y=ratio,color=class))+
geom_line(size=1.5)+
theme_bw(base_size = 20)+
theme(legend.position = c(0.85,0.85))+
ylab("Percentage of genes")+xlab("Hub threshold")+
annotate("segment", x=20, xend=150, y=0.75, yend=0.80, color="black", size=1, arrow=arrow())+
annotate("text", x =250, y = 0.80, label = "P-value = 2.8e-10")+#8e-15
annotate("segment", x=20, xend=150, y=0.5, yend=0.55, color="black", size=1, arrow=arrow())+
annotate("text", x =250, y = 0.55, label = "P-value = 0.094")#8e-15
##### Hub definition by degree quantile (Figure 2B). The quantile cut-offs
##### 0.90/0.95/0.99 correspond to the top 10%, 5% and 1% of genes by degree
##### (the original comment said "5%, 10% and 15%"); threshold[3] = 0.99
##### (top 1%) is the one actually used below.
threshold = c(0.90,0.95,0.99)
hub_genes<- names(V(g))[degree(g)>=quantile(degree(g),probs = threshold[3])]
## IGs that are hubs themselves, and the remaining non-hub IGs
IG_belong_hubs<-intersect(imprinted_genes_in_network,hub_genes) #2
IG_non_hubs<-imprinted_genes_in_network[!imprinted_genes_in_network%in%IG_belong_hubs] # 80
## minimal distance from each non-hub IG to any hub; count direct neighbours
IG_non_hubs2hub_genes<-shortest.paths(graph = g,v = IG_non_hubs,to = hub_genes,weights = NA)
IG_non_hubs2hub_min_distance<-apply(IG_non_hubs2hub_genes,1,min)
IG_non_hubs2hub_direct_linked<-sum(IG_non_hubs2hub_min_distance==1) # 55 (55/80)
### Null model: draw 82 random genes (matching the IG count) and return the
### fraction of the non-hub draws that interact directly (shortest path 1)
### with a hub gene. Uses the globals g and hub_genes; x is the replicate index.
fun <- function(x) {
  drawn <- sample(names(V(g)), 82)
  drawn_hubs <- intersect(drawn, hub_genes)
  drawn_non_hubs <- drawn[!drawn %in% hub_genes]
  dist_to_hubs <- shortest.paths(graph = g, v = drawn_non_hubs, to = hub_genes, weights = NA)
  min_dist_to_hub <- apply(dist_to_hubs, 1, min)
  sum(min_dist_to_hub == 1) / length(drawn_non_hubs)
}
## 1000 null replicates in parallel, then the null distribution of the
## hub-interaction fraction with the observed IG value arrowed.
system.time(random_results<-mclapply(1:1000,fun,mc.cores = 60))
random_results<-unlist(random_results)
ggplot(as.data.frame(random_results),aes(x=random_results))+geom_histogram(bins=200,fill="steelblue")+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("% genes interacting with hub genes")+
annotate("segment", x=IG_non_hubs2hub_direct_linked/length(IG_non_hubs), xend=IG_non_hubs2hub_direct_linked/length(IG_non_hubs), y=5, yend=0, color="red", size=2, arrow=arrow())+
annotate("text", x =IG_non_hubs2hub_direct_linked/length(IG_non_hubs), y = 6, label = "IGs",col="black")
### ROC curves: how well does node degree alone separate partners / PEGs /
### MEGs from the rest of the network? (precrec: mmdata/evalmod/autoplot)
score_list<-list() # record each cor
label_list<-list()
modenames<- c("Partners","PEGs","MEGs")
## score = degree; label = 1 for the positive class, 0 for everything else
score_list[[1]]<-c(degree(g,names(V(IGPN_sub_imp))),
degree(g,names(V(g))[!names(V(g))%in%names(V(IGPN_sub_imp))]))
label_list[[1]]<-c(rep(1,length(names(V(IGPN_sub_imp)))),
rep(0,length(names(V(g))[!names(V(g))%in%names(V(IGPN_sub_imp))])))
score_list[[2]]<-c(degree(g,paternal_imprint),
degree(g,names(V(g))[!names(V(g))%in%paternal_imprint]))
label_list[[2]]<-c(rep(1,length(paternal_imprint)),
rep(0,length(names(V(g))[!names(V(g))%in%paternal_imprint])))
score_list[[3]]<-c(degree(g,maternal_imprint),
degree(g,names(V(g))[!names(V(g))%in%maternal_imprint]))
label_list[[3]]<-c(rep(1,length(maternal_imprint)),
rep(0,length(names(V(g))[!names(V(g))%in%maternal_imprint])))
dsids <-seq(0,2)
mdata <- mmdata(score_list, labels = label_list,modnames = modenames, dsids = dsids)
mmcurves <- evalmod(mdata)
autoplot(mmcurves,curvetype = "ROC")
### 7.6 imprinted genes years: gene age (phylostratum) of imprinted vs all
### genes, from the published supplementary table.
library(readxl)
gene_olds <-as.data.frame(read_excel(path = "other_resources/nature11394-s2_gene_time.xls",sheet = "first dataset",col_names = TRUE))
gene_olds$Gene<-toupper(gene_olds$Gene)
boxplot(gene_olds$Phylostratum,gene_olds$Phylostratum[gene_olds$Gene%in%imprinted_genes],
gene_olds$Phylostratum[gene_olds$Gene%in%maternal_imprint],
gene_olds$Phylostratum[gene_olds$Gene%in%paternal_imprint],
col=c('#9ED2F0','#E6A429',"tomato","steelblue"),
names=c("All genes","Imprinted genes","Maternal","Paternal"),
ylab="Phylostratum",cex.lab=2,cex.axis=1.2) # no difference in gene year between imprinted and non-imprinted
## class imprinted genes into two classed based on gene olds
#7.6.1 degree by old
## "old" = phylostratum <= 4, "younger" = > 4 for the imprinted sets
old_imprinted_genes<-gene_olds$Gene[gene_olds$Phylostratum<=4&gene_olds$Gene%in%imprinted_genes_in_network]
younger_imprinted_genes<-gene_olds$Gene[gene_olds$Phylostratum>4&gene_olds$Gene%in%imprinted_genes_in_network]
old_paternal_genes <-gene_olds$Gene[gene_olds$Phylostratum<=4&gene_olds$Gene%in%paternal_imprint]
younger_paternal_genes <-gene_olds$Gene[gene_olds$Phylostratum>4&gene_olds$Gene%in%paternal_imprint]
old_maternal_genes <-gene_olds$Gene[gene_olds$Phylostratum<=4&gene_olds$Gene%in%maternal_imprint]
younger_maternal_genes <-gene_olds$Gene[gene_olds$Phylostratum>4&gene_olds$Gene%in%maternal_imprint]
par(mar=c(15,5,5,5))
boxplot(degree(g,old_imprinted_genes),
degree(g,younger_imprinted_genes),
degree(g,old_maternal_genes),
degree(g,younger_maternal_genes),
degree(g,old_paternal_genes),
degree(g,younger_paternal_genes),
col=c("#CDC1C5", "#8B8970", "#FF7F50", "#CD3333", "#87CEFA", "#009ACD"),
names=c("Older imprinted genes","Younger imprinted genes","Older maternal genes","Younger maternal genes","Older paternal genes","Younger paternal genes"),
ylab="Degree",cex.lab=2,cex.axis=1.2,las=2)
## NOTE(review): the genome-wide split below uses <= 3 while the imprinted
## split above uses <= 4 -- confirm the inconsistency is intentional.
older_genes<-gene_olds$Gene[gene_olds$Phylostratum<=3&gene_olds$Gene%in%names(V(g))]
younger_genes<-gene_olds$Gene[gene_olds$Phylostratum>3&gene_olds$Gene%in%names(V(g))]
boxplot(degree(g,older_genes),degree(g,younger_genes),names=c("old","younger"))
## 7.6.2 shortest path by gene old
## Pairwise unweighted shortest-path distributions within each
## age / parent-of-origin class of imprinted genes.
par(mar=c(15,5,5,5))
boxplot(as.numeric(distances(g,old_imprinted_genes,old_imprinted_genes,weights = NA)),
        as.numeric(distances(g,younger_imprinted_genes,younger_imprinted_genes,weights = NA)),
        as.numeric(distances(g,old_maternal_genes,old_maternal_genes,weights = NA)),
        as.numeric(distances(g,younger_maternal_genes,younger_maternal_genes,weights = NA)),
        as.numeric(distances(g,old_paternal_genes,old_paternal_genes,weights = NA)),
        as.numeric(distances(g,younger_paternal_genes,younger_paternal_genes,weights = NA)),
        col=c("#CDC1C5", "#8B8970", "#FF7F50", "#CD3333", "#87CEFA", "#009ACD"),
        names=c("Older imprinted genes","Younger imprinted genes","Older maternal genes","Younger maternal genes","Older paternal genes","Younger paternal genes"),
        ylab="Shortest path",cex.lab=2,cex.axis=1.2,las=2)
older_genes<-gene_olds$Gene[gene_olds$Phylostratum<=3&gene_olds$Gene%in%names(V(g))]
younger_genes<-gene_olds$Gene[gene_olds$Phylostratum>3&gene_olds$Gene%in%names(V(g))]
# NOTE(review): this repeats the degree boxplot from section 7.6.1 verbatim --
# possibly a copy-paste leftover.
boxplot(degree(g,older_genes),degree(g,younger_genes),names=c("old","younger"))
## 7.7 imprinted genes pcc classification
## Annotate network edges with the Pearson correlation (PCC) of the two
## endpoint genes' expression profiles across the microarray samples.
edges_list2df<-as.data.frame(as_edgelist(g)) # some genes not in networks
gene_expression<-read.table("~/MyResearch/microarray/AT40_RMA_MAS/7all_gene_expresion.txt",header = TRUE)
# Keep only edges whose both endpoints have expression data
edges_list2df1<-subset(edges_list2df,edges_list2df$V1%in%rownames(gene_expression)&edges_list2df$V2%in%rownames(gene_expression))
gg_pcc<-cor(t(gene_expression))
edges_list2df1$PCC<-apply(edges_list2df1,1,function(x)gg_pcc[x[1],x[2]])
# Fixed: previously `rm(gg_pcc)&gc()` deleted gg_pcc here, but gg_pcc is still
# used further down in this script (PEG2MEGs$pcc lookup); `&` is also not a
# statement separator. Keep the matrix in memory and just collect garbage.
gc()
#edges_list2df<-merge(edges_list2df,edges_list2df1,by.x = c("V1","V2"),all.x=T)
#edges_list2df$PCC[is.na(edges_list2df$PCC)]<-0
# NOTE(review): edges_list2df has no PCC column unless the merge above is
# re-enabled -- the edge weights below likely need the merged table; verify.
E(g)$weight<-edges_list2df$PCC
## plot
## Visualise the largest connected component of the imprinted-gene-only
## subnetwork; vertices colored by parent of origin, edges by expression
## correlation (orange if PCC > 0.3), then community detection on it.
sub_g<-induced_subgraph(graph = g,vids = imprinted_genes_in_network)
# Keep only the first (largest) connected component
sub_g<-induced_subgraph(graph = sub_g,names(components(sub_g)$membership[components(sub_g)$membership==1]))
par(bg="white",mar=c(2,2,2,2))
V(sub_g)$color[names(V(sub_g))%in%maternal_imprint_in_network]<-"tomato" # vertex color
V(sub_g)$color[names(V(sub_g))%in%paternal_imprint_in_network]<-"steelblue"
V(sub_g)$size <- 8 # vertex size
V(sub_g)$label.cex <- 0.8 # vertex label size
V(sub_g)$label.color<-"black"
E(sub_g)$color<-ifelse(E(sub_g)$weight>0.3,"orange","gray") # edge color
E(sub_g)$width=3
# NOTE(review): "vetex.shape" is a misspelling of "vertex.shape" and is
# silently ignored by plot(); V(sub_g)$shape is also never set in this script.
plot(sub_g,layout=layout.fruchterman.reingold,
     vertex.frame.color= NA,
     vertex.color=V(sub_g)$color,
     vetex.shape=V(sub_g)$shape)
legend('topleft',
       #bg="white",
       text.col="black",
       legend=c("Paternal","Maternal","Correlated","Anti-correlated"),
       pch=c(19,19,NA,NA), #shape
       lty = c(0.5,0.5,1,1),
       box.lty = 2, #
       pt.cex = 3, #lines size
       cex=1, #box size
       col=c("tomato","steelblue","orange","gray"),
       y.intersp=1.5)
#text(-1, -1,"bbbbbb",col = "white")
#########################################
# Community detection on the imprinted subnetwork (edge betweenness)
wc<-cluster_edge_betweenness(sub_g,weights = NULL)
plot(wc,sub_g)
### imp distance to exp corrs
## Relate network shortest path to expression correlation for pairs of
## imprinted genes. NOTE(review): `hh` (transposed expression matrix) is only
## assigned further down in this script -- section order may need rearranging
## for a clean top-to-bottom run.
IGs_distance<-shortest.paths(graph = g,v = intersect(colnames(hh),imprinted_genes_in_network),intersect(colnames(hh),imprinted_genes_in_network),weights = NA)
# Flatten the upper triangle of the distance matrix into (row, col, d) pairs
IGs_distance<-data.frame(row=rownames(IGs_distance)[row(IGs_distance)[upper.tri(IGs_distance)]],
                         col=colnames(IGs_distance)[col(IGs_distance)[upper.tri(IGs_distance)]],
                         d=IGs_distance[upper.tri(IGs_distance)])
IGs_distance<-subset(IGs_distance,is.finite(d))
# Cap path lengths at 5 so long tails share one category
IGs_distance$d<-ifelse(IGs_distance$d>=5,5,IGs_distance$d)
IGs_distance$exp_cor<-apply(IGs_distance,1,function(x)cor(hh[,c(x[1],x[2])])[2,1])
ggplot(IGs_distance,aes(y=exp_cor,x=as.factor(d),fill=as.factor(d)))+
  geom_boxplot()+
  theme_bw()+
  theme(text=element_text(size = 20),legend.position = "none")+
  ylab("Gene expression correlation")+
  xlab("Shortest path")
#geom_smooth(method = "lm", se=TRUE, color="orange", aes(group=1),alpha=0.1,linetype = "dashed")
## optional visuliazation
ggplot(IGs_distance,aes(y=exp_cor,x=as.factor(d),col=as.factor(d)))+
  geom_violin(trim = FALSE,aes(fill=as.factor(d)),alpha=0.5,col="white")+
  theme_bw(base_size = 20)+
  geom_boxplot(width=0.2)+
  ylab("Gene expression correlation (PCC)")+
  xlab("Shortest path")+
  theme(legend.position = "none")
## paternal 2 maternal distancee
## Shortest paths between PEGs and MEGs (restricted to genes with expression
## data), annotated with expression PCC.
# NOTE(review): gg_pcc may have been rm()-ed earlier in this script; make sure
# it is still in memory before running this section.
PEG2MEGs<-shortest.paths(g,v = paternal_imprint[paternal_imprint%in%rownames(gene_expression)],
                         maternal_imprint[maternal_imprint%in%rownames(gene_expression)],weights = NA)
PEG2MEGs<-as.data.frame(as.table(PEG2MEGs))
PEG2MEGs$pcc<-apply(PEG2MEGs,1,function(x)(gg_pcc[x[1],x[2]]))
ggplot(data = PEG2MEGs) +
  aes(x = Freq, fill = Freq,y=pcc) +
  geom_boxplot() +
  scale_fill_brewer(palette = "Accent") +
  theme_bw()
## paternal 2 maternal distence and comapared with to parters
paternal_in_exp <-paternal_imprint[paternal_imprint%in%rownames(gene_expression)]
maternal_in_exp <-maternal_imprint[maternal_imprint%in%rownames(gene_expression)]
paternal_partners<-unique(unlist(neighborhood(graph = g,nodes = paternal_in_exp))) # paternal neighours
paternal_partners<-paternal_partners[!paternal_partners%in%paternal_imprint]
maternal_partners<-unique(unlist(neighborhood(graph = g,nodes = maternal_in_exp)))
maternal_partners<-maternal_partners[!maternal_partners%in%maternal_imprint]
# NOTE(review): this call has no `to` argument and its result is never used --
# the line looks unfinished.
PEG2partners<-shortest_paths(graph = g,from = paternal_imprint[paternal_imprint%in%rownames(gene_expression)])
### add ds value from lyrata
## Load dS (synonymous substitution rate) of Arabidopsis genes vs. A. lyrata
## orthologs (BioMart export); keep the maximum dS per gene.
ds <- read.table("other_resources/DS_to_lyrata/TAIR2lyrata.ds.mart_export.txt",header = TRUE,sep="\t")
ds <- na.omit(ds)
ds <- as.data.frame(ds%>%group_by(Gene.stable.ID)%>%summarise_at('dS.with.Arabidopsis.lyrata',max))
## Absolute difference in dS (vs. A. lyrata) between two Arabidopsis genes.
## Looks genes up in the global `ds` table (columns Gene.stable.ID and
## dS.with.Arabidopsis.lyrata); returns NA when either gene is absent.
## Fixed: the else-branch previously assigned to a misspelled local
## (`Delta_ds`) and relied on the value of `<-` as an implicit return;
## scalar conditions now use `&&` instead of vectorized `&`.
Delta_ds<-function(g1,g2){
  if (g1 %in% ds$Gene.stable.ID && g2 %in% ds$Gene.stable.ID) {
    abs(ds$dS.with.Arabidopsis.lyrata[ds$Gene.stable.ID == g1] -
          ds$dS.with.Arabidopsis.lyrata[ds$Gene.stable.ID == g2])
  } else {
    NA_real_
  }
}
# Annotate each PEG-MEG pair with its |dS| difference, drop missing values,
# and keep only pairs with ds_diff <= 1 before plotting vs. shortest path.
PEG2MEGs$ds_diff<-unlist(apply(PEG2MEGs,1,function(x)Delta_ds(x[1],x[2])))
PEG2MEGs<-na.omit(PEG2MEGs)
PEG2MEGs<-PEG2MEGs[PEG2MEGs$ds_diff<=1,]
ggplot(PEG2MEGs,aes(x=as.factor(Freq),y=ds_diff))+geom_boxplot(notch = FALSE)
##7.8 imprinted and transcription factors distance
## Load the Arabidopsis transcription-factor list (PlantTFDB), intersect it
## with the network vertex set, and build a TF + imprinted-gene subgraph.
ara_TFs<-read.table("/home/wuzefeng/MyResearch/motif_dbs/2plantTFDB/Arabidopsis_TF_list/Ath_TF_list",header = FALSE,stringsAsFactors = FALSE)
ara_TFs<-unique(ara_TFs$V2)
TFs_in_networks <- intersect(ara_TFs,names(V(g))) #1526
sub_g<-induced_subgraph(g,vids = c(imprinted_genes_in_network,TFs_in_networks))
## Plot an igraph object with parent-of-origin vertex coloring and a legend.
## Fixed: the original ignored its `graph_object` argument and styled/plotted
## the global `sub_g`; it now operates on the graph it is given (callers pass
## sub_g, so behavior is unchanged for existing calls).
## Relies on globals `maternal_imprint` / `paternal_imprint` for coloring.
plot_personal_graph<-function(graph_object){
  V(graph_object)$color[names(V(graph_object))%in%maternal_imprint]<-"tomato" # vertex color
  V(graph_object)$color[names(V(graph_object))%in%paternal_imprint]<-"steelblue"
  V(graph_object)$size <- 8 # vertex size
  V(graph_object)$label.cex <- 0.8 # vertex label size
  V(graph_object)$label.color<-"black"
  E(graph_object)$color <- "gray" # edge color
  E(graph_object)$width=3
  plot.igraph(graph_object,layout=layout.fruchterman.reingold,vertex.frame.color= "white")
  legend('topleft',
         legend=c("Maternal","Paternal"),
         pch=19, #shape
         box.lty=2, #
         pt.cex= 3, #lines size
         cex=1, #box size
         col=c("tomato","steelblue"),
         y.intersp=1.5)
  invisible(graph_object)
}
plot_personal_graph(graph_object = sub_g)
# Distance summaries: PEGs/MEGs to TFs, versus a size-matched random gene set
summary(as.numeric(distances(g,paternal_imprint,TFs_in_networks,weights = NA))) #no difference
summary(as.numeric(distances(g,maternal_imprint,TFs_in_networks,weights = NA)))
summary(as.numeric(distances(g,sample(names(V(g)),length(TFs_in_networks)),TFs_in_networks,weights = NA)))
##7.7.1 TF-imprinted martix heatmaps
## Build a binary TF x imprinted-gene matrix: m[tf, ig] = 1 when the TF is a
## direct network neighbour of the imprinted gene; visualise as a heatmap and
## convert the non-zero entries into an undirected igraph (tf_imp_net).
imprinted_paternal_TF <-intersect(TFs_in_networks,paternal_imprint)
imprinted_maternal_TF <-intersect(TFs_in_networks,maternal_imprint)
m<-matrix(data = 0,nrow = length(TFs_in_networks),ncol = length(imprinted_genes_in_network)) # 1526 * 82/468
rownames(m)<-TFs_in_networks
colnames(m)<-imprinted_genes_in_network
for (imp in imprinted_genes_in_network){
  nbs<-names(unlist(neighborhood(g,order = 1,nodes = imp)))
  tf_nbs<-intersect(nbs,TFs_in_networks)
  m[tf_nbs,imp]<-1
}
# Drop all-zero columns, then all-zero rows
mm<-m[,colSums(m)>0]
mmm<-mm[rowSums(mm)>0,] #226 * 42 | 501 * 229
## pheatmap
# Row annotation: MEG vs PEG class for the imprinted-gene axis
annotation <- data.frame(Pattern = factor(c(rep("MEG",length(maternal_imprint)),
                                            rep("PEG",length(paternal_imprint))),
                                          labels = c("Maternal", "Paternal")))
rownames(annotation) <- c(maternal_imprint,paternal_imprint) # check out the row names of annotation
annotation<-subset(annotation,rownames(annotation)%in%colnames(mmm))
library(pheatmap)
pheatmap(t(mmm),legend_breaks = 0:1, legend_labels = c("0","1"),fontsize_col =5,annotation_row = annotation)
## TF-imprinted genes turn into networks module
tf_g<-as.data.frame(as.table(mmm),stringsAsFactors = FALSE)
tf_g<-tf_g[tf_g$Freq>0,]
tf_g<-tf_g[tf_g$Var1!=tf_g$Var2,]
tf_imp_net<-graph.data.frame(d = tf_g, directed = FALSE)
tf_imp_net<-simplify(tf_imp_net,remove.multiple = TRUE,remove.loops = TRUE)
sub_g<-tf_imp_net
### 7.9.2 integrating motif enrichment data analysis (no use)
## Motif-id to gene-id mapping table combined from two databases; candidate
## gene ids live in columns 3-5 (consumed by mapping_function below).
motif_gene_mappping<-read.table("/home/wuzefeng/MyResearch/motif_dbs/1jaspar2018/5gene_mapping_from2database")
# Collapse the candidate gene-id columns (positions 3-5) of one mapping row:
# yields the literal string "NA" when all three are missing, otherwise the
# unique non-missing id(s).
mapping_function <- function(x) {
  candidate_ids <- x[c(3, 4, 5)]
  if (all(is.na(candidate_ids))) {
    "NA"
  } else {
    na.omit(unique(candidate_ids))
  }
}
# Attach a gene_id to every motif, then map motif-enrichment rows to gene ids
# and intersect with the TFs of the TF-imprinted matrix.
motif_gene_mappping$gene_id<-apply(motif_gene_mappping,1,FUN = mapping_function)
motif_enrich_tf<-read.table("/home/wuzefeng/MyResearch/motif_dbs/1jaspar2018/1fimo_out/Tair/imp_vs_all.txt",header = TRUE,sep = "\t")
motif_enrich_tf$gene_id <-unlist(motif_gene_mappping$gene_id[match(rownames(motif_enrich_tf),motif_gene_mappping$V2)])
intersect(rownames(mmm),motif_enrich_tf$gene_id) # "AT1G32640" "AT2G03710" "AT5G03150" "AT5G44160" "AT4G32730"
### 7.9.3 integrating ppi data analysis for visulization
## Overlay BioGRID protein-protein interactions on the TF-imprinted network:
## keep PPI pairs whose partners are both vertices of tf_imp_net and that are
## directly connected in it (used below to highlight edges in purple).
ara_reg<-read.table("../PPI/Arabidopsis/BIOGRID-ORGANISM-Arabidopsis_thaliana_Columbia-3.4.134.tab2.txt",sep="\t",header = TRUE,stringsAsFactors = FALSE)
# Fixed: the second condition previously re-tested Interactor.A, so rows where
# only Interactor.B is an imprinted gene were silently dropped.
ppi_imprinted<-ara_reg[ara_reg$Systematic.Name.Interactor.A%in%imprinted_genes_in_network | ara_reg$Systematic.Name.Interactor.B%in%imprinted_genes_in_network,]
ppi_imprinted<-data.frame(a=ppi_imprinted$Systematic.Name.Interactor.A,b=ppi_imprinted$Systematic.Name.Interactor.B,stringsAsFactors = FALSE)
# Drop self-interactions and unmapped ("-") partners
ppi_imprinted<-ppi_imprinted[ppi_imprinted$a!=ppi_imprinted$b&ppi_imprinted$b!="-",]
ppi_imprinted$a_info<-ifelse(ppi_imprinted$a%in%imprinted_genes_in_network,"Y","N")
ppi_imprinted$b_info<-ifelse(ppi_imprinted$b%in%imprinted_genes_in_network,"Y","N")
# Restrict to pairs that exist as vertices and as edges of tf_imp_net
ppi_imprinted<-ppi_imprinted[ppi_imprinted$a%in%names(V(tf_imp_net))&ppi_imprinted$b%in%names(V(tf_imp_net)),]
ppi_imprinted<-ppi_imprinted[apply(ppi_imprinted,1,function(x) are.connected(graph = tf_imp_net,v1 = x[1],v2 = x[2])),]
par(bg="white",mar=c(2,2,2,2))
## Plot the TF-imprinted-gene network: vertices colored by class (TF / MEG /
## PEG / imprinted TFs), edges supported by protein-protein interactions
## highlighted in purple.
## Fixed: the original ignored its `graph_object` argument and mutated the
## global `sub_g`; it now works on the argument (the existing call passes
## tf_imp_net, which sub_g had been aliased to, so output is unchanged).
## Relies on globals: maternal_imprint, paternal_imprint,
## imprinted_paternal_TF, imprinted_maternal_TF, ppi_imprinted.
plot_personal_graph<-function(graph_object){
  #V(graph_object)$color[names(V(graph_object))%in%focus_links$V2]<-"purple"
  V(graph_object)$color<-"green"
  V(graph_object)$frame.color<-"white"
  V(graph_object)$color[names(V(graph_object))%in%maternal_imprint]<-"tomato" # vertex color
  V(graph_object)$color[names(V(graph_object))%in%paternal_imprint]<-"steelblue"
  V(graph_object)$color[names(V(graph_object))%in%imprinted_paternal_TF]<-"pink"
  V(graph_object)$color[names(V(graph_object))%in%imprinted_maternal_TF]<-"yellow"
  V(graph_object)$size <- 4 # vertex size
  V(graph_object)$label.cex <- 0.5 # vertex label size
  V(graph_object)$label.color<-"black"
  E(graph_object)$color <- "gray" # edge color
  E(graph_object)$width=2
  # Emphasise edges that are also supported by PPI evidence
  E(graph_object,P=as.vector(t(as.matrix(ppi_imprinted[,c(1,2)]))))$color<-"purple"
  E(graph_object,P=as.vector(t(as.matrix(ppi_imprinted[,c(1,2)]))))$width<-3
  plot.igraph(graph_object,layout=layout.fruchterman.reingold)
  legend('topleft',
         #bg="white",
         text.col="tomato",
         legend=c("MEG","PEG","TF","MEG TFs","PEG TFs"),
         pch=19, #shape
         box.lty=2, #
         pt.cex= 3, #lines size
         cex=1, #box size
         col=c("tomato","steelblue","green","pink","yellow"),
         y.intersp=1.5)
  invisible(graph_object)
}
plot_personal_graph(tf_imp_net)
#### integrating micro data to calcualte co-regulation of TF-TFs or TF2IGs correlation
## For every imprinted gene contacted by >= 2 TFs, collect pairwise expression
## correlations among those TFs; compare against size-matched random TF sets.
micro_data<-read.table("myresearch/network/data/7all_gene_expresion.txt",sep = "\t")
hh<-t(micro_data)
co_regulated_IGs<-colSums(mmm)[colSums(mmm)>=2]
co_regulated_tfs_corrs<-c()
for (name in names(co_regulated_IGs)){
  co_regulated_tfs<-rownames(mmm)[mmm[,name]!=0]
  message(length(co_regulated_tfs))
  if (length(intersect(colnames(hh),co_regulated_tfs))>=2){
    temp_corrs<-cor(hh[,intersect(colnames(hh),co_regulated_tfs)])
    diag(temp_corrs)<-0
    #message(mean(temp_corrs[upper.tri(temp_corrs)]))
    message(max(temp_corrs))
    print(which(temp_corrs == max(temp_corrs), arr.ind = TRUE))
    co_regulated_tfs_corrs<-c(co_regulated_tfs_corrs,temp_corrs[upper.tri(temp_corrs)])
  }
}
# Null model: random TF sets of the same sizes as the observed co-regulator
# sets. NOTE(review): the loop index `m` shadows the TF x IG matrix `m`
# defined earlier in this script.
tfs_random_corrs<-c()
for (m in as.numeric(co_regulated_IGs)){
  message(m)
  random_tfs<-sample(rownames(mmm),m)
  if (length(intersect(colnames(hh),random_tfs))>=2){
    temp_corrs<-cor(hh[,intersect(colnames(hh),random_tfs)])
    message(mean(temp_corrs[upper.tri(temp_corrs)]))
    tfs_random_corrs<-c(tfs_random_corrs,temp_corrs[upper.tri(temp_corrs)])
  }
}
# Long-format frame for the density plot below
tfs_corrs<-data.frame(corr=c(co_regulated_tfs_corrs,tfs_random_corrs),
                      class=c(rep("coregulator",length(co_regulated_tfs_corrs)),
                              rep("non-coregulator",length(tfs_random_corrs)))
)
library(dplyr)
# Per-class mean correlation, used for the dashed vertical reference lines
mu<-tfs_corrs%>%group_by(class)%>%summarise_all(mean)
p<-ggplot(tfs_corrs,aes(x=corr,fill=class))+
  geom_density(alpha = 0.5)+
  theme_bw()+
  theme(text = element_text(size = 20),
        legend.position = c(0.8,0.9),legend.text = element_text(size = 12))+
  xlab("Expression correlation")+
  ylab("Density")+
  geom_vline(data=mu, aes(xintercept=corr, color=class),
             linetype="dashed",size=1)+
  scale_fill_manual(values = c("#E69F00", "#56B4E9"))
#### co-regulated imprinted genes expression correlation analysis
## Mirror analysis of the block above: for every TF contacting >= 2 imprinted
## genes, collect pairwise expression correlations among those IGs and compare
## against size-matched random IG sets.
co_regulated_TFs<-rowSums(mmm)[rowSums(mmm)>=2] # these tfs regulated more than two genes
co_regulated_IGs_corrs<-c()
for (name in names(co_regulated_TFs)){
  co_regulated_IGs<-colnames(mmm)[mmm[name,]!=0]
  message(length(co_regulated_IGs))
  if (length(intersect(colnames(hh),co_regulated_IGs))>=2){ ## some genes not in mircro expresison
    temp_corrs<-cor(hh[,intersect(colnames(hh),co_regulated_IGs)])
    message(mean(temp_corrs[upper.tri(temp_corrs)]))
    co_regulated_IGs_corrs<-c(co_regulated_IGs_corrs,temp_corrs[upper.tri(temp_corrs)])
  }
}
# Null model: random IG sets of the same sizes. NOTE(review): the loop index
# `m` shadows the TF x IG matrix `m` defined earlier.
IGs_random_corrs<-c()
for (m in as.numeric(co_regulated_TFs)){
  message(m)
  random_IGs<-sample(colnames(mmm),m)
  if (length(intersect(colnames(hh),random_IGs))>=2){
    temp_corrs<-cor(hh[,intersect(colnames(hh),random_IGs)])
    message(mean(temp_corrs[upper.tri(temp_corrs)]))
    IGs_random_corrs<-c(IGs_random_corrs,temp_corrs[upper.tri(temp_corrs)])
  }
}
### plot
IGs_corrs<-data.frame(corr=c(co_regulated_IGs_corrs,IGs_random_corrs),
                      class=c(rep("coregulated",length(co_regulated_IGs_corrs)),
                              rep("non-coregulated",length(IGs_random_corrs)))
)
p<-ggplot(IGs_corrs,aes(y=corr,x=class,fill=class))+
  geom_boxplot()+
  theme_bw()+
  theme(text = element_text(size = 20),
        legend.position = "none")+
  ylab("Expression correlation")+
  xlab("")+
  scale_fill_manual(values = c("#E69F00", "#56B4E9"))+
  geom_signif(comparisons = list(c("coregulated","non-coregulated")))
## 7.9 transcription tissue specificty
# Tissue-specificity score (tau-like) per gene. Rows of `expression_matrix`
# are genes, columns are tissues/samples:
#   tau = sum over tissues of (1 - expr / max expr) / (n_tissues - 1)
# so 0 means uniformly expressed and 1 means expressed in a single tissue.
gene_specificity <- function(expression_matrix) {
  n_tissues <- ncol(expression_matrix)
  apply(expression_matrix, 1, function(profile) {
    sum(1 - profile / max(profile)) / (n_tissues - 1)
  })
}
# Fraction of each gene's total expression contributed by the first three
# columns (endosperm samples), averaged: (c1 + c2 + c3) / (3 * row total).
endosperm_ratio <- function(expression_matrix) {
  apply(expression_matrix, 1, function(profile) {
    sum(profile[1:3]) / (3 * sum(profile))
  })
}
# Compute specificity (gs) and endosperm abundance (er) for all genes, then
# assemble a plotting frame for imprinted genes (IGs) vs their contacting TFs.
rna_expression<-read.table("/data1/SRA/Arabodopsis_rna/3all_tissues_integration/reads_cpm.txt",header = TRUE)
#micro_pp<-read.table("other_resources/pp_micro_arrary/GSE69995_re-analyzed_data_matrix.txt",header = TRUE,sep="\t",row.names = 1)
gs<-gene_specificity(rna_expression)
er<-endosperm_ratio(rna_expression)
df_IGs<-data.frame(Specificity=gs[colnames(mmm)],Relative_Abandunce=er[colnames(mmm)],class="IGs")
df_TFs<-data.frame(Specificity=gs[rownames(mmm)],Relative_Abandunce=er[rownames(mmm)],class="TFs")
rownames(df_TFs)<-rownames(mmm)
df_TFs<-na.omit(df_TFs)
#df_All<-data.frame(Specificity=gs,Relative_Abandunce=er,class="All")
df<-rbind(df_IGs,df_TFs)#,df_All)
df$color[df$class=="IGs"]<-"tomato"
df$color[df$class=="TFs"]<-"steelblue"
df$id<-rownames(df)
#df$color[is.na(df$color)]<-"gray"
library(ggrepel)
## Scatter of expression specificity vs endosperm abundance, colored by class
## (IGs vs TFs); extreme points (specific AND endosperm-enriched) labelled.
plot(df$Specificity,df$Relative_Abandunce,col=adjustcolor(df$color,alpha=0.2),pch=16)
ggplot(df,aes(x=Specificity,y=Relative_Abandunce,color=class))+geom_point(alpha=0.5)+
  geom_smooth(method = "lm")+
  theme_bw()+
  theme(text=element_text(size=20),legend.position = c(0.15,0.9))+
  xlab("Expression specificity")+
  ylab("Expression in endosperm")+
  geom_label_repel(data = subset(df,df$Specificity>0.8&df$Relative_Abandunce>0.3),
                   aes(label = id,
                       fill = class),
                   color = 'white',
                   size = 3.5,
                   segment.color="black",
                   point.padding=0.5)
# Fixed: the chain above previously ended with a dangling "+", which made the
# parser swallow the next assignment ("invalid assignment target" error);
# also corrected the y-axis label typo ("endopem").
## 0.66 vs 0.21
# tf and imp correlation
## Does a tissue-specific TF tend to contact tissue-specific imprinted genes?
## Scatter of TF specificity vs IG specificity for each TF-IG edge, annotated
## with the Pearson R.
tf_g_gs<-data.frame(x=gs[tf_g$Var1],y=gs[tf_g$Var2])
ggplot(tf_g_gs,aes(x=x,y=y))+geom_point(alpha=0.5)+
  geom_smooth(method = "lm")+
  geom_density_2d()+
  theme_bw()+
  theme(text=element_text(size=20),legend.position = c(0.15,0.9))+
  xlab("TF expression specificity")+
  ylab("IG expression specificity")+
  annotate(x=0.3,y=0.95,
           label=paste("R = ", round(cor(tf_g_gs$x,tf_g_gs$y,use = "complete"),2)),
           geom="text", size=8, col="darkred")
#7.12 ######### orthologous imprinted genes annalsis
######### rice ortholgous imprinted genes
## Compare network properties of Arabidopsis orthologs (OG) of imprinted
## genes from another species against Arabidopsis imprinted genes (IG) and
## the genome background (AG).
##
## imprinted_genes_file_in_other_species: two-column table (gene id, "m"/"f").
## orthologous_file: two-column ortholog map (TAIR id, other-species id).
## calculation: which metric/plot to produce -- "degree", "shortest.path",
##   "betweenness", "shortest.path.compared.random", "distance_to_imp",
##   "cluster_coefficient", or "OG_distane_to_imprinting_center" (sic).
## species: label used in messages and plot titles.
##
## Returns a ggplot object `p` for the plotting branches, or a numeric mean
## distance for "OG_distane_to_imprinting_center".
## NOTE(review): if `calculation` matches none of the branches, `p` is never
## created and the final return(p) errors. Relies on globals g,
## imprinted_genes, imprinted_genes_in_network.
Compare_orthologous_genes<-function(imprinted_genes_file_in_other_species, orthologous_file,calculation="degree",species="Rice"){
library(ggsignif)
library(ggplot2)
message(c("Species:",species))
## get IG, PEG, MEG from other species
other_imprinted_data<-read.table(imprinted_genes_file_in_other_species,stringsAsFactors = FALSE)
other_imprinted_genes<-unique(other_imprinted_data$V1) # imprinted genes list in other species
maternal_genes<-unique(other_imprinted_data$V1[other_imprinted_data$V2=="m"])
paternal_genes<-unique(other_imprinted_data$V1[other_imprinted_data$V2=="f"])
##arabidopsis orthologs of impritned genes of other species
ortholougs<-read.table(orthologous_file,stringsAsFactors = FALSE) #39072 from biomart
imp_orthlogous_genes<-unique(ortholougs$V1[ortholougs$V2%in% other_imprinted_genes])
message(c("other imprinted genes:",length(other_imprinted_genes)))
message(c("ortholgous to TAIR:",length(imp_orthlogous_genes)))
message(c("ortholgous is imprinted in TAIR:",sum(imp_orthlogous_genes%in%imprinted_genes))) #
message(c("ortholgous is imprinted and in network in TAIR:",sum(imp_orthlogous_genes%in%imprinted_genes_in_network))) #
#imp_orthlogous_genes<-imp_orthlogous_genes[!imp_orthlogous_genes%in%imprinted_genes_in_network] # whether drop the othologous genes also imprinted in arabidopsis
message(c("ortholgous is not imprinted and in network in TAIR:",length(imp_orthlogous_genes)))
imp_orthlogous_genes_in_ara_network <- intersect(imp_orthlogous_genes,names(V(g)))
# Branch 1: degree boxplots for AG / OG / IG with Wilcoxon significance bars
if (calculation=="degree"){ ### calulated degree for different set of genes
message("degree calculation!")
df_ggplot1<-data.frame(degree=degree(g,names(V(g))[!names(V(g))%in%imprinted_genes_in_network]),class="All genes")
df_ggplot2<-data.frame(degree=degree(g,imp_orthlogous_genes_in_ara_network),class="orthologous")
df_ggplot3<-data.frame(degree=degree(g,imprinted_genes_in_network),class="Imprinted_genes")
##plot for ggplot
df<-rbind(df_ggplot1,df_ggplot2,df_ggplot3)
df$species<-species
message(c(nrow(df),"-",ncol(df)))
p<-ggplot(df,aes(x=class,y=degree,fill=class))+geom_boxplot()+
  geom_signif(comparisons = list(c("All genes","orthologous"),c("Imprinted_genes","orthologous")),
              test="wilcox.test", test.args=list(alternative="greater"),
              step_increase = 0.05,tip_length = 0.01)+
  theme_bw(base_size = 20)+
  scale_x_discrete(labels=c("AG","OG","IG"))+
  scale_fill_manual(values = c("#999999", "#E69F00", "#56B4E9"))+
  theme(legend.position="none",axis.title.x=element_blank(),
        #axis.text.x=element_text(angle = 45,vjust=0.7),
        plot.title = element_text(hjust = 0.5,size=20,face = "bold"))+
  ylab("Degree")+ggtitle(species)
} ## d
# Branch 2: within-set pairwise shortest paths, OG vs IG (violin plot)
if(calculation=="shortest.path"){
message("shortest.paths calculating!")
#df_ggplot1<-data.frame(calculation = as.numeric(shortest.paths(g,names(V(g))[!names(V(g))%in%imprinted_genes_in_network],weights = NA),class="All genes"))
d2<-shortest.paths(g,v = imp_orthlogous_genes_in_ara_network,to =imp_orthlogous_genes_in_ara_network, weights = NA)
d2<-d2[upper.tri(d2)]
d2<-d2[is.finite(d2)]
df_ggplot2<-data.frame(calculation = d2,class="orthologous")
d3<-shortest.paths(g,v = imprinted_genes_in_network,to =imprinted_genes_in_network, weights = NA)
d3<-d3[upper.tri(d3)]
d3<-d3[is.finite(d3)]
df_ggplot3<-data.frame(calculation = d3,class="Imprinted_genes")
##plot for ggplot
df<-rbind(df_ggplot2,df_ggplot3)
df$species<-species
#p<-ggplot(df,aes(x=class,y=calculation,fill=class))+geom_boxplot()+
p<-ggplot(df,aes(x=class,y=calculation,fill=class))+geom_violin()+
  geom_signif(comparisons = list(c("Imprinted_genes","orthologous")),
              test="wilcox.test", test.args=list(alternative="less"),
              step_increase = 0.05,tip_length = 0.01)+
  theme_bw(base_size = 20)+
  scale_x_discrete(labels=c("OGs","IGs"))+
  scale_fill_manual(values = c("#999999", "#E69F00"))+
  theme(legend.position="none",axis.title.x=element_blank(),
        plot.title = element_text(hjust = 0.5,size=20,face = "bold"))+ # title posistion
  ylab("Shortest path")+ggtitle(species)
} ## compare shortest path of IGs and OGs (have difference)
# Branch 3: betweenness (precomputed, normalized) for AG / OG / IG
if(calculation=="betweenness"){
message("betweenness calculating!")
betweenness_all_genes<-read.table("4network_analysis_result/2high_confidence_imp_result/0.1AFGN_betweenness_normalized.txt",stringsAsFactors = FALSE,header = TRUE)
rownames(betweenness_all_genes)<-betweenness_all_genes$genes
df_ggplot1<-data.frame(calculation = betweenness_all_genes[names(V(g))[!names(V(g))%in%imprinted_genes_in_network],]$betweenness,class="All genes")
df_ggplot2<-data.frame(calculation = betweenness_all_genes[imp_orthlogous_genes_in_ara_network,]$betweenness,class="orthologous")
df_ggplot3<-data.frame(calculation = betweenness_all_genes[imprinted_genes_in_network,]$betweenness,class="Imprinted_genes")
df<-rbind(df_ggplot1,df_ggplot2,df_ggplot3)
df$species<-species
#p<-ggplot(df,aes(x=class,y=calculation,fill=class))+geom_boxplot()+
# NOTE(review): fun.y is deprecated in current ggplot2 -- use `fun =`.
p<-ggplot(df,aes(x=class,y=log(calculation,2),fill=class))+geom_violin()+
  geom_signif(comparisons = list(c("All genes","orthologous"),c("Imprinted_genes","orthologous")),
              test="wilcox.test", test.args=list(alternative="greater"),
              step_increase = 0.05,tip_length = 0.01)+
  theme_bw(base_size = 20)+
  scale_x_discrete(labels=c("AG","OG","IG"))+
  scale_fill_manual(values = c("#999999", "#E69F00", "#56B4E9"))+
  theme(legend.position="none",axis.title.x=element_blank(),
        plot.title = element_text(hjust = 0.5,size=12,face = "bold"))+ # title posistion
  ylab("Betweenness")+ggtitle(species)+
  stat_summary(fun.y=median, geom="point", size=2, color="red")
} ## differentce of betweenness of IGs and OGs
# Branch 4: mean OG-OG shortest path vs 1000 random same-size gene sets
if(calculation=="shortest.path.compared.random"){
message("shortest.path.compared.random")
d2<-shortest.paths(g,v = imp_orthlogous_genes_in_ara_network,to =imp_orthlogous_genes_in_ara_network, weights = NA)
d2<-d2[upper.tri(d2)]
d2<-d2[is.finite(d2)]
# simulate 1000 times
fun <- function(x){
  require(igraph)
  rand_genes<-sample(names(V(g)),length(imp_orthlogous_genes_in_ara_network),replace = FALSE)
  d_random<-shortest.paths(g,rand_genes, rand_genes,weights = NA)
  d_random<-d_random[upper.tri(d_random)]
  d_random<-d_random[is.finite(d_random)]
  return(mean(d_random))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 7))
simulations<-unlist(simulations)
dff<-data.frame(simulations=simulations,species=species)
p<-ggplot(dff,aes(x=simulations))+geom_histogram(fill="steelblue")+
  theme_bw()+
  theme(text = element_text(size=20),plot.title = element_text(hjust = 0.5,size=20,face = "bold"))+
  ylab("Frequency")+
  xlab("Mean shortest path")+
  annotate("segment", x=mean(d2), xend=mean(d2), y=25, yend=0, color="black", size=1, arrow=arrow())+
  annotate("segment", x=median(d2), xend=median(d2), y=25, yend=0, color="red", size=1, arrow=arrow())+
  annotate("text", x = median(d2), y = 28, label = "Median")+
  annotate("text", x = mean(d2), y = 28, label = "Mean")+ggtitle(species)
} ## mean shortest path of OG , with random gene mean distance
# Branch 5: mean OG-to-IG shortest path vs 1000 random same-size gene sets.
# NOTE(review): upper.tri() is applied to a non-square OG x IG matrix here --
# verify this selects the intended entries.
if(calculation=="distance_to_imp"){
message("distance_to_imp")
d2<-shortest.paths(g,v = imp_orthlogous_genes_in_ara_network,to =imprinted_genes_in_network, weights = NA)
d2<-d2[upper.tri(d2)]
d2<-d2[is.finite(d2)]
## distence of OG to IGs
fun <- function(x){
  require(igraph)
  rand_genes<-sample(names(V(g)),length(imp_orthlogous_genes_in_ara_network),replace = FALSE)
  d_random<-shortest.paths(g,rand_genes, imprinted_genes_in_network,weights = NA)
  d_random<-d_random[upper.tri(d_random)]
  d_random<-d_random[is.finite(d_random)]
  return(mean(d_random))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 7))
simulations<-unlist(simulations)
dff<-data.frame(simulations=simulations,species=species)
##plot
p<-ggplot(dff,aes(x=simulations))+geom_histogram(fill="steelblue")+
  theme_bw()+
  theme(text = element_text(size=20),plot.title = element_text(hjust = 0.5,size=20,face = "bold"))+
  ylab("Frequency")+
  xlab("Mean shortest path of OG to IGs")+
  annotate("segment", x=mean(d2), xend=mean(d2), y=25, yend=0, color="black", size=1, arrow=arrow())+
  annotate("segment", x=median(d2), xend=median(d2), y=25, yend=0, color="red", size=1, arrow=arrow())+
  annotate("text", x = median(d2), y = 28, label = "Median")+
  annotate("text", x = mean(d2), y = 28, label = "Mean")+ggtitle(species)
} # shortest path of OG to IG
# Branch 6: local clustering coefficient for AG / OG / IG
if(calculation=="cluster_coefficient"){
message("cluster coefficient calculating!")
cluster_coefficient<-transitivity(g,weights = NA,type = "local")
names(cluster_coefficient)<-names(V(g))
df_ggplot1<-data.frame(calculation = cluster_coefficient[names(V(g))[!names(V(g))%in%imprinted_genes_in_network]],class="All genes")
df_ggplot2<-data.frame(calculation = cluster_coefficient[imp_orthlogous_genes_in_ara_network],class="orthologous")
df_ggplot3<-data.frame(calculation = cluster_coefficient[imprinted_genes_in_network],class="Imprinted_genes")
df<-rbind(df_ggplot1,df_ggplot2,df_ggplot3)
df$species<-species
#p<-ggplot(df,aes(x=class,y=calculation,fill=class))+geom_boxplot()+
p<-ggplot(df,aes(x=class,y=calculation,fill=class))+geom_boxplot(notch = TRUE)+
  geom_signif(comparisons = list(c("All genes","orthologous"),c("Imprinted_genes","orthologous")),
              test="wilcox.test", test.args=list(alternative="greater"),
              step_increase = 0.05,tip_length = 0.01)+
  theme_bw(base_size = 20)+
  scale_x_discrete(labels=c("AG","OG","IG"))+
  scale_fill_manual(values = c("#999999", "#E69F00", "#56B4E9"))+
  theme(legend.position="none",axis.title.x=element_blank(),
        plot.title = element_text(hjust = 0.5,size=12,face = "bold"))+ # title posistion
  ylab("Cluster coefficient")+ggtitle(species)+
  stat_summary(fun.y=median, geom="point", size=2, color="red")
}
# Branch 7: mean OG distance to an "imprinting center" gene (the 50th-closest
# gene to the IG set); returns a number instead of a plot
if(calculation=="OG_distane_to_imprinting_center"){
IG2other<-shortest.paths(g,v = imprinted_genes_in_network,names(V(g)),weights = NA)
other2IG_mean_distance<-colMeans(IG2other)
IG_center<-unique(names(sort(other2IG_mean_distance)))[50] # 50 most closed genes to imprinted genes as imprinting center
OG2center<-shortest.paths(graph = g,v = imp_orthlogous_genes_in_ara_network,to = IG_center,weights = NA)
OG2center_mean_distance<-mean(OG2center[is.finite(OG2center)])
return(OG2center_mean_distance)
}
return(p)
}
#degree calcualtion
## Run the degree comparison for seven species, then pool the underlying data
## from each returned plot into a single faceted boxplot.
p1<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/RICE/imp_by_papers/imp_2.txt",
                              orthologous_file = "/data1/SRA/rice_chip/3scripts/1pesudo_gene_prediction/2rna_seq_20173D/3orthlogs/3ara2rice",calculation = "degree",species = "O. sativa")
p2<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/MAIZE/maize_imp_by_papers/maize2+.imp",
                              orthologous_file = "~/MyResearch/genome.db/Maize/gtf/gene_id_convert/TAIR2maizev3_ortholgous",calculation = "degree",species = "Z. mays")
p3<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/lyrata/lyrata_maped.imp",
                              orthologous_file = "~/MyResearch/genome.db/Arabidopsis_lyrata/gtf/TAIR2lyrata",calculation = "degree",species = "A. lyrata")
p4<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sy/strong.sy.imp",
                              orthologous_file = "~/MyResearch/genome.db/Solanum_lycopersicum/gtf/TAIR2sly",calculation = "degree",species = "S. lycopersicum")
p5<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sbicolor/sb.impV2",
                              orthologous_file = "~/MyResearch/genome.db/Sbicolor/gtf/sb2tair.txt",calculation = "degree",species = "S. bicolor")
p6<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/castor.imp",
                              orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/2TAIR2R.comm.mRNA.txt",calculation = "degree",species = "R. communis")
p7<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/2imprinted_list",
                              orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/tair2c.rubella",calculation = "degree",species = "C. rubella")
#grid.arrange(plot1, plot2, nrow=1, ncol=2)
#library(ggpubr)
#ggarrange(p1,p2,p3,p4,p5,p6,p7)
# Combine the per-species data frames and fix the facet order
pd<-rbind(p1$data,p2$data,p3$data,p4$data,p5$data,p6$data,p7$data)
pd$species<-factor(pd$species,levels=c("A. lyrata","C. rubella","R. communis", "S. lycopersicum", "O. sativa", "S. bicolor", "Z. mays"))
p<-ggplot(pd,aes(x=class,y=log(degree,2),fill=class))+geom_boxplot(notch=TRUE)+
  facet_wrap(~ species,ncol = 7)+
  geom_signif(comparisons = list(c("All genes","orthologous"),
                                 c("Imprinted_genes","orthologous")),
              test="wilcox.test",
              #test.args=list(alternative="greater"),
              #map_signif_level = TRUE,
              step_increase = 0.05,
              tip_length = 0.01)+
  theme_bw(base_size = 20)+
  scale_x_discrete(labels=c("AG","OG","IG"))+
  scale_fill_manual(values = c("#999999", "#E69F00", "#56B4E9"))+
  theme(legend.position="none",
        axis.title.x=element_blank(),
        #axis.text.x=element_text(angle = 45,vjust=0.7),
        plot.title = element_text(hjust = 0.5,size=20,face = "bold"),
        panel.spacing=unit(0,"lines"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.text.x = element_text(size = 12,face = "italic"))+
  ylab("Degree (log2)")
## --- Shortest path calculation: mean shortest path of OG vs IG per species ---
p1<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/RICE/imp_by_papers/imp_2.txt",
orthologous_file = "/data1/SRA/rice_chip/3scripts/1pesudo_gene_prediction/2rna_seq_20173D/3orthlogs/3ara2rice",calculation = "shortest.path",species = "O. sativa")
p2<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/MAIZE/maize_imp_by_papers/maize2+.imp",
orthologous_file = "~/MyResearch/genome.db/Maize/gtf/gene_id_convert/TAIR2maizev3_ortholgous",calculation = "shortest.path",species = "Z. mays")
p3<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/lyrata/lyrata_maped.imp",
orthologous_file = "~/MyResearch/genome.db/Arabidopsis_lyrata/gtf/TAIR2lyrata",calculation = "shortest.path",species = "A. lyrata")
p4<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sy/strong.sy.imp",
orthologous_file = "~/MyResearch/genome.db/Solanum_lycopersicum/gtf/TAIR2sly",calculation = "shortest.path",species = "S. lycopersicum")
p5<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sbicolor/sb.impV2",
orthologous_file = "~/MyResearch/genome.db/Sbicolor/gtf/sb2tair.txt",calculation = "shortest.path",species = "S. bicolor")
p6<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/castor.imp",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/2TAIR2R.comm.mRNA.txt",calculation = "shortest.path",species = "R. communis")
p7<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/2imprinted_list",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/tair2c.rubella",calculation = "shortest.path",species = "C. rubella")
#grid.arrange(plot1, plot2, nrow=1, ncol=2)
#library(ggpubr)
#ggarrange(p1,p2,p3,p4,p5)
pd<-rbind(p1$data,p2$data,p3$data,p4$data,p5$data,p6$data,p7$data)
pd$species<-factor(pd$species,levels=c("A. lyrata","C. rubella","R. communis", "S. lycopersicum", "O. sativa", "S. bicolor", "Z. mays"))
# Violin plots of the `calculation` column (here: shortest path); only OG vs IG
# are compared. Red point = per-group mean added by stat_summary.
# NOTE(review): fun.y is deprecated in ggplot2 >= 3.3 (use `fun =`) — works
# with a warning on newer versions; left unchanged.
p<-ggplot(pd,aes(x=class,y=calculation,fill=class))+geom_violin()+
  facet_wrap(~ species,ncol = 7)+
  geom_signif(comparisons = list(c("Imprinted_genes","orthologous")),
              test="wilcox.test",
              #test.args=list(alternative="greater"),
              #map_signif_level = TRUE,
              step_increase = 0.05,
              tip_length = 0.01)+
  theme_bw(base_size = 20)+
  scale_x_discrete(labels=c("OG","IG"))+
  scale_fill_manual(values = c("#999999", "#E69F00", "#56B4E9"))+
  theme(legend.position="none",
        axis.title.x=element_blank(),
        #axis.text.x=element_text(angle = 45,vjust=0.7),
        plot.title = element_text(hjust = 0.5,size=20,face = "bold"),
        panel.spacing=unit(0,"lines"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.text.x = element_text(size = 12,face = "italic"))+
  ylab("Shortest path")+
  stat_summary(fun.y = mean, geom = "point", size = 2, color = "red")
## --- Betweenness: log2 betweenness of AG vs OG vs IG per species ---
p1<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/RICE/imp_by_papers/imp_2.txt",
orthologous_file = "/data1/SRA/rice_chip/3scripts/1pesudo_gene_prediction/2rna_seq_20173D/3orthlogs/3ara2rice",calculation = "betweenness",species = "O. sativa")
p2<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/MAIZE/maize_imp_by_papers/maize2+.imp",
orthologous_file = "~/MyResearch/genome.db/Maize/gtf/gene_id_convert/TAIR2maizev3_ortholgous",calculation = "betweenness",species = "Z. mays")
p3<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/lyrata/lyrata_maped.imp",
orthologous_file = "~/MyResearch/genome.db/Arabidopsis_lyrata/gtf/TAIR2lyrata",calculation = "betweenness",species = "A. lyrata")
p4<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sy/strong.sy.imp",
orthologous_file = "~/MyResearch/genome.db/Solanum_lycopersicum/gtf/TAIR2sly",calculation = "betweenness",species = "S. lycopersicum")
p5<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sbicolor/sb.impV2",
orthologous_file = "~/MyResearch/genome.db/Sbicolor/gtf/sb2tair.txt",calculation = "betweenness",species = "S. bicolor")
p6<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/castor.imp",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/2TAIR2R.comm.mRNA.txt",calculation = "betweenness",species = "R. communis")
p7<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/2imprinted_list",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/tair2c.rubella",calculation = "betweenness",species = "C. rubella")
##
pd<-rbind(p1$data,p2$data,p3$data,p4$data,p5$data,p6$data,p7$data)
pd$species<-factor(pd$species,levels=c("A. lyrata","C. rubella","R. communis", "S. lycopersicum", "O. sativa", "S. bicolor", "Z. mays"))
# NOTE(review): the y-axis label "Betweeness" is misspelled (should be
# "Betweenness") — left unchanged here since it is a runtime string.
p<-ggplot(pd,aes(x=class,y=log(calculation,2),fill=class))+geom_boxplot(notch = TRUE)+
  facet_wrap(~ species,ncol = 7)+
  geom_signif(comparisons = list(c("All genes", "orthologous"),c("Imprinted_genes","orthologous")),
              test="wilcox.test",
              #test.args=list(alternative="greater"),
              #map_signif_level = TRUE,
              step_increase = 0.05,
              tip_length = 0.01)+
  theme_bw(base_size = 20)+
  scale_x_discrete(labels=c("AG","OG","IG"))+
  scale_fill_manual(values = c("#999999", "#E69F00", "#56B4E9"))+
  theme(legend.position="none",
        axis.title.x=element_blank(),
        #axis.text.x=element_text(angle = 45,vjust=0.7),
        plot.title = element_text(hjust = 0.5,size=20,face = "bold"),
        panel.spacing=unit(0,"lines"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.text.x = element_text(size = 12,face = "italic"))+
  ylab("Betweeness (log2)")
### --- Mean shortest path of OGs compared with a random-gene null model ---
### Each p_i carries 1000 simulated values in $data$simulations and the
### observed value in layers[[2]]; the empirical p is the left-tail fraction.
p1<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/RICE/imp_by_papers/imp_2.txt",
orthologous_file = "/data1/SRA/rice_chip/3scripts/1pesudo_gene_prediction/2rna_seq_20173D/3orthlogs/3ara2rice",calculation = "shortest.path.compared.random",species = "O. sativa")
p2<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/MAIZE/maize_imp_by_papers/maize2+.imp",
orthologous_file = "~/MyResearch/genome.db/Maize/gtf/gene_id_convert/TAIR2maizev3_ortholgous",calculation = "shortest.path.compared.random",species = "Z. mays")
p3<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/lyrata/lyrata_maped.imp",
orthologous_file = "~/MyResearch/genome.db/Arabidopsis_lyrata/gtf/TAIR2lyrata",calculation = "shortest.path.compared.random",species = "A. lyrata")
p4<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sy/strong.sy.imp",
orthologous_file = "~/MyResearch/genome.db/Solanum_lycopersicum/gtf/TAIR2sly",calculation = "shortest.path.compared.random",species = "S. lycopersicum")
p5<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sbicolor/sb.impV2",
orthologous_file = "~/MyResearch/genome.db/Sbicolor/gtf/sb2tair.txt",calculation = "shortest.path.compared.random",species = "S. bicolor")
p6<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/castor.imp",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/2TAIR2R.comm.mRNA.txt",calculation = "shortest.path.compared.random",species = "R. communis")
p7<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/2imprinted_list",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/tair2c.rubella",calculation = "shortest.path.compared.random",species = "C. rubella")
pd<-rbind(p1$data,p2$data,p3$data,p4$data,p5$data,p6$data,p7$data)
pd$species<-factor(pd$species,levels=c("A. lyrata","C. rubella","R. communis", "S. lycopersicum", "O. sativa", "S. bicolor", "Z. mays"))
# name = empirical p-value; xvar/yvar = observed-value arrow position.
# NOTE(review): `species=as.character(unique(pd$species))` assumes the order of
# unique() matches the p1..p7 order of the rows above — TODO confirm.
arrow_pos = data.frame(name=c(sum(p1$data$simulations<=p1$layers[[2]]$data[,1])/1000,
                              sum(p2$data$simulations<=p2$layers[[2]]$data[,1])/1000,
                              sum(p3$data$simulations<=p3$layers[[2]]$data[,1])/1000,
                              sum(p4$data$simulations<=p4$layers[[2]]$data[,1])/1000,
                              sum(p5$data$simulations<=p5$layers[[2]]$data[,1])/1000,
                              sum(p6$data$simulations<=p6$layers[[2]]$data[,1])/1000,
                              sum(p7$data$simulations<=p7$layers[[2]]$data[,1])/1000),
                       species=as.character(unique(pd$species)),
                       xvar=c(p1$layers[[2]]$data[,1],
                              p2$layers[[2]]$data[,1],
                              p3$layers[[2]]$data[,1],
                              p4$layers[[2]]$data[,1],
                              p5$layers[[2]]$data[,1],
                              p6$layers[[2]]$data[,1],
                              p7$layers[[2]]$data[,1]),
                       yvar=c(p1$layers[[2]]$data[,3],
                              p2$layers[[2]]$data[,3],
                              p3$layers[[2]]$data[,3],
                              p4$layers[[2]]$data[,3],
                              p5$layers[[2]]$data[,3],
                              p6$layers[[2]]$data[,3],
                              p7$layers[[2]]$data[,3])
                       )
arrow_pos$species = factor(arrow_pos$species,levels=levels(pd$species))
# Histogram of the null distribution per species; arrow marks the observed
# mean shortest path with its empirical p-value.
p<-ggplot(pd,aes(x=simulations))+geom_histogram(fill="#E69F00",bins = 100)+
  facet_wrap(~ species,ncol = 7)+
  theme_bw(base_size = 12)+
  theme(legend.position="none",
        axis.title.x=element_blank(),
        #axis.text.x=element_text(angle = 45,vjust=0.7),
        plot.title = element_text(hjust = 0.5,size=12,face = "bold"),
        panel.spacing=unit(0,"lines"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.text.x = element_text(size = 12,face = "italic"))+
  ylab("Frequency")+
  xlab("Mean shortest path")+
  geom_segment(data = arrow_pos, aes(x=xvar,xend=xvar, y=yvar, yend=0), color="black", size=1, arrow=arrow())+
  geom_text(data = arrow_pos, aes(label=paste("p=",name),x=xvar,y=yvar+2))
#library(ggpubr)
#ggarrange(p1,p2,p3,p4,p5)
### --- Mean shortest path from OGs to Arabidopsis IGs, vs random null ---
### Same structure as the previous section, but the metric is the distance
### of each ortholog set to the imprinted-gene set ("distance_to_imp").
p1<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/RICE/imp_by_papers/imp_2.txt",
orthologous_file = "/data1/SRA/rice_chip/3scripts/1pesudo_gene_prediction/2rna_seq_20173D/3orthlogs/3ara2rice",calculation = "distance_to_imp",species = "O. sativa")
p2<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/MAIZE/maize_imp_by_papers/maize2+.imp",
orthologous_file = "~/MyResearch/genome.db/Maize/gtf/gene_id_convert/TAIR2maizev3_ortholgous",calculation = "distance_to_imp",species = "Z. mays")
p3<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/lyrata/lyrata_maped.imp",
orthologous_file = "~/MyResearch/genome.db/Arabidopsis_lyrata/gtf/TAIR2lyrata",calculation = "distance_to_imp",species = "A. lyrata")
p4<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sy/strong.sy.imp",
orthologous_file = "~/MyResearch/genome.db/Solanum_lycopersicum/gtf/TAIR2sly",calculation = "distance_to_imp",species = "S. lycopersicum")
p5<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sbicolor/sb.impV2",
orthologous_file = "~/MyResearch/genome.db/Sbicolor/gtf/sb2tair.txt",calculation = "distance_to_imp",species = "S. bicolor")
p6<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/castor.imp",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/2TAIR2R.comm.mRNA.txt",calculation = "distance_to_imp",species = "R. communis")
p7<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/2imprinted_list",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/tair2c.rubella",calculation = "distance_to_imp",species = "C. rubella")
pd<-rbind(p1$data,p2$data,p3$data,p4$data,p5$data,p6$data,p7$data)
pd$species<-factor(pd$species,levels=c("A. lyrata","C. rubella","R. communis", "S. lycopersicum", "O. sativa", "S. bicolor", "Z. mays"))
# Empirical p-values and observed-value arrow positions, as in the section
# above. NOTE(review): arrow_pos$species is not re-leveled here (it is in the
# previous section); faceting matches by value, so this should still be safe.
arrow_pos = data.frame(name=c(sum(p1$data$simulations<=p1$layers[[2]]$data[,1])/1000,
                              sum(p2$data$simulations<=p2$layers[[2]]$data[,1])/1000,
                              sum(p3$data$simulations<=p3$layers[[2]]$data[,1])/1000,
                              sum(p4$data$simulations<=p4$layers[[2]]$data[,1])/1000,
                              sum(p5$data$simulations<=p5$layers[[2]]$data[,1])/1000,
                              sum(p6$data$simulations<=p6$layers[[2]]$data[,1])/1000,
                              sum(p7$data$simulations<=p7$layers[[2]]$data[,1])/1000),
                       species=as.character(unique(pd$species)),
                       xvar=c(p1$layers[[2]]$data[,1],
                              p2$layers[[2]]$data[,1],
                              p3$layers[[2]]$data[,1],
                              p4$layers[[2]]$data[,1],
                              p5$layers[[2]]$data[,1],
                              p6$layers[[2]]$data[,1],
                              p7$layers[[2]]$data[,1]),
                       yvar=c(p1$layers[[2]]$data[,3],
                              p2$layers[[2]]$data[,3],
                              p3$layers[[2]]$data[,3],
                              p4$layers[[2]]$data[,3],
                              p5$layers[[2]]$data[,3],
                              p6$layers[[2]]$data[,3],
                              p7$layers[[2]]$data[,3])
                       )
p<-ggplot(pd,aes(x=simulations))+geom_histogram(fill="#E69F00",bins = 100)+
  facet_wrap(~ species,ncol = 7)+
  theme_bw(base_size = 12)+
  theme(legend.position="none",
        axis.title.x=element_blank(),
        #axis.text.x=element_text(angle = 45,vjust=0.7),
        plot.title = element_text(hjust = 0.5,size=12,face = "bold"),
        panel.spacing=unit(0,"lines"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.text.x = element_text(size = 12,face = "italic"))+
  ylab("Frequency")+
  xlab("Mean shortest path to IGs")+
  geom_segment(data = arrow_pos, aes(x=xvar,xend=xvar, y=yvar, yend=0), color="black", size=1, arrow=arrow())+
  geom_text(data = arrow_pos, aes(label=paste("p=",name),x=xvar,y=yvar+2))
### --- Clustering coefficient: AG vs OG vs IG per species ---
##
p1<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/RICE/imp_by_papers/imp_2.txt",
orthologous_file = "/data1/SRA/rice_chip/3scripts/1pesudo_gene_prediction/2rna_seq_20173D/3orthlogs/3ara2rice",calculation = "cluster_coefficient",species = "O. sativa")
p2<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/MAIZE/maize_imp_by_papers/maize2+.imp",
orthologous_file = "~/MyResearch/genome.db/Maize/gtf/gene_id_convert/TAIR2maizev3_ortholgous",calculation = "cluster_coefficient",species = "Z. mays")
p3<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/lyrata/lyrata_maped.imp",
orthologous_file = "~/MyResearch/genome.db/Arabidopsis_lyrata/gtf/TAIR2lyrata",calculation = "cluster_coefficient",species = "A. lyrata")
p4<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sy/strong.sy.imp",
orthologous_file = "~/MyResearch/genome.db/Solanum_lycopersicum/gtf/TAIR2sly",calculation = "cluster_coefficient",species = "S. lycopersicum")
p5<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sbicolor/sb.impV2",
orthologous_file = "~/MyResearch/genome.db/Sbicolor/gtf/sb2tair.txt",calculation = "cluster_coefficient",species = "S. bicolor")
p6<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/castor.imp",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/2TAIR2R.comm.mRNA.txt",calculation = "cluster_coefficient",species = "R. communis")
p7<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/2imprinted_list",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/tair2c.rubella",calculation = "cluster_coefficient",species = "C. rubella")
##
pd<-rbind(p1$data,p2$data,p3$data,p4$data,p5$data,p6$data,p7$data)
pd$species<-factor(pd$species,levels=c("A. lyrata","C. rubella","R. communis", "S. lycopersicum", "O. sativa", "S. bicolor", "Z. mays"))
p<-ggplot(pd,aes(x=class,y=calculation,fill=class))+geom_boxplot(notch = TRUE)+
  facet_wrap(~ species,ncol = 7)+
  geom_signif(comparisons = list(c("All genes", "orthologous"),c("Imprinted_genes","orthologous")),
              test="wilcox.test",
              #test.args=list(alternative="greater"),
              #map_signif_level = TRUE,
              step_increase = 0.05,
              tip_length = 0.01)+
  theme_bw(base_size = 20)+
  scale_x_discrete(labels=c("AG","OG","IG"))+
  scale_fill_manual(values = c("#999999", "#E69F00", "#56B4E9"))+
  theme(legend.position="none",
        axis.title.x=element_blank(),
        #axis.text.x=element_text(angle = 45,vjust=0.7),
        plot.title = element_text(hjust = 0.5,size=20,face = "bold"),
        panel.spacing=unit(0,"lines"),
        panel.grid.major = element_blank(),
        panel.grid.minor = element_blank(),
        strip.text.x = element_text(size = 12,face = "italic"))+
  ylab("Cluster coefficient")
### --- OG distance to imprinting center ---
### NOTE(review): the p1..p7 results of this metric are never pooled/plotted
### below — section appears unfinished. The "OG_distane_..." spelling is the
### metric keyword expected by Compare_orthologous_genes; do not "fix" it here.
p1<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/RICE/imp_by_papers/imp_2.txt",
orthologous_file = "/data1/SRA/rice_chip/3scripts/1pesudo_gene_prediction/2rna_seq_20173D/3orthlogs/3ara2rice",calculation = "OG_distane_to_imprinting_center",species = "O. sativa")
p2<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/MAIZE/maize_imp_by_papers/maize2+.imp",
orthologous_file = "~/MyResearch/genome.db/Maize/gtf/gene_id_convert/TAIR2maizev3_ortholgous",calculation = "OG_distane_to_imprinting_center",species = "Z. mays")
p3<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/lyrata/lyrata_maped.imp",
orthologous_file = "~/MyResearch/genome.db/Arabidopsis_lyrata/gtf/TAIR2lyrata",calculation = "OG_distane_to_imprinting_center",species = "A. lyrata")
p4<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sy/strong.sy.imp",
orthologous_file = "~/MyResearch/genome.db/Solanum_lycopersicum/gtf/TAIR2sly",calculation = "OG_distane_to_imprinting_center",species = "S. lycopersicum")
p5<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sbicolor/sb.impV2",
orthologous_file = "~/MyResearch/genome.db/Sbicolor/gtf/sb2tair.txt",calculation = "OG_distane_to_imprinting_center",species = "S. bicolor")
p6<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/castor.imp",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/2TAIR2R.comm.mRNA.txt",calculation = "OG_distane_to_imprinting_center",species = "R. communis")
p7<-Compare_orthologous_genes(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/2imprinted_list",
orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/tair2c.rubella",calculation = "OG_distane_to_imprinting_center",species = "C. rubella")
## OG distance between species, module-separation style: <ds> = d(ab) - (d(aa)+d(bb))/2
# Tag Arabidopsis vertices by parental origin of imprinting; vertices in
# neither list keep NA for ara_imp.
V(g)$ara_imp[names(V(g))%in%maternal_imprint]="MEGs"
V(g)$ara_imp[names(V(g))%in%paternal_imprint]="PEGs"
OG_class<-function(imprinted_genes_file_in_other_species,orthologous_file){
  # Map the imprinted genes of another species onto the Arabidopsis network `g`
  # via an orthology table, returning the TAIR ortholog IDs present in `g`.
  #
  # Args:
  #   imprinted_genes_file_in_other_species: table with V1 = gene id and
  #     V2 = parental-origin code ("m"/"f") in the other species.
  #   orthologous_file: two-column table, V1 = TAIR id, V2 = other-species id.
  # Returns: character vector of TAIR ids that are orthologs of the other
  #   species' imprinted genes AND vertices of `g`.
  # Depends on globals: g, imprinted_genes, imprinted_genes_in_network.
  other_imprinted_data<-read.table(imprinted_genes_file_in_other_species,stringsAsFactors = FALSE)
  other_imprinted_genes<-unique(other_imprinted_data$V1) # imprinted genes list in other species
  ## Arabidopsis orthologs of the imprinted genes of the other species
  ortholougs<-read.table(orthologous_file,stringsAsFactors = FALSE) #39072 from biomart
  imp_orthlogous_genes<-unique(ortholougs$V1[ortholougs$V2%in% other_imprinted_genes])
  message(c("other imprinted genes:",length(other_imprinted_genes)))
  message(c("orthologous to TAIR:",length(imp_orthlogous_genes)))
  message(c("orthologous is imprinted in TAIR:",sum(imp_orthlogous_genes%in%imprinted_genes)))
  message(c("orthologous is imprinted and in network in TAIR:",sum(imp_orthlogous_genes%in%imprinted_genes_in_network)))
  # Optional filter (disabled): drop orthologs that are themselves imprinted in
  # Arabidopsis. While it is off, the count below is the full ortholog set, so
  # the message no longer claims "not imprinted" (the old label was wrong).
  #imp_orthlogous_genes<-imp_orthlogous_genes[!imp_orthlogous_genes%in%imprinted_genes_in_network] # whether drop the othologous genes also imprinted in arabidopsis
  message(c("orthologous genes retained:",length(imp_orthlogous_genes)))
  imp_orthlogous_genes_in_ara_network <- intersect(imp_orthlogous_genes,names(V(g)))
  return(imp_orthlogous_genes_in_ara_network)
}
# --- Tag network vertices that are orthologs of each species' imprinted genes.
# Each attribute is "<species>_OG" for ortholog vertices, "other" otherwise.
#rice
rice_OG<-OG_class(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/RICE/imp_by_papers/imp_2.txt",orthologous_file = "/data1/SRA/rice_chip/3scripts/1pesudo_gene_prediction/2rna_seq_20173D/3orthlogs/3ara2rice")
V(g)$rice_imp=ifelse(names(V(g))%in%rice_OG,"rice_OG","other") #75
## maize
maize_OG<-OG_class(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/MAIZE/maize_imp_by_papers/maize2+.imp",orthologous_file = "~/MyResearch/genome.db/Maize/gtf/gene_id_convert/TAIR2maizev3_ortholgous")
V(g)$maize_imp=ifelse(names(V(g))%in%maize_OG,"maize_OG","other")
#lyrata
lyrata_OG<-OG_class(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/lyrata/lyrata_maped.imp",orthologous_file = "~/MyResearch/genome.db/Arabidopsis_lyrata/gtf/TAIR2lyrata")
V(g)$lyrata_imp=ifelse(names(V(g))%in%lyrata_OG,"lyrata_OG","other")
# tomato (S. lycopersicum)
sly_OG<-OG_class(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sy/strong.sy.imp",orthologous_file = "~/MyResearch/genome.db/Solanum_lycopersicum/gtf/TAIR2sly")
V(g)$sly_imp=ifelse(names(V(g))%in%sly_OG,"sly_OG","other")
# sorghum (S. bicolor)
sb_OG<-OG_class(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/sbicolor/sb.impV2",orthologous_file = "~/MyResearch/genome.db/Sbicolor/gtf/sb2tair.txt")
V(g)$sb_imp = ifelse(names(V(g))%in%sb_OG,"sb_OG","other")
## castor bean (R. communis)
castor_OG<-OG_class(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/castor.imp", orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/caster_bean/2TAIR2R.comm.mRNA.txt")
V(g)$castor_imp= ifelse(names(V(g))%in%castor_OG,"castor_OG","other")
# C. rubella
rubella_OG<-OG_class(imprinted_genes_file_in_other_species = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/2imprinted_list",orthologous_file = "~/MyResearch/Imprinting_prediction/imprint_gene_list/capsla_rubela/tair2c.rubella")
V(g)$rubella_imp=ifelse(names(V(g))%in%rubella_OG,"rubella_OG","other")
module_distances<-function(gene_list1,gene_list2,graph=g){
  # Network separation of two gene modules:
  #   D = <d_AB> - (<d_AA> + <d_BB>) / 2
  # where <d_XY> is the mean finite unweighted shortest path between sets.
  # Within-set means use only the upper triangle so each pair counts once and
  # the zero self-distances on the diagonal are excluded.
  #
  # Args:
  #   gene_list1, gene_list2: vertex names (or other igraph vertex selectors).
  #   graph: igraph object to measure on; defaults to the global `g` so
  #     existing two-argument calls behave exactly as before.
  # Returns: a single numeric D (negative values = overlapping modules).
  d1_dis_matrix<-shortest.paths(graph = graph,v = gene_list1,to = gene_list1,weights = NA)
  d1_dis<-mean(d1_dis_matrix[upper.tri(d1_dis_matrix)&is.finite(d1_dis_matrix)],na.rm = TRUE)
  d2_dis_matrix<-shortest.paths(graph = graph,v = gene_list2,to = gene_list2,weights = NA)
  d2_dis<-mean(d2_dis_matrix[upper.tri(d2_dis_matrix)&is.finite(d2_dis_matrix)],na.rm = TRUE)
  d12_dis_matrix<-shortest.paths(graph = graph,v = gene_list1,to = gene_list2,weights = NA)
  if(identical(rownames(d12_dis_matrix),colnames(d12_dis_matrix))){
    # Identical sets: symmetric matrix, use the upper triangle as above.
    d12_dis<-mean(d12_dis_matrix[upper.tri(d12_dis_matrix)&is.finite(d12_dis_matrix)],na.rm = TRUE)
  }
  else{
    # Distinct sets: every finite cross-distance contributes once.
    d12_dis<-mean(d12_dis_matrix[is.finite(d12_dis_matrix)],na.rm = TRUE)
  }
  D<-d12_dis-(d1_dis+d2_dis)/2
  return(D)
}
# Module separation between Arabidopsis IGs and the castor-bean ortholog set.
# FIX: gene_list1 was previously the logical mask
# `names(V(g))%in%imprinted_genes_in_network` rather than the gene names the
# function expects for its other argument; pass the names themselves.
md<-module_distances(gene_list1 = intersect(imprinted_genes_in_network,names(V(g))),gene_list2 = names(V(g))[V(g)$castor_imp=="castor_OG"])
###plot orthologous genes
# Collect every species' ortholog vertices plus the Arabidopsis IGs.
# FIX: the original included `V(g)$castor_imp=="castor"` — a value that never
# occurs (castor_imp is "castor_OG"/"other"), duplicating castor and omitting
# C. rubella even though the plot legend lists it. Use the rubella set instead.
all_OGs<-c(names(V(g))[V(g)$castor_imp=="castor_OG"],names(V(g))[V(g)$rice_imp=="rice_OG"],names(V(g))[V(g)$maize_imp=="maize_OG"],
names(V(g))[V(g)$lyrata_imp=="lyrata_OG"],names(V(g))[V(g)$rubella_imp=="rubella_OG"],names(V(g))[V(g)$sly_imp=="sly_OG"],
names(V(g))[V(g)$sb_imp=="sb_OG"],imprinted_genes_in_network)
sub_g<-induced_subgraph(graph = g,vids = unique(all_OGs))
plot_personal_graph<-function(graph_object){
  # Draw the ortholog/IG subnetwork with one color per species of origin.
  # BUG FIX: the original set colors/sizes/labels on `sub_g` — which inside the
  # function is only a local copy of the global (R copy-on-modify) — and then
  # plotted `graph_object`, so none of the styling was ever applied. All
  # attributes are now set on `graph_object` itself before plotting.
  # Precedence: later assignments overwrite earlier ones for vertices that are
  # orthologs in several species.
  #V(graph_object)$color[names(V(graph_object))%in%focus_links$V2]<-"purple"
  #V(graph_object)$color<-"green"
  V(graph_object)$frame.color<-"white"
  V(graph_object)$color[names(V(graph_object))%in%imprinted_genes_in_network]<-"#CD3333" # Arabidopsis IGs
  V(graph_object)$color[names(V(graph_object))%in%names(V(g))[V(g)$lyrata_imp=="lyrata_OG"]]<-"#FF7F00"
  V(graph_object)$color[names(V(graph_object))%in%names(V(g))[V(g)$rice_imp=="rice_OG"]]<-"#EEC591"
  V(graph_object)$color[names(V(graph_object))%in%names(V(g))[V(g)$maize_imp=="maize_OG"]]<-"#53868B"
  V(graph_object)$color[names(V(graph_object))%in%names(V(g))[V(g)$rubella_imp=="rubella_OG"]]<-"#458B00"
  V(graph_object)$color[names(V(graph_object))%in%names(V(g))[V(g)$castor_imp=="castor_OG"]]<-"#AB82FF"
  V(graph_object)$color[names(V(graph_object))%in%names(V(g))[V(g)$sly_imp=="sly_OG"]]<-"#0000FF"
  V(graph_object)$color[names(V(graph_object))%in%names(V(g))[V(g)$sb_imp=="sb_OG"]]<-"#FF6EB4"
  V(graph_object)$size <- 4 # vertex size
  #V(graph_object)$label.cex <- 0.5 # vertex label size
  V(graph_object)$label<-NA # hide vertex labels
  V(graph_object)$label.color<-"black"
  E(graph_object)$color <- "black" # edge color
  E(graph_object)$width=2#E(graph_object)$weight/max(E(graph_object)$weight)
  plot.igraph(graph_object,layout=layout.fruchterman.reingold)
  # Legend order matches the color assignments above.
  legend('topleft',
         #bg="white",
         text.col="tomato",
         legend=c("Arabidopsis","Lyrata","Rice","Maize","Rubella","Castor", "Sly","Sbicolor"),
         pch=19, #shape
         box.lty=2, #
         pt.cex= 3, #lines size
         cex=1, #box size
         col=c("#CD3333", "#FF7F00", "#EEC591", "#53868B", "#458B00", "#AB82FF", "#0000FF", "#FF6EB4"),
         y.intersp=1.5)
}
plot_personal_graph(sub_g)
## Null model: is the largest connected component of the OG/IG vertex set
## larger than for random vertex sets of the same size? 1000 replicates.
## IMPROVED: the original grew `simulations` with c() inside a for-loop over
## seq(1:1000); use vapply with a preallocated result instead (same values).
simulations <- vapply(seq_len(1000), function(m) {
  random_genes <- sample(names(V(g)), vcount(sub_g))
  random_net <- induced_subgraph(g, vids = random_genes)
  max(components(random_net)$csize)
}, numeric(1))
p<-ggplot(data.frame(simulations=simulations),aes(x=simulations))+geom_histogram(fill="#E69F00",bins = 100)+
  theme_bw(base_size = 20)+
  ylab("Frequency")+
  xlab("Maximum component size")+
  annotate("segment", x=max(components(sub_g)$csize), xend=max(components(sub_g)$csize), y=25, yend=0, color="red", size=2, arrow=arrow())+
  annotate("text", x = max(components(sub_g)$csize), y = 28, label = "Observed")
######### population (polymorphism) data
#########7.13 population data
# Nucleotide diversity (pi) per gene/promoter for Arabidopsis protein-coding
# genes; compare IG/MEG/PEG against all genes.
popu<-read.csv("other_sourse/Ath_chr_protein_coding.tsv",header = TRUE,sep = "\t")
rownames(popu)<-popu$gene_id
# NOTE(review): the next expression only prints in interactive use; its value
# is not captured.
popu$gene_pi[popu$gene_id%in%imprinted_genes_in_network]
#boxplot(popu$promoter_pi[popu$gene_id%in%imprinted_genes],popu$promoter_pi,popu$promoter_pi[popu$gene_id%in%maternal_imprint])
# NOTE(review): computed but not used below in this section.
g_pop_common_common_genes<-intersect(names(V(g)),popu$gene_id)
# Long-format frame: one row per gene with its pi, gene class, and region.
df_ggplot1<-data.frame(pi=popu$promoter_pi[popu$gene_id%in%imprinted_genes],class="IG",region="Promoter")
df_ggplot2<-data.frame(pi=popu$promoter_pi[popu$gene_id%in%maternal_imprint],class="MEG",region="Promoter")
df_ggplot3<-data.frame(pi=popu$promoter_pi[popu$gene_id%in%paternal_imprint],class="PEG",region="Promoter")
df_ggplot4<-data.frame(pi=popu$promoter_pi,class="All genes",region="Promoter")
df_ggplot5<-data.frame(pi=popu$gene_pi[popu$gene_id%in%imprinted_genes],class="IG",region="Genebody")
df_ggplot6<-data.frame(pi=popu$gene_pi[popu$gene_id%in%maternal_imprint],class="MEG",region="Genebody")
df_ggplot7<-data.frame(pi=popu$gene_pi[popu$gene_id%in%paternal_imprint],class="PEG",region="Genebody")
df_ggplot8<-data.frame(pi=popu$gene_pi,class="All genes",region="Genebody")
df<-rbind(df_ggplot1,df_ggplot2,df_ggplot3,df_ggplot4,df_ggplot5,df_ggplot6,df_ggplot7,df_ggplot8)
library(ggsignif)
# One-sided Wilcoxon tests (alternative = "greater") vs the all-genes class.
ggplot(df,aes(x=class,y=pi,fill=class))+geom_boxplot()+
  geom_signif(comparisons = list(c("IG","All genes"),c("MEG","All genes"),c("PEG","All genes")),
              test="wilcox.test", test.args=list(alternative="greater"),step_increase = 0.05,tip_length = 0.01)+
  theme_bw(base_size = 20)+ facet_wrap(.~region,ncol = 2,scales = "free")+
  #scale_x_discrete(labels=c("All genes","Maize orthologs","Imprinted genes"))+
  scale_fill_manual(values = c("#999999", "#E69F00", "#56B4E9","#9370DB"))+
  theme(legend.position="none")+ylab("Pi")
#####7.14 imprinted genes and essential genes
#### Load phenotype-severity gene classes and restrict each to network
#### vertices; inline counts record sizes before/after intersection.
gene_class<-read.table("myresearch/network/data/gene_classes.txt",header = FALSE,sep="\t",stringsAsFactors = FALSE)
ESN<-gene_class$V1[gene_class$V2=="1ESN"] #841
ESN<-intersect(ESN,names(V(g))) #820
MRP<-gene_class$V1[gene_class$V2=="2MRP"] #2149
MRP<-intersect(MRP,names(V(g))) #2009
CLB<-gene_class$V1[gene_class$V2=="3CLB"] #399
CLB<-intersect(CLB,names(V(g))) #385
CND<-gene_class$V1[gene_class$V2=="4CND"] #586
CND<-intersect(CND,names(V(g))) #574
# NOTE(review): the literal "5NO PHE5NOTYPE" matches the label as stored in
# the input file — do not normalize it.
NO_PHE5NOTYPE<-gene_class$V1[gene_class$V2=="5NO PHE5NOTYPE"] #369
NO_PHE5NOTYPE<-intersect(NO_PHE5NOTYPE,names(V(g))) #341
# Pairwise unweighted shortest paths from IGs (and MEG/PEG subsets) to each
# gene class, flattened to plain numeric vectors (may contain Inf for
# disconnected pairs; downstream code filters with is.finite).
ESN2IGs <-as.numeric(shortest.paths(graph = g,v = imprinted_genes_in_network,ESN,weights = NA))
ESN2IGs_P <-as.numeric(shortest.paths(graph = g,v = paternal_imprint,ESN,weights = NA))
ESN2IGs_M <-as.numeric(shortest.paths(graph = g,v = maternal_imprint,ESN,weights = NA))
MRP2IGs <-as.numeric(shortest.paths(graph = g,v = imprinted_genes_in_network,MRP,weights = NA))
CLB2IGs <-as.numeric(shortest.paths(graph = g,v = imprinted_genes_in_network,CLB,weights = NA))
CND2IGs <-as.numeric(shortest.paths(graph = g,v = imprinted_genes_in_network,CND,weights = NA))
NO_PHE5NOTYPE2IGs <-as.numeric(shortest.paths(graph = g,v = imprinted_genes_in_network,NO_PHE5NOTYPE,weights = NA))
##distance of imprinted genes to essential genes (Figure 3A)
# Null model: mean shortest path from IGs to a random gene set of size
# length(ESN). `x` is the (ignored) replicate index required by mclapply;
# require(igraph) runs inside each forked worker.
fun <- function(x){
  require(igraph)
  rand_genes<-sample(names(V(g)),length(ESN),replace = FALSE)
  d_random<-shortest.paths(g,imprinted_genes_in_network, rand_genes,weights = NA)
  return(mean(as.numeric(d_random)[is.finite(as.numeric(d_random))]))
}
library(parallel)
# NOTE(review): mc.cores = 60 assumes a large server; adjust on smaller hosts.
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 60))
simulations<-unlist(simulations)
# Histogram of the null distribution with the observed IG/PEG/MEG means
# marked by arrows (Inf distances excluded via is.finite).
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins = 500)+
  theme_bw()+
  theme(text = element_text(size=20))+
  ylab("Frequency")+
  xlab("Mean shortest path")+
  annotate("segment", x=mean(ESN2IGs[is.finite(ESN2IGs)]), xend=mean(ESN2IGs[is.finite(ESN2IGs)]), y=5, yend=0, color="black", size=2, arrow=arrow())+
  annotate("text", x = mean(ESN2IGs[is.finite(ESN2IGs)]), y = 6, label = "IGs")+
  annotate("segment", x=mean(ESN2IGs_P[is.finite(ESN2IGs_P)]), xend=mean(ESN2IGs_P[is.finite(ESN2IGs_P)]), y=5, yend=0, color="blue", size=2, arrow=arrow())+
  annotate("text", x = mean(ESN2IGs_P[is.finite(ESN2IGs_P)]), y = 6, label = "PEGs")+
  annotate("segment", x=mean(ESN2IGs_M[is.finite(ESN2IGs_M)]), xend=mean(ESN2IGs_M[is.finite(ESN2IGs_M)]), y=5, yend=0, color="tomato", size=2, arrow=arrow())+
  annotate("text", x = mean(ESN2IGs_M[is.finite(ESN2IGs_M)]), y = 6, label = "MEGs")
## plot: subnetwork induced by IGs + essential genes
sub_g<-induced.subgraph(g,vids = c(imprinted_genes_in_network,ESN))
# Keep only component 1 (NOTE(review): assumed to be the giant component —
# components() numbering does not guarantee that; verify).
sub_g <- induced.subgraph(sub_g, names(membership(components(sub_g))[membership(components(sub_g))==1]))
plot_personal_graph<-function(graph_object){
  # Draw the IG + essential-gene subnetwork: maternal IGs tomato, paternal IGs
  # steelblue, essential genes orange (later assignments win on overlap).
  # BUG FIX: the original ignored `graph_object` and styled/plotted the global
  # `sub_g`, which only worked because it happened to be called with sub_g.
  # Operate on the argument so the function is self-contained.
  V(graph_object)$color[names(V(graph_object))%in%maternal_imprint]<-"tomato" # vertex color
  V(graph_object)$color[names(V(graph_object))%in%paternal_imprint]<-"steelblue"
  V(graph_object)$color[names(V(graph_object))%in%ESN]<-"orange"
  V(graph_object)$size <- 2 # vertex size
  V(graph_object)$label.cex <- 0.8 # vertex label size
  V(graph_object)$label.color<-"black"
  E(graph_object)$color <- "gray" # edge color
  E(graph_object)$width=1
  V(graph_object)$label<-NA # labels hidden (size/cex kept for completeness)
  plot.igraph(graph_object,layout=layout.fruchterman.reingold,
              vertex.frame.color= "white")
  legend('topleft',
         legend=c("Maternal","Paternal","Essential"),
         pch=19, #shape
         box.lty=2, #
         pt.cex= 3, #lines size
         cex=1, #box size
         col=c("tomato","steelblue","orange"),
         y.intersp=1.5)
}
plot_personal_graph(sub_g)
## Fraction of IG partner-network genes (IGPN_sub_imp) that are essential,
## compared to random vertex sets of the same size (1000 replicates).
## IMPROVED: both loops grew their result with c(); use vapply with a
## preallocated numeric result. The per-replicate progress message is kept.
intersect_with_partners <- vapply(seq_len(1000), function(r) {
  message(r)
  length(intersect(ESN, sample(names(V(g)), vcount(IGPN_sub_imp)))) / vcount(IGPN_sub_imp)
}, numeric(1))
real<-length(intersect(ESN,names(V(IGPN_sub_imp))))/vcount(IGPN_sub_imp)
ggplot(data.frame(overlap=intersect_with_partners),aes(x=overlap))+
  geom_histogram(bins=100,fill="steelblue")+
  theme_bw(base_size = 20)+
  annotate("segment", x=real, xend=real, y=10, yend=0, color="red", size=2, arrow=arrow())+
  annotate("text", x = real, y = 12, label = "Observed")+ylab("Frequency")+xlab("Percentage of genes being essential genes")
#### Same null model for no-phenotype genes (raw counts, not fractions).
intersect_with_partners <- vapply(seq_len(1000), function(r) {
  length(intersect(NO_PHE5NOTYPE, sample(names(V(g)), vcount(IGPN_sub_imp))))
}, numeric(1))
real<-length(intersect(NO_PHE5NOTYPE,names(V(IGPN_sub_imp))))
ggplot(data.frame(overlap=intersect_with_partners),aes(x=overlap))+
  geom_histogram(bins=20,fill="steelblue")+
  theme_bw(base_size = 20)+
  annotate("segment", x=real, xend=real, y=25, yend=0, color="red", size=2, arrow=arrow())+
  annotate("text", x = real, y = 30, label = "Observed")+ylab("Count")+xlab("Overlaps with no-phenotype genes")
#####
### 5.1 essential-gene degree comparison (Figure S7)
### Observed mean degree of essential genes vs random gene sets of equal size.
degree_essential <- degree(g,v = ESN) ## long time
mean(degree_essential)
#degree_random <- degree(g,v = sample(names(V(g)),length(essential_genes),replace = FALSE)) # long time
# Null-model worker; `x` is the ignored replicate index for mclapply.
fun <- function(x){
  require(igraph)
  rand_genes<-sample(names(V(g)),length(ESN),replace = FALSE)
  degree_random<-degree(g,rand_genes)
  return(mean(as.numeric(degree_random)))
}
library(parallel)
# NOTE(review): mc.cores = 60 assumes a large server.
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 60))
simulations<-unlist(simulations)
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins=200)+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean degree")+
annotate("segment", x=78.8, xend=78.9, y=5, yend=0, color="red", size=2, arrow=arrow())+
annotate("text", x = 78.8, y = 7, label = "Essential genes")
##### betweenness of essential to random
between_essential <- IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%ESN] ## long time
mean(between_essential)
#degree_random <- degree(g,v = sample(names(V(g)),length(essential_genes),replace = FALSE)) # long time
fun <- function(x){
require(igraph)
rand_genes<-sample(names(V(g)),length(ESN),replace = FALSE)
betweenness_random<-IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%rand_genes]
return(mean(betweenness_random))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 60))
simulations<-unlist(simulations)
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins = 200)+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean betweenness")+
annotate("segment", x=mean(between_essential), xend=mean(between_essential), y=5, yend=0, color="red", size=2, arrow=arrow())+
annotate("text", x = mean(between_essential), y =6, label = "Essential genes")
### the percentage of partners being essential
# BUG FIX: this assignment was left dangling ("partners_non_essential <-"
# with no right-hand side), so the R parser silently consumed the next
# expression in the file (require(pathview)) as its RHS, assigning TRUE/FALSE
# instead of the intended analysis result. The unfinished statement is
# commented out until the computation is actually written.
# partners_non_essential <- # TODO: compute fraction of partners that are non-essential
### 6.pathogen genes analysis
### pathogen genes analysis
##### 6.1 indivudule kegg pathway validation
# KEGG pathway 04626 (plant-pathogen interaction): map its TAIR genes onto
# the network g and compare centralities against random gene sets.
require(pathview)
library(org.At.tair.db)
xx<- as.list(org.At.tairPATH2TAIR)
focused_kegg_Pathway<-"04626"
#sub_g<-induced_subgraph(g,vids = intersect(xx[[focused_kegg_Pathway]],names(V(g))))
#ath.dat.kegg <- sim.mol.data(mol.type="gene",id.type="tair",species="ath",nmol=3000)
#pv.out <- pathview(gene.data = ath.dat.kegg, gene.idtype="tair",pathway.id = focused_kegg_Pathway, species = "ath", out.suffix = "ath.kegg",kegg.native = T, same.layer=T)
pathogen_degree<- degree(g,v = intersect(xx[[focused_kegg_Pathway]],names(V(g))))
pathogen_genes<-intersect(xx[[focused_kegg_Pathway]],names(V(g)))
# Null model: mean degree of random gene sets sized like pathogen_genes.
# The argument x is only the mclapply iteration index.
fun <- function(x){
require(igraph)
rand_genes<-sample(names(V(g)),length(pathogen_genes),replace = FALSE)
degree_random<-degree(g,rand_genes)
return(mean(as.numeric(degree_random)))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 7))
simulations<-unlist(simulations)
# The arrow at x=102 is hard-coded — presumably mean(pathogen_degree),
# TODO confirm.
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue")+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean degree")+
annotate("segment", x=102, xend=102, y=25, yend=0, color="black", size=2, arrow=arrow())
#random_degree <- degree(g,v = sample(names(V(g)),length(names(pathogen_degree)),replace = FALSE))
#boxplot(list(pathogen_degree,random_degree),names=c("Pathogen genes","Random"))
##6.2betweenness
# Same permutation test for betweenness (precomputed in IGFN_betweenness).
between_pathogen <- IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%pathogen_genes] ## long time
mean(between_pathogen)
#degree_random <- degree(g,v = sample(names(V(g)),length(essential_genes),replace = FALSE)) # long time
fun <- function(x){
require(igraph)
rand_genes<-sample(names(V(g)),length(pathogen_genes),replace = FALSE)
betweenness_random<-IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%rand_genes]
return(mean(betweenness_random))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 7))
simulations<-unlist(simulations)
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins = 50)+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean betweenness")+
annotate("segment", x=mean(between_pathogen), xend=mean(between_pathogen), y=25, yend=0, color="red", size=2, arrow=arrow())
###6.3 eigenvector
# Eigenvector centrality of pathogen genes vs. random samples of values.
eigenvector <-eigen_centrality(g,weights = NA)
pathogen_eigenvector<-eigenvector$vector[names(pathogen_degree)]
# NOTE(review): this null samples a fixed 1000 centrality values rather than
# length(pathogen_genes) values — verify that is intended.
fun <- function(x){
require(igraph)
return(mean(sample(size = 1000,x = eigenvector$vector)))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 7))
simulations<-unlist(simulations)
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue")+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Eigenvector")+
annotate("segment", x=102, xend=102, y=25, yend=0, color="black", size=2, arrow=arrow())
#random_eigenvector <-eigenvector$vector[names(betweenness_random)]
#boxplot(list(log(pathogen_eigenvector,2),log(random_eigenvector,2)),names=c("Pathogen genes","Random"))
#####7.15 imprinted genes and pathogen genes
#### imprinted genes and pathogen genes shortest path
# Shortest-path distances from all/paternal/maternal imprinted genes to the
# pathogen genes, compared against random gene sets of the same size.
# NOTE(review): upper.tri() on a non-square v-by-to distance matrix keeps an
# arbitrary half of the entries — confirm this filtering is intended.
PATHOGEN2IGs <-shortest.paths(graph = g,v = imprinted_genes_in_network,pathogen_genes,weights = NA)
PATHOGEN2IGs<-PATHOGEN2IGs[upper.tri(PATHOGEN2IGs)]
PATHOGEN2IGs_P <-shortest.paths(graph = g,v = paternal_imprint,pathogen_genes,weights = NA)
PATHOGEN2IGs_P <- PATHOGEN2IGs_P[upper.tri(PATHOGEN2IGs_P)]
PATHOGEN2IGs_M <-shortest.paths(graph = g,v = maternal_imprint,pathogen_genes,weights = NA)
PATHOGEN2IGs_M<-PATHOGEN2IGs_M[upper.tri(PATHOGEN2IGs_M)]
fun <- function(x){
require(igraph)
rand_genes<-sample(names(V(g)),length(pathogen_genes),replace = FALSE)
d_random<-shortest.paths(g,imprinted_genes_in_network, rand_genes,weights = NA)
d_random<-d_random[upper.tri(d_random)]
return(mean(d_random[is.finite(d_random)]))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 7))
simulations<-unlist(simulations)
# Null histogram with observed means for IGs / PEGs / MEGs marked by arrows;
# infinite (disconnected) distances are excluded throughout.
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins = 50)+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean shortest path to pathogen related genes")+
#xlim(3,4)+
annotate("segment", x=mean(PATHOGEN2IGs[is.finite(PATHOGEN2IGs)]), xend=mean(PATHOGEN2IGs[is.finite(PATHOGEN2IGs)]), y=25, yend=0, color="black", size=1, arrow=arrow(length = unit(0.3, "cm")))+
annotate("text", x = mean(PATHOGEN2IGs[is.finite(PATHOGEN2IGs)]), y = 30, label = "IGs")+
annotate("segment", x=mean(PATHOGEN2IGs_P[is.finite(PATHOGEN2IGs_P)]), xend=mean(PATHOGEN2IGs_P[is.finite(PATHOGEN2IGs_P)]), y=25, yend=0, color="blue", size=1, arrow=arrow(length = unit(0.3, "cm")))+
annotate("text", x = mean(PATHOGEN2IGs_P[is.finite(PATHOGEN2IGs_P)]), y = 30, label = "PEGs")+
annotate("segment", x=mean(PATHOGEN2IGs_M[is.finite(PATHOGEN2IGs_M)]), xend=mean(PATHOGEN2IGs_M[is.finite(PATHOGEN2IGs_M)]), y=25, yend=0, color="tomato", size=1, arrow=arrow(length = unit(0.3, "cm")))+
annotate("text", x = mean(PATHOGEN2IGs_M[is.finite(PATHOGEN2IGs_M)]), y = 30, label = "MEGs")
### direct interaction pathogen genes
# First-order neighbours of imprinted genes in the full network g, then the
# subset of those neighbours that are pathogen-pathway genes.
imprint_neighbours<-unique(names(unlist(neighborhood(graph = g,order = 1,nodes = imprinted_genes_in_network))))
imprint_neighbours_pathogen<-intersect(imprint_neighbours,pathogen_genes)
# BUG FIX: the two vertex sets were passed as two separate arguments, so the
# second set landed in induced_subgraph()'s `impl` parameter instead of being
# part of `vids`. They are now combined with c(), matching the analogous
# immune-gene section later in the script; the imprinted set is also taken
# from the network-restricted vector so every id is a valid vertex of g.
sub_gg<-induced_subgraph(graph = g,vids = c(imprinted_genes_in_network,imprint_neighbours_pathogen))
# Plot a subnetwork with imprinting-aware colouring: MEGs in tomato, PEGs in
# steelblue, and every remaining vertex (here: pathogen-pathway partners) in
# black. Vertex size scales with log2(degree); layout is a tree. Reads the
# script-level globals maternal_imprint, paternal_imprint and
# imprinted_genes_in_network.
plot_personal_graph<-function(graph_object){
  vertex_ids <- names(V(graph_object))
  # Assignment order matters: paternal overrides maternal for genes present
  # in both lists, and non-imprinted vertices are painted last.
  V(graph_object)$color[vertex_ids %in% maternal_imprint] <- "tomato"
  V(graph_object)$color[vertex_ids %in% paternal_imprint] <- "steelblue"
  V(graph_object)$color[!vertex_ids %in% imprinted_genes_in_network] <- "black"
  # Visual attributes: size from degree, small black labels, thin gray edges.
  V(graph_object)$size <- 2 * log(degree(graph_object), 2)
  V(graph_object)$label.cex <- 0.5
  V(graph_object)$label.color <- "black"
  E(graph_object)$color <- "gray"
  E(graph_object)$width <- 1
  plot.igraph(graph_object, layout = layout_as_tree, vertex.frame.color = "white")
  legend('topleft',
         legend = c("Maternal", "Paternal", "Pathogen"),
         pch = 19,
         box.lty = 2,
         pt.cex = 3,
         cex = 1,
         col = c("tomato", "steelblue", "black"),
         y.intersp = 1.5)
}
# Plot the IGs + direct pathogen-neighbour subgraph, then extract the largest
# connected component.
plot_personal_graph(graph_object = sub_gg)
## maximum component size
# NOTE(review): assumes membership id 1 is the largest component —
# components() does not sort by size; TODO confirm.
nodes_in_maximum<-names(components(sub_gg)$membership)[components(sub_gg)$membership==1]
sub_ggg<-induced_subgraph(graph = g,vids = nodes_in_maximum)
###7.155 immune related genes (new from network)
# Load a curated immune-related interaction table and keep genes present in g.
immune_related_genes <- read.table("myresearch/network/data/900immune_related.txt",header = TRUE,sep="\t",stringsAsFactors = FALSE)
immune_related_genes <-unique(c(immune_related_genes$ida,immune_related_genes$idb))
immune_related_genes_in_network<-intersect(immune_related_genes,names(V(g)))
### degree simulation
# Permutation null: mean degree of random sets sized like the immune set.
fun <- function(x){
require(igraph)
rand_genes<-sample(names(V(g)),length(immune_related_genes_in_network),replace = FALSE)
degree_random<-degree(g,rand_genes)
return(mean(as.numeric(degree_random)))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 60))
simulations<-unlist(simulations)
# Arrow at x=62 is hard-coded — presumably the observed immune-gene mean
# degree; TODO confirm.
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins = 200)+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean degree")+
annotate("segment", x=62, xend=62, y=5, yend=0, color="black", size=2, arrow=arrow())+
annotate("text", x = 62, y = 6, label = "Immune genes")
### betweenness
# NOTE(review): the variable name between_pathogen is reused here for the
# immune-related gene set.
between_pathogen <- IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%immune_related_genes_in_network] ## long time
mean(between_pathogen)
#degree_random <- degree(g,v = sample(names(V(g)),length(essential_genes),replace = FALSE)) # long time
fun <- function(x){
require(igraph)
rand_genes<-sample(names(V(g)),length(immune_related_genes_in_network),replace = FALSE)
betweenness_random<-IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%rand_genes]
return(mean(betweenness_random))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 60))
simulations<-unlist(simulations)
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins = 200)+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean betweenness")+
annotate("segment", x=mean(between_pathogen), xend=mean(between_pathogen), y=5, yend=0, color="red", size=2, arrow=arrow())+
annotate("text", x = mean(between_pathogen), y = 6, label = "Immune genes")
###### imprinted genes and pathogen genes shortest path
# Shortest paths from IGs / PEGs / MEGs to immune-related genes vs. random
# gene sets; variable names PATHOGEN2IGs* are reused from the KEGG section.
PATHOGEN2IGs <-shortest.paths(graph = g,v = imprinted_genes_in_network,immune_related_genes_in_network,weights = NA)
PATHOGEN2IGs<-PATHOGEN2IGs[upper.tri(PATHOGEN2IGs)]
PATHOGEN2IGs_P <-shortest.paths(graph = g,v = paternal_imprint,immune_related_genes_in_network,weights = NA)
PATHOGEN2IGs_P <- PATHOGEN2IGs_P[upper.tri(PATHOGEN2IGs_P)]
PATHOGEN2IGs_M <-shortest.paths(graph = g,v = maternal_imprint,immune_related_genes_in_network,weights = NA)
PATHOGEN2IGs_M<-PATHOGEN2IGs_M[upper.tri(PATHOGEN2IGs_M)]
fun <- function(x){
require(igraph)
rand_genes<-sample(names(V(g)),length(immune_related_genes_in_network),replace = FALSE)
d_random<-shortest.paths(g,imprinted_genes_in_network, rand_genes,weights = NA)
d_random<-d_random[upper.tri(d_random)]
return(mean(d_random[is.finite(d_random)]))
}
library(parallel)
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 60))
simulations<-unlist(simulations)
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins = 200)+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean shortest path to pathogen related genes")+
#xlim(3,4)+
annotate("segment", x=mean(PATHOGEN2IGs[is.finite(PATHOGEN2IGs)]), xend=mean(PATHOGEN2IGs[is.finite(PATHOGEN2IGs)]), y=5, yend=0, color="black", size=1, arrow=arrow(length = unit(0.3, "cm")))+
annotate("text", x = mean(PATHOGEN2IGs[is.finite(PATHOGEN2IGs)]), y = 6, label = "IGs")+
annotate("segment", x=mean(PATHOGEN2IGs_P[is.finite(PATHOGEN2IGs_P)]), xend=mean(PATHOGEN2IGs_P[is.finite(PATHOGEN2IGs_P)]), y=5, yend=0, color="blue", size=1, arrow=arrow(length = unit(0.3, "cm")))+
annotate("text", x = mean(PATHOGEN2IGs_P[is.finite(PATHOGEN2IGs_P)]), y = 6, label = "PEGs")+
annotate("segment", x=mean(PATHOGEN2IGs_M[is.finite(PATHOGEN2IGs_M)]), xend=mean(PATHOGEN2IGs_M[is.finite(PATHOGEN2IGs_M)]), y=5, yend=0, color="tomato", size=1, arrow=arrow(length = unit(0.3, "cm")))+
annotate("text", x = mean(PATHOGEN2IGs_M[is.finite(PATHOGEN2IGs_M)]), y = 6, label = "MEGs")
### visulazation
### direct interaction pathogen genes
# First-order neighbours of IGs that are immune-related; combined induced
# subgraph for plotting.
imprint_neighbours<-unique(names(unlist(neighborhood(graph = g,order = 1,nodes = imprinted_genes_in_network))))
imprint_neighbours_pathogen<-intersect(imprint_neighbours,immune_related_genes_in_network)
sub_gg<-induced_subgraph(graph = g,vids = c(imprinted_genes_in_network,imprint_neighbours_pathogen))
# Plot a subnetwork with imprinting-aware colouring: MEGs in tomato, PEGs in
# steelblue, and every other vertex (here: immune-related partners) in
# lightgreen. Vertex size scales with log2(degree); layout is a tree. Reads
# the script-level globals maternal_imprint, paternal_imprint and
# imprinted_genes_in_network.
plot_personal_graph<-function(graph_object){
  vertex_ids <- names(V(graph_object))
  # Paint maternal first, then paternal (wins for genes in both sets), then
  # everything that is not an imprinted gene.
  V(graph_object)$color[vertex_ids %in% maternal_imprint] <- "tomato"
  V(graph_object)$color[vertex_ids %in% paternal_imprint] <- "steelblue"
  V(graph_object)$color[!vertex_ids %in% imprinted_genes_in_network] <- "lightgreen"
  # Visual attributes.
  V(graph_object)$size <- 2 * log(degree(graph_object), 2)
  V(graph_object)$label.cex <- 0.5
  V(graph_object)$label.color <- "black"
  E(graph_object)$color <- "gray"
  E(graph_object)$width <- 1
  plot.igraph(graph_object, layout = layout_as_tree, vertex.frame.color = "white")
  legend('topleft',
         legend = c("Maternal", "Paternal", "Pathogen"),
         pch = 19,
         box.lty = 2,
         pt.cex = 3,
         cex = 1,
         col = c("tomato", "steelblue", "lightgreen"),
         y.intersp = 1.5)
}
# Largest-component extraction for the immune subgraph.
# NOTE(review): assumes membership id 1 is the largest component; TODO confirm.
nodes_in_maximum<-names(components(sub_gg)$membership)[components(sub_gg)$membership==1]
sub_ggg<-induced_subgraph(graph = g,vids = nodes_in_maximum)
#plot_personal_graph(graph_object = sub_ggg)
## 7.16: cluster_one parse with kegg
#7.16: cluster_one parse with kegg
# Parse ClusterONE module output (one tab-separated gene list per line) and
# cross-tabulate the modules against KEGG pathways and imprinted genes.
require(org.At.tair.db)
xx<- as.list(org.At.tairPATH2TAIR)
dat <- readLines("4network_analysis_result/2cluster_one/cluster_one_out.txt")
names(dat)<-seq(1,length(dat))
dat <- strsplit(dat, "\t")
module_genes<-unique(unlist(dat))
# Per KEGG pathway: pathway size and overlap with the union of module genes.
for (name in names(xx)){ # for loop kegg
message(c(name,",",length(xx[[name]]),",",length(intersect(xx[[name]],module_genes))))
}
# Per module: number of imprinted genes it contains (only non-zero reported).
for (name in names(dat)){
if (length(intersect(imprinted_genes_in_network,dat[[name]]))>0){
message(name,",",length(intersect(imprinted_genes_in_network,dat[[name]])))
}
}
## focues group
# Module 908 singled out for per-pathway annotation.
module_genes_focus<-dat$`908`
for (name in names(xx)){ # for loop kegg
if (length(intersect(module_genes_focus,xx[[name]]))>0){
message(c(name,",",length(intersect(module_genes_focus,xx[[name]]))))
}
}
## 7.17: methylation related genes
#7.16: methylation related genes
# Shortest paths from methylation-related genes (column V2 holds gene ids) to
# the imprinted sets; infinite (disconnected) distances are dropped.
methy_genes<-read.csv("other_resources/methylation_related_genes.csv",header = FALSE,stringsAsFactors = FALSE)
IGs2Methy<-shortest.paths(graph = g,v = methy_genes$V2,to = imprinted_genes_in_network,weights = NA)
IGs2Methy<-IGs2Methy[is.finite(IGs2Methy)]
PEGs2Methy<-shortest.paths(graph = g,v = methy_genes$V2,to = paternal_imprint,weights = NA)
PEGs2Methy<-PEGs2Methy[is.finite(PEGs2Methy)]
MEGs2Methy<-shortest.paths(graph = g,v = methy_genes$V2,to = maternal_imprint,weights = NA)
MEGs2Methy<-MEGs2Methy[is.finite(MEGs2Methy)]
# One permutation for the methylation-gene null model: mean finite shortest
# path from the imprinted genes to a random gene set the size of the
# methylation-gene list. The argument x is only the mclapply iteration index.
# Reads the script-level globals g, methy_genes and imprinted_genes_in_network.
fun <- function(x){
require(igraph)
# BUG FIX: methy_genes is a data.frame read via read.csv(), so
# length(methy_genes) is its COLUMN count (2), which made every null draw
# sample only two genes. nrow() gives the intended number of genes.
rand_genes<-sample(names(V(g)),nrow(methy_genes),replace = FALSE)
d_random<-shortest.paths(g,imprinted_genes_in_network, rand_genes,weights = NA)
return(mean(as.numeric(d_random)[is.finite(as.numeric(d_random))]))
}
library(parallel)
library(ggplot2)
# Null distribution of the mean shortest path (1000 random draws via fun).
system.time(simulations<-mclapply(1:1000,fun,mc.cores = 7))
simulations<-unlist(simulations)
# Histogram annotated with observed MEANS for IGs / PEGs / MEGs.
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins = 50)+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean shortest path")+
annotate("segment", x=mean(IGs2Methy), xend=mean(IGs2Methy), y=25, yend=0, color="black", size=2, arrow=arrow())+
annotate("text", x = mean(IGs2Methy), y = 30, label = "IGs")+
annotate("segment", x=mean(PEGs2Methy[is.finite(PEGs2Methy)]), xend=mean(PEGs2Methy[is.finite(PEGs2Methy)]), y=25, yend=0, color="blue", size=2, arrow=arrow())+
annotate("text", x = mean(PEGs2Methy[is.finite(PEGs2Methy)]), y = 30, label = "PEGs")+
annotate("segment", x=mean(MEGs2Methy[is.finite(MEGs2Methy)]), xend=mean(MEGs2Methy[is.finite(MEGs2Methy)]), y=25, yend=0, color="tomato", size=2, arrow=arrow())+
annotate("text", x = mean(MEGs2Methy[is.finite(MEGs2Methy)]), y = 30, label = "MEGs")
# Same plot with MEDIANS instead of means.
ggplot(as.data.frame(simulations),aes(x=simulations))+geom_histogram(fill="steelblue",bins = 50)+
theme_bw()+
theme(text = element_text(size=20))+
ylab("Frequency")+
xlab("Mean shortest path")+
annotate("segment", x=median(IGs2Methy), xend=median(IGs2Methy), y=25, yend=0, color="black", size=2, arrow=arrow())+
annotate("text", x = median(IGs2Methy), y = 30, label = "IGs")+
annotate("segment", x=median(PEGs2Methy[is.finite(PEGs2Methy)]), xend=median(PEGs2Methy[is.finite(PEGs2Methy)]), y=25, yend=0, color="blue", size=2, arrow=arrow())+
annotate("text", x = median(PEGs2Methy[is.finite(PEGs2Methy)]), y = 30, label = "PEGs")+
annotate("segment", x=median(MEGs2Methy[is.finite(MEGs2Methy)]), xend=median(MEGs2Methy[is.finite(MEGs2Methy)]), y=25, yend=0, color="tomato", size=2, arrow=arrow())+
annotate("text", x = median(MEGs2Methy[is.finite(MEGs2Methy)]), y = 30, label = "MEGs")
## 7.18 paralogs genes
##paralogs genes
# Paralogues of imprinted genes that are in the network but not themselves
# imprinted; compare degree / betweenness / closeness against the IGs.
paralogs_genes <-read.table("/home/wuzefeng/MyResearch/networks/2network_prediction/other_resources/paralogs/90IGs_paralogs.txt",header = TRUE,stringsAsFactors = FALSE,sep = "\t")
paralogs_genes<-unique(paralogs_genes$paralogue_genes)[unique(paralogs_genes$paralogue_genes) %in% names(V(g))][!unique(paralogs_genes$paralogue_genes)[unique(paralogs_genes$paralogue_genes) %in% names(V(g))]%in% imprinted_genes_in_network]
## degree comparison
require(ggsignif)
df<-data.frame(degree=c(degree(g,imprinted_genes_in_network),degree(g,paralogs_genes)),
class=c(rep("IGs",length(imprinted_genes_in_network)),rep("IGPs",length(paralogs_genes))),stringsAsFactors = FALSE)
df$class<-factor(df$class,levels = c("IGs","IGPs"))
p_degree<-ggplot(df,aes(y=degree,x=class,fill=class))+geom_boxplot()+theme_bw(base_size = 20)+theme(legend.position = "none")+geom_signif(comparisons = list(c("IGs","IGPs")),test.args = "greater")+xlab("")+ylab("Degree")+scale_fill_manual(values = c("#E69F00", "#56B4E9"))
#### betweenness
# imprinted_betweenness is a precomputed global — TODO confirm its ordering
# matches imprinted_genes_in_network.
df<-data.frame(betweenness=c(imprinted_betweenness,IGFN_betweenness$betweenness[IGFN_betweenness$genes%in%paralogs_genes]),
class=c(rep("IGs",length(imprinted_genes_in_network)),rep("IGPs",length(paralogs_genes))),stringsAsFactors = FALSE)
df$class<-factor(df$class,levels = c("IGs","IGPs"))
p_betweenness<-ggplot(df,aes(y=betweenness,x=class,fill=class))+geom_boxplot()+theme_bw(base_size = 20)+geom_signif(comparisons = list(c("IGs","IGPs")),test.args = "greater")+xlab("")+ylab("Betweenness")+theme(legend.position = "none")+scale_fill_manual(values = c("#E69F00", "#56B4E9"))
### closeness
df<-data.frame(closeness=c(closeness(g,imprinted_genes_in_network),closeness(g,paralogs_genes)),
class=c(rep("IGs",length(imprinted_genes_in_network)),rep("IGPs",length(paralogs_genes))),stringsAsFactors = FALSE)
df$class<-factor(df$class,levels = c("IGs","IGPs"))
p_closeness<-ggplot(df,aes(y=closeness,x=class,fill=class))+geom_boxplot()+theme_bw(base_size = 20)+geom_signif(comparisons = list(c("IGs","IGPs")),test.args = "greater")+xlab("")+ylab("Closeness")+theme(legend.position = "none")+scale_fill_manual(values = c("#E69F00", "#56B4E9"))
library(gridExtra)
grid.arrange(p_degree, p_betweenness, p_closeness,nrow=1, ncol=3)
### 8.1 famod plot high confidence
# Export the IGPN edge list in FANMOD format: vertices renumbered 0..n-1 and
# coloured 1 = maternal, 2 = paternal, 0 = not imprinted.
edge_list.df<-as.data.frame(as_edgelist(IGPN),stringsAsFactors = FALSE)
colnames(edge_list.df)<-c("X1","X2")
gene_list<-unique(c(edge_list.df$X1,edge_list.df$X2)) #2746
gene_list_numbers <-seq(0,length(gene_list)-1)
gene_name2number<-data.frame(gene_list,gene_list_numbers,stringsAsFactors = FALSE)
edge_list.df$g1<-gene_name2number$gene_list_numbers[match(edge_list.df$X1,gene_name2number$gene_list)]
edge_list.df$g2<-gene_name2number$gene_list_numbers[match(edge_list.df$X2,gene_name2number$gene_list)]
imprinted_genes$color<-ifelse(imprinted_genes$V2=="m",1,2)
edge_list.df$c1<- imprinted_genes$color[match(edge_list.df$X1,imprinted_genes$V1)]
edge_list.df$c2<- imprinted_genes$color[match(edge_list.df$X2,imprinted_genes$V1)]
edge_list.df$c1[is.na(edge_list.df$c1)]=0
edge_list.df$c2[is.na(edge_list.df$c2)]=0
fanmond_input<-edge_list.df[,c(3,4,5,6)]
# NOTE(review): this "high confidence" section writes to the
# 2low_confidence_imp_result path — verify the output file name.
write.table(fanmond_input,"4network_analysis_result/2low_confidence_imp_result/network_with_low_imp.txt",row.names = FALSE,col.names = FALSE,quote = FALSE,sep = "\t")
## plot
# Normalized Z-score profile of the 28 three-node motifs (FANMOD output).
m<-read.table("/home/wuzefeng/MyResearch/networks/2network_prediction/4network_analysis_result/2high_confidence_imp_result/imp_fanmod/motif.txt",sep="\t",header = TRUE)
m$num<-seq(1,nrow(m))
m$nz<-m$Z.Score/sqrt(sum(m$Z.Score^2))
ggplot(m,aes(x=num,y=nz,group=1))+ylim(-0.8,0.8)+
geom_vline(xintercept = 0)+
theme_minimal(base_size = 20)+
scale_x_discrete(limits=seq(1,28),labels=seq(1,28))+xlab("")+
ylab("Normalized Z-score")+
geom_rect(aes(xmin=m$num-0.5,
xmax=m$num+0.5,
ymin=-Inf,
ymax=Inf),
fill = rep(c("gray70","#F8F8FF"),14))+
geom_line(color="steelblue")+
geom_point(size=6, shape=20,color="steelblue")+
geom_hline(yintercept = 0)
### enrich motif instance extract
### motif instance extract
# BUG FIX: the embedded awk program contained unescaped double quotes
# (FS=","), which terminated the R string literal early and split the shell
# command across two arguments of system(). The quotes are now escaped so the
# full pipeline is passed as one string.
system("grep 011111112 fanmod_input.txt.csv.dump | awk 'BEGIN{FS=\",\"}{print $2,$3,$4}' > 2motif18/motif18.txt")
# Read the extracted motif-18 instances (three FANMOD node ids per row) and
# map the integer ids back to gene identifiers via gene_name2number.
focused_motif<- read.table("../2network_prediction/4network_analysis_result/2high_confidence_imp_result/imp_fanmod/2motif18/motif18.txt")
focused_motif$V4<-gene_name2number$gene_list[match(focused_motif$V1,gene_name2number$gene_list_numbers)]
focused_motif$V5<-gene_name2number$gene_list[match(focused_motif$V2,gene_name2number$gene_list_numbers)]
focused_motif$V6<-gene_name2number$gene_list[match(focused_motif$V3,gene_name2number$gene_list_numbers)]
# Classify a gene id by imprinting status: "P" = paternally expressed (PEG),
# "M" = maternally expressed (MEG), "N" = not imprinted. Paternal status wins
# for a gene listed in both sets. The gene sets default to the script-level
# globals paternal_imprint / maternal_imprint but can be passed explicitly
# (backward-compatible: existing class_genes(x) calls are unchanged).
#
# BUG FIX: the original final branch tested `! x %in% paternal_imprint` TWICE
# instead of also checking the maternal list. Because the earlier branches
# already returned for imprinted genes, it happened to behave like a plain
# default; it is replaced by an explicit fall-through, which is equivalent for
# all reachable cases and correct in general.
class_genes<-function(x, peg = paternal_imprint, meg = maternal_imprint){
  if (x %in% peg){
    return("P")
  }
  if (x %in% meg){
    return("M")
  }
  return("N")
}
# Label each motif node as P / M / N and sort the gene columns of every motif
# instance into (M, N, P) order before exporting.
focused_motif$V7<-unlist(lapply(focused_motif$V4,class_genes))
focused_motif$V8<-unlist(lapply(focused_motif$V5,class_genes))
focused_motif$V9<-unlist(lapply(focused_motif$V6,class_genes))
focused_motif_sort<-as.data.frame(t(apply(focused_motif,1,function(x)x[c(4,5,6)][order(x[c(7,8,9)])])),stringsAsFactors = FALSE)
colnames(focused_motif_sort)<-sort(c("M","N","P"))
write.table(focused_motif_sort,file="4network_analysis_result/2high_confidence_imp_result/imp_fanmod/2motif18/2motif_18.txt",sep="\t",quote = FALSE,row.names = FALSE,col.names = TRUE)
### low_confidence imp export famond
# Same FANMOD export as above, but the imprinting colours come from the
# low-confidence imprinted gene list (status in column V3 here, not V2).
edge_list.df<-as.data.frame(as_edgelist(IGPN),stringsAsFactors = FALSE)
colnames(edge_list.df)<-c("X1","X2")
gene_list<-unique(c(edge_list.df$X1,edge_list.df$X2)) #2746
gene_list_numbers <-seq(0,length(gene_list)-1)
gene_name2number<-data.frame(gene_list,gene_list_numbers,stringsAsFactors = FALSE)
edge_list.df$g1<-gene_name2number$gene_list_numbers[match(edge_list.df$X1,gene_name2number$gene_list)]
edge_list.df$g2<-gene_name2number$gene_list_numbers[match(edge_list.df$X2,gene_name2number$gene_list)]
# NOTE(review): this overwrites the global imprinted_genes data.frame used by
# earlier sections.
imprinted_genes<-read.table("~/MyResearch/Imprinting_prediction/imprint_gene_list/6.1imprinted.list_ara",stringsAsFactors = FALSE)
imprinted_genes$color<-ifelse(imprinted_genes$V3=="m",1,2)
edge_list.df$c1<- imprinted_genes$color[match(edge_list.df$X1,imprinted_genes$V1)]
edge_list.df$c2<- imprinted_genes$color[match(edge_list.df$X2,imprinted_genes$V1)]
edge_list.df$c1[is.na(edge_list.df$c1)]=0
edge_list.df$c2[is.na(edge_list.df$c2)]=0
fanmond_input<-edge_list.df[,c(3,4,5,6)]
write.table(fanmond_input,"4network_analysis_result/2low_confidence_imp_result/network_with_low_imp.txt",row.names = FALSE,col.names = FALSE,quote = FALSE,sep = "\t")
## plot
# Low-confidence motif Z-scores, reordered to match the high-confidence motif
# ordering (m$Adj) so the two profiles are comparable.
m1<-read.table("/home/wuzefeng/MyResearch/networks/2network_prediction/4network_analysis_result/2low_confidence_imp_result/famod/2motfi_enrich.txt",sep="\t",header = TRUE)
m1$nz<-m1$Z.Score/sqrt(sum(m1$Z.Score^2))
m1<-m1[match(m$Adj,m1$Adj),]
m1$num<-seq(1,nrow(m1))
ggplot(m1,aes(x=num,y=nz,group=1))+ylim(-0.8,0.8)+
geom_vline(xintercept = 0)+
theme_minimal(base_size = 20)+
scale_x_discrete(limits=seq(1,28),labels=seq(1,28))+xlab("")+
ylab("Normalized Z-score")+
geom_rect(aes(xmin=m1$num-0.5,
xmax=m1$num+0.5,
ymin=-Inf,
ymax=Inf),
fill = rep(c("gray70","#F8F8FF"),14))+
geom_line(color="steelblue")+
geom_point(size=6, shape=20,color="steelblue")+
geom_hline(yintercept = 0)
### motif analysis
# count interecting peg and meg for each partner
# For every partner gene, collect its PEG and MEG neighbours; for partners
# with at least one of each, enumerate all PEG-MEG combinations.
# Reads the global `partners` (defined earlier in the script).
motif_peg <-c()
motif_meg <-c()
peg_threshold = 1
meg_threshold = 1
partners_peg_meg<-list()
for (p in partners){
neib<-names(neighbors(graph = g,v = p))
neib_peg <-intersect(paternal_imprint, neib)
neib_meg <-intersect(maternal_imprint, neib)
message("Neighbors number is: ",length(neib))
message("-----PEGs Neighbors number is: ",length(neib_peg))
message("-----MEGs Neighbors number is: ",length(neib_meg))
motif_peg<-c(motif_peg,length(neib_peg))
motif_meg <-c(motif_meg, length(neib_meg))
if (length(neib_meg)>=1 & length(neib_peg)>=1){
edge_combins <- expand.grid(neib_meg,neib_peg)
colnames(edge_combins)<-c("MEG","PEG")
edge_combins$Partner<-p
partners_peg_meg[[p]]<-edge_combins
}
}
partners_peg_meg = do.call(rbind, partners_peg_meg)
partners_peg_meg$link <-apply(partners_peg_meg,1,function(x) ifelse(are.connected(g,x[1],x[2]),1,0)) # test link or not link between peg and meg
# Keep only partners that have BOTH linked and unlinked PEG-MEG pairs (needs
# dplyr attached earlier; group_by(add = TRUE) is a deprecated signature).
selected_partners<-partners_peg_meg%>%group_by(Partner,add = TRUE)%>%summarise(total = n(), type = n_distinct(link))%>%filter(type>1) # keep both peg-meg linked and peg-med unlinked two types
selected_partners_peg_meg<-subset(partners_peg_meg,partners_peg_meg$Partner%in%selected_partners$Partner)
### import dn/ds value
#sta<-selected_partners_peg_meg%>%group_by(Partner,link)%>%summarise(n=n()) # numnber of linked and non-linked PEG-MEG for each partner
#plot(sta$n[sta$link==0],sta$n[sta$link!=0])
# dN/dS against A. lyrata per gene; keep the minimum-dS entry per gene.
dnds<-read.table("myresearch/network/data/TAIR2lyrata.ds.mart_export.txt",sep="\t",header = TRUE,stringsAsFactors = FALSE)
dnds<-dnds[complete.cases(dnds),]
dnds<-unique(dnds)
dnds<-dnds %>% group_by(Gene.stable.ID) %>% slice(which.min(dS.with.Arabidopsis.lyrata)) # select minumum ds for same genes
dnds$dnds<-dnds$dN.with.Arabidopsis.lyrata/dnds$dS.with.Arabidopsis.lyrata
# NOTE(review): dnds is a grouped tibble here; setting rownames on a tibble
# is deprecated and row-name indexing below may not behave as intended —
# verify the dnds[x[1],] lookups.
rownames(dnds)<-dnds$Gene.stable.ID
selected_partners_peg_meg$delta_dnds<-apply(selected_partners_peg_meg,1,function(x) abs(dnds[x[1],]$dnds-dnds[x[2],]$dnds))
selected_partners_peg_meg<-selected_partners_peg_meg[complete.cases(selected_partners_peg_meg),]
require(ggsignif)
# |delta dN/dS| of linked vs. non-linked PEG-MEG pairs (Wilcoxon test).
ggplot(selected_partners_peg_meg,aes(x=as.factor(link),y=delta_dnds))+
geom_boxplot()+
theme_bw()+
theme(text = element_text(size = 20))+
xlab("class")+
scale_x_discrete(labels=c("Non-linked PEG-MEG", "Linked PEG-MEG"))+
geom_signif(comparisons = list(c("0","1")),test = "wilcox.test",map_signif_level = "FALSE")
### HIC data analysis
### hic data analysis
# 1.1 find interacting impritned gene paris by katyoplote
# Build a TxDb from the TAIR10 GTF; `genes` (GRanges of all genes) shadows
# GenomicFeatures::genes from here on.
library(GenomicInteractions)
require(GenomicFeatures)
require(karyoploteR)
tr<-makeTxDbFromGFF("~/MyResearch/genome.db/TAIR/gtf/Arabidopsis_thaliana.TAIR10.31.gtf",format = "gtf")
genes<-genes(tr)
# Load a significant Hi-C interaction table (CSV) and turn it into a
# GInteractions object, attaching read counts and p-values as metadata
# columns. Expected columns: chrom_left/start_left/end_left,
# chrom_right/start_right/end_right, Reads.counts, p.value.
GenomeInteract<-function(interaction_file="other_resources/HIC/sigInteractions_10kb.csv"){
  hic <- read.csv(interaction_file)
  # Left and right anchors of every interaction.
  anchor_left <- GRanges(
    seqnames = hic$chrom_left,
    ranges = IRanges(start = hic$start_left, end = hic$end_left)
  )
  anchor_right <- GRanges(
    seqnames = hic$chrom_right,
    ranges = IRanges(start = hic$start_right, end = hic$end_right)
  )
  interactions <- GInteractions(anchor_left, anchor_right)
  interactions$reads <- hic$Reads.counts
  interactions$p_value <- hic$p.value
  interactions
}
# Load intra- and inter-chromosomal significant interactions and pool them.
GI_intra<-GenomeInteract(interaction_file="other_resources/HIC/sigInteractions_10kb.csv")
GI_inter <-GenomeInteract(interaction_file = "other_resources/HIC/sigInterChrInteractions_10kb.csv")
GI_all<- c(GI_intra,GI_inter) # all hic interaction
# Overlap a gene set with Hi-C interaction anchors, count interactions hit at
# both ends, and draw the linked gene pairs on a karyotype plot.
# Arguments: gene_list — gene ids usable to index genes(tr);
#            Hic_interacts — a GInteractions object (e.g. GI_all).
# Returns the number of interactions overlapped by >1 gene/anchor.
# NOTE(review): the hard-coded column indices c(1,2,3,7,8,9,11,6,12) depend
# on the exact data.frame layout produced from the two GRanges — confirm
# before changing anything here.
Interaction_analysis<-function(gene_list=imprinted_genes,Hic_interacts=GI_all){
IGs_ranges<-genes(tr)[gene_list]
##### genes overlaps with interacting paris
overlaps1<-findOverlaps(query = IGs_ranges,subject = Hic_interacts,use.region="first") # left overlap
message(c("Left overlapped gene number:",length(unique(queryHits(overlaps1)))))
overlaps2<-findOverlaps(query = IGs_ranges,subject = Hic_interacts,use.region="second") # right overlaps
message(c("Right overlapped gene number:",length(unique(queryHits(overlaps2)))))
## find target interaction pairs
# An interaction counts when its index appears in both the left- and
# right-anchor hit lists (frequency > 1 across the two unique sets).
interac_freq<-sort(table(c(unique(subjectHits(overlaps1)),unique(subjectHits(overlaps2)))),decreasing = TRUE)
interac_freq<-interac_freq[interac_freq>1]
message(c("Interaction gene pairs:",length(interac_freq)))
### visuliaztion (mapping interacting genes into interaction pairs)
# Chromosome sizes -> (chr, start, end) table for plotKaryotype().
ara_pos<-read.table("/home/wuzefeng/MyResearch/genome.db/TAIR/dna/chrom_size")
ara_pos$start=1
colnames(ara_pos)<-c("chr","end","start")
ara_pos<-ara_pos[,c(1,3,2)]
ara_pos<-ara_pos[order(ara_pos$chr),]
kp <- plotKaryotype(genome = toGRanges(ara_pos),plot.type = 1)
## get interaction genes
# For each qualifying interaction, draw an arc between the left and right
# genes and label both ends.
for (name in names(interac_freq)){
target_IG_left <- IGs_ranges[queryHits(overlaps1)[subjectHits(overlaps1)==name]]
target_IG_right <- IGs_ranges[queryHits(overlaps2)[subjectHits(overlaps2)==name]]
genes_interacts<-data.frame(left = target_IG_left,right = target_IG_right)
colnames(genes_interacts)[7:9]<-c("link.chr","link.start","link.end")
#print(genes_interacts)
genes_interacts<-toGRanges(genes_interacts[,c(1,2,3,7,8,9,11,6,12)])
strand(genes_interacts)<-strand(target_IG_left)
kpPlotLinks(kp,genes_interacts,arch.height = 0.8,col="blue")
kpText(kp,data = target_IG_left,labels = target_IG_left$gene_id,y = seq(target_IG_left$gene_id)*0.1,col="tomato",cex=0.8)
kpText(kp,data = target_IG_right,labels = target_IG_right$gene_id,y = -(seq(target_IG_right$gene_id))*0.5,col="tomato",cex=0.8)
}
return(length(interac_freq))
}
# Run the anchor-overlap analysis for the imprinted genes.
Interaction_analysis(gene_list = imprinted_genes,Hic_interacts=GI_all)
# Variant of Interaction_analysis that extends each gene by 1 kb of upstream
# promoter before overlapping with the Hi-C anchors, and returns the list of
# interacting gene-id pairs (one data.frame per interaction) instead of a
# count. Plotting side effects are the same karyotype links as above.
Interaction_analysis_extend_promoter<-function(gene_list=imprinted_genes,Hic_interacts=GI_all){
IGs_GR<-genes(tr)[gene_list]
# Gene body unioned with its 1 kb promoter region.
IGs_ranges<-punion(promoters(IGs_GR,upstream = 1000),IGs_GR)
IGs_ranges$gene_id<-names(IGs_ranges)
##### genes overlaps with interacting paris
overlaps1<-findOverlaps(query = IGs_ranges,subject = Hic_interacts,use.region="first")
message(c("Left overlapped gene number:",length(unique(queryHits(overlaps1)))))
overlaps2<-findOverlaps(query = IGs_ranges,subject = Hic_interacts,use.region="second")
message(c("Right overlapped gene number:",length(unique(queryHits(overlaps2)))))
## find target interaction pairs
# Interactions hit on both anchors (frequency > 1 across the two hit lists).
interac_freq<-sort(table(c(unique(subjectHits(overlaps1)),unique(subjectHits(overlaps2)))),decreasing = TRUE)
interac_freq<-interac_freq[interac_freq>1]
message(c("Interaction gene pairs:",length(interac_freq)))
### visuliaztion (mapping interacting genes into interaction pairs)
ara_pos<-read.table("/home/wuzefeng/MyResearch/genome.db/TAIR/dna/chrom_size")
ara_pos$start=1
colnames(ara_pos)<-c("chr","end","start")
ara_pos<-ara_pos[,c(1,3,2)]
ara_pos<-ara_pos[order(ara_pos$chr),]
kp <- plotKaryotype(genome = toGRanges(ara_pos),plot.type = 1)
interac_pairs<-list()
## get interaction genes
# NOTE(review): column indices c(6,12) / c(1,2,3,7,8,9,11,6,12) depend on the
# exact layout of the combined left/right data.frame — confirm before editing.
for (name in names(interac_freq)){
message(name)
target_IG_left <- IGs_ranges[queryHits(overlaps1)[subjectHits(overlaps1)==name]]
target_IG_right <- IGs_ranges[queryHits(overlaps2)[subjectHits(overlaps2)==name]]
genes_interacts<-data.frame(left = target_IG_left,right = target_IG_right)
interac_pairs[[name]]<-genes_interacts[,c(6,12)]
colnames(genes_interacts)[7:9]<-c("link.chr","link.start","link.end")
genes_interacts<-toGRanges(genes_interacts[,c(1,2,3,7,8,9,11,6,12)])
strand(genes_interacts)<-strand(target_IG_left)
kpPlotLinks(kp,genes_interacts,arch.height = 0.8,col="blue")
kpText(kp,data = target_IG_left,labels = target_IG_left$gene_id,y = seq(target_IG_left$gene_id)*0.1,col="tomato",cex=0.8)
kpText(kp,data = target_IG_right,labels = target_IG_right$gene_id,y = -(seq(target_IG_right$gene_id))*0.5,col="tomato",cex=0.8)
}
return(interac_pairs)
}
Interaction_analysis_extend_promoter(gene_list = imprinted_genes,Hic_interacts=GI_all)
# Null-model helper: count Hi-C interaction pairs linked to the given gene set.
#
# gene_list:     character vector of gene IDs, looked up in genes(tr).
# Hic_interacts: Hi-C interaction-pair object (two anchors per pair).
#
# A pair is counted when genes overlap it via both anchors: hits are
# unique-ified per anchor, so a frequency above one requires both sides.
# Returns the number of such interaction pairs.
Interaction_simulation_analysis<-function(gene_list=imprinted_genes,Hic_interacts=GI_all){
gene_ranges <- genes(tr)[gene_list]
## overlap the genes with each anchor of the interaction pairs
left_hits <- findOverlaps(query = gene_ranges,subject = Hic_interacts,use.region="first")
message(c("Left overlapped gene number:",length(unique(queryHits(left_hits)))))
right_hits <- findOverlaps(query = gene_ranges,subject = Hic_interacts,use.region="second")
message(c("Right overlapped gene number:",length(unique(queryHits(right_hits)))))
## keep interaction pairs hit through both anchors
anchor_ids <- c(unique(subjectHits(left_hits)), unique(subjectHits(right_hits)))
pair_freq <- sort(table(anchor_ids), decreasing = TRUE)
pair_freq <- pair_freq[pair_freq > 1]
message(c("Interaction gene pairs:",length(pair_freq)))
length(pair_freq)
}
# Null model: repeat the interaction-pair count for 1000 random gene sets of
# the same size as the imprinted-gene list, to see whether the observed
# number of interacting pairs exceeds chance.
# Fixed: the original grew the result vector with c() inside a for loop
# (quadratic copying); vapply() preallocates the output and also guarantees
# each replicate yields a single integer.
simulations_genes_pairs <- vapply(seq_len(1000), function(m) {
  message(m)
  Interaction_simulation_analysis(
    gene_list = sample(names(genes(tr)), length(imprinted_genes)),
    Hic_interacts = GI_all
  )
}, integer(1))
# Histogram of the null distribution of interacting gene-pair counts from the
# 1000 random-gene-set simulations.
# NOTE(review): the arrow and "Mean" label are hard-coded at x = 8 —
# presumably the mean of the simulated counts; confirm against the actual
# simulation output before reuse.
ggplot(data.frame(genes_pairs=simulations_genes_pairs,stringsAsFactors = FALSE),aes(x=genes_pairs))+
geom_histogram(bins = 100)+
theme_bw()+
theme(text = element_text(size = 20))+
annotate(geom = "segment",x = 8,xend = 8,y = 25,yend = 0,arrow=arrow(),color="red",size=2)+
annotate(geom="text",label="Mean", x = 8, y = 30,size=5)+
ylab("Frequency")+xlab("Interacting gene pairs")
## 1.2 (option) find interacting imprinted gene pairs with circlize (circos)
# Chromosome sizes for the Arabidopsis (TAIR) genome, reshaped into the
# (chr, start, end) layout circlize expects, with start fixed at 1 and
# sectors ordered by chromosome name.
# NOTE(review): absolute path — only resolves on the original author's machine.
ara_pos<-read.table("/home/wuzefeng/MyResearch/genome.db/TAIR/dna/chrom_size")
ara_pos$start=1
colnames(ara_pos)<-c("chr","end","start")
ara_pos<-ara_pos[,c(1,3,2)]
ara_pos<-ara_pos[order(ara_pos$chr),]
##### (i) initialise the circos layout (track order matters: tracks are added
##### from the outside in, so later calls draw closer to the centre)
library(circlize)
# plotType = NULL: set up the sectors only; every track is drawn explicitly
# by the calls that follow.
circos.initializeWithIdeogram(ara_pos,plotType = NULL)
##### expression
## Per-tissue mean expression of the labelled genes, drawn as a genomic
## heatmap on the outside of the circos plot.
# NOTE(review): gene_label_list is assigned later in this script by
# Interaction_analysis(); these sections appear to have been run
# interactively out of order — confirm before sourcing top-to-bottom.
library(stringr)
# BED-like (chr, start, end) coordinates of the labelled genes.
IG_bed = genes[gene_label_list]
IG_bed<-as.data.frame(IG_bed)[,c(1,2,3)]
### (i) expression heatmap
# Tissue names are the expression column names with the trailing replicate
# digit stripped.
tissues<-unique(str_sub(colnames(rna_expression),start = 1,end = -2))
tissue_gene_expression<-list()
for (tissue in tissues){
# Mean over replicates; the hard-coded /3 assumes exactly three replicate
# columns per tissue — TODO confirm.
tissue_gene_expression[[tissue]]<-rowSums(rna_expression[,grep(tissue,colnames(rna_expression))])/3
}
gene_expression_bed<-do.call(cbind.data.frame, tissue_gene_expression)
gene_expression_bed_IGs<-gene_expression_bed[rownames(IG_bed),]
library(BBmisc)
# Row-wise rescale of each gene's expression profile to [-1, 1].
gene_expression_bed_IGs<-t(apply(gene_expression_bed_IGs,1,function(x) normalize(x,range=c(-1,1),method="range")))
expression_IG_bed <- cbind(IG_bed,gene_expression_bed_IGs)
# Red -> black -> green colour ramp over the normalised range.
col_fun = colorRamp2(c(-1, 0, 1), c("RED", "black", "green"))
circos.genomicHeatmap(expression_IG_bed, col = col_fun, side = "outside", border = "white")
####### (ii) draw the interacting gene pairs (links) on the circos plot and
####### label them
# Overlays, on the already-initialised circlize layout:
#   * one genomic link per gene pair sharing a Hi-C interaction, and
#   * a single outside label track with the gene IDs, coloured by
#     membership in maternal_imprint (tomato) vs. others (blue).
#
# gene_list:     gene IDs looked up in genes(tr)
# Hic_interacts: Hi-C interaction-pair object (two anchors per pair)
# Returns: unique gene IDs that were labelled.
Interaction_analysis<-function(gene_list=imprinted_genes,Hic_interacts=GI_all){
IGs_ranges<-genes(tr)[gene_list]
##### overlap the genes with both anchors of the interaction pairs
overlaps1<-findOverlaps(query = IGs_ranges,subject = Hic_interacts,use.region="first") # left anchor
message(c("Left overlapped gene number:",length(unique(queryHits(overlaps1)))))
overlaps2<-findOverlaps(query = IGs_ranges,subject = Hic_interacts,use.region="second") # right anchor
message(c("Right overlapped gene number:",length(unique(queryHits(overlaps2)))))
## interaction pairs hit via both anchors (per-side unique, so freq > 1
## means the pair was hit on the left and on the right)
interac_freq<-sort(table(c(unique(subjectHits(overlaps1)),unique(subjectHits(overlaps2)))),decreasing = TRUE)
interac_freq<-interac_freq[interac_freq>1]
message(c("Interaction gene pairs:",length(interac_freq)))
label_bed_datalist = list()
## for each interaction pair, draw the gene-to-gene links
for (name in names(interac_freq)){
target_IG_left <- IGs_ranges[queryHits(overlaps1)[subjectHits(overlaps1)==name]]
target_IG_right <- IGs_ranges[queryHits(overlaps2)[subjectHits(overlaps2)==name]]
## interacting gene pairs as two BED-like tables
genes_interacts<-data.frame(left = target_IG_left,right = target_IG_right) # data.frame handles the one-to-many case
bed_left <- genes_interacts[,1:6]
bed_right <- genes_interacts[,7:12]
## add links to circos (random colours, arcs reaching down to radius 0.3)
colnames(bed_left)<-colnames(bed_right)<-c("chr","start","end","value","strand","gene_ID")
circos.genomicLink(bed_left, bed_right, col = rand_color(nrow(bed_left), transparency = 0), border = "black",rou = 0.3)
## collect label rows; labels are added once after the loop, because each
## circos.genomicLabels() call would create a new track
label_bed_left<-unique(bed_left)
label_bed_right<-unique(bed_right)
label_bed_datalist[[name]]<-rbind(label_bed_left,label_bed_right)
}
label_bed = do.call(rbind, label_bed_datalist)
#print(head(label_bed))
# Outside label track; gene_ID is column 6 of the collected BED rows.
circos.genomicLabels(bed = unique(label_bed),labels.column = 6,side = "outside",col=ifelse(unique(label_bed)$gene_ID%in%maternal_imprint,"tomato","blue"),labels_height = 0.19)
return(unique(label_bed$gene_ID))
}
# Draw links and labels; the returned gene IDs feed the expression-heatmap
# section earlier in this script.
gene_label_list<-Interaction_analysis()
## (iii) chromosome name track
# One cell per sector: coloured sector background with the chromosome name
# written in white, facing inward.
circos.track(ylim = c(0, 1), panel.fun = function(x, y) {
chr = CELL_META$sector.index
xlim = CELL_META$xlim
ylim = CELL_META$ylim
#circos.rect(xlim[1], 0, xlim[2], 1, col = rand_color(7))
circos.text(mean(xlim), mean(ylim), chr, cex = 1, col = "white",
facing = "inside", niceFacing = TRUE)
}, track.height = 0.15, bg.border = NA, bg.col=c("#8B6508", "#DEB887", "#458B74", "#483D8B", "#104E8B", "#CD9B1D", "#6495ED")
)
### gene density track (1 Mb sliding window over all annotated genes)
circos.genomicDensity(as.data.frame(genes), window.size = 1e6,track.height = 0.15,bg.border="black")
# 1.2 Hi-C TFs --> imprinted genes
# Two-set Venn diagram comparing TFs associated with imprinted genes from
# two sources: the IFGN list (tf_hic$neigs) and the Hi-C analysis
# (rownames of mmm). Output written to "./3D-quadruple_Venn.tiff".
tf_hic<-read.table("~/Desktop/imprinted_TF.txt",sep=",",stringsAsFactors = FALSE,header = TRUE)
library(gplots)
library(VennDiagram)
venn.diagram(
x = list(
IGFN = tf_hic$neigs,
HiC = rownames(mmm)
),
imagetype = "tiff",
filename = "./3D-quadruple_Venn.tiff",# output file name
col = "black", # border colour
lty='dotted',
lwd = 4,
fill = c("cornflowerblue", "green"),# fill colour, one per set
alpha = 0.50,
label.col = c("orange", "white", "darkorchid4"),# digit colour
cex = 1.5,
fontfamily = "serif",
fontface = "bold",
cat.col = c("darkblue", "darkgreen"),
cat.cex = 1.5,
#cat.dist = c(0.25,0.25),
cat.fontfamily = "serif",
cat.dist = c(0.051,0.051),
main="TFs associated with imprinted genes",
main.pos= c(0.5,0.53),
margin=4)
## 1.3 DNase-seq footprint and FIMO TF-target gene network and Hi-C enhancer
#### igraph network of TFs (from DNase footprints) -> imprinted genes, with
#### edges typed by whether the TF binds the gene's promoter or enhancer.
# Promoter-bound TF -> gene pairs; tag the edge type and normalise columns
# to (a = TF, b = gene, type).
promoter_tfs_of_dnase<-read.csv("other_resources/LZH_dnase/impGenePromoterTF.csv")
promoter_tfs_of_dnase$type<-"promoter"
colnames(promoter_tfs_of_dnase)<-c("a","b","type")
# Enhancer-bound TF -> gene pairs; source columns arrive as (gene, TF), so
# swap them to match the promoter table before binding.
enhancer_tfs_of_dnase<-read.csv("other_resources/LZH_dnase/impGeneEnhancerTF.csv")
enhancer_tfs_of_dnase<-enhancer_tfs_of_dnase[,c(2,1)]
enhancer_tfs_of_dnase$type<-"enhancer"
colnames(enhancer_tfs_of_dnase)<-c("a","b","type")
df<-rbind(promoter_tfs_of_dnase,enhancer_tfs_of_dnase)
library(ggraph)
# Edges run a -> b; the "type" column becomes an edge attribute used below.
pe_tf <- graph_from_data_frame(df)
#ggraph(pe_tf, layout = 'kk',circular=TRUE) + geom_edge_link(aes(colour = type))
### plot by igraph
par(bg="white",mar=c(2,2,2,2))
# Vertex colours: TFs (column a) tomato, target genes (column b) steelblue.
# Fixed: the original matched vertex names against promoter_tfs_of_dnase
# only, leaving enhancer-only TF/gene vertices with an NA colour; match
# against the combined edge table instead.
V(pe_tf)$color[names(V(pe_tf))%in%df$a]<-"tomato" # vertex color
V(pe_tf)$color[names(V(pe_tf))%in%df$b]<-"steelblue"
V(pe_tf)$size <- 8 # vertex size
V(pe_tf)$label.cex <- 0.8 # vertex label size
V(pe_tf)$label.color<-"black"
E(pe_tf)$color<-ifelse(E(pe_tf)$type=="promoter","orange","gray") # edge color
E(pe_tf)$width=3
# Fixed: the original passed the misspelled argument "vetex.shape"
# (and V(pe_tf)$shape was never assigned), which plot.igraph could not use;
# the argument is dropped so vertices keep the default shape, as before.
plot(pe_tf,layout=layout.fruchterman.reingold,
     vertex.frame.color= NA,
     vertex.color=V(pe_tf)$color)
# NOTE(review): the legend labels ("Paternal"/"Maternal"/"Correlated"/
# "Anti-correlated") do not obviously match what is drawn (TF vs gene
# nodes, promoter vs enhancer edges) — confirm the intended legend text.
legend('topleft',
       #bg="white",
       text.col="black",
       legend=c("Paternal","Maternal","Correlated","Anti-correlated"),
       pch=c(19,19,NA,NA), # point symbols for the two node classes
       lty = c(0.5,0.5,1,1),
       box.lty = 2, # dashed legend box
       pt.cex = 3,  # point size
       cex=1,       # legend text size
       col=c("tomato","steelblue","orange","gray"),
       y.intersp=1.5)
### hive plot
## Hive plot with three axes: imprinted genes, TFs bound in promoters, and
## TFs bound in enhancers. Node IDs are prefixed "p"/"e" so axes can be
## assigned from the label text below.
library(HiveR)
promoter_tfs_of_dnase<-read.csv("other_resources/LZH_dnase/impGenePromoterTF.csv")
# Prefix promoter TF IDs with "p" to mark their axis.
promoter_tfs_of_dnase$promoterTFID<-paste("p",promoter_tfs_of_dnase$promoterTFID,sep = "")
promoter_tfs_of_dnase$weight<-1
colnames(promoter_tfs_of_dnase)<-c("tf","target","weight")
enhancer_tfs_of_dnase<-read.csv("other_resources/LZH_dnase/impGeneEnhancerTF.csv")
# Source columns arrive as (gene, TF); swap to (TF, gene) and prefix
# enhancer TF IDs with "e".
enhancer_tfs_of_dnase<-enhancer_tfs_of_dnase[,c(2,1)]
enhancer_tfs_of_dnase$EnhancerTFID<-paste("e",enhancer_tfs_of_dnase$EnhancerTFID,sep="")
enhancer_tfs_of_dnase$weight<-1
colnames(enhancer_tfs_of_dnase)<-c("tf","target","weight")
df<-rbind(promoter_tfs_of_dnase,enhancer_tfs_of_dnase)
hiv<-edge2HPD(df)
# Axis assignment by label prefix: "p" -> axis 2, "e" -> axis 3; everything
# else (gene IDs, e.g. "AT...") keeps axis 1 from edge2HPD.
hiv$nodes$axis[startsWith(hiv$nodes$lab,"p")]<-2
hiv$nodes$axis[startsWith(hiv$nodes$lab,"e")]<-3
hiv$nodes$axis<-as.integer(hiv$nodes$axis)
## node radius (position along each axis)
#hiv$nodes$radius<-as.numeric(sample(seq(1:1000),length(hiv$nodes$radius),replace = TRUE))
# NOTE(review): this assigns 1..n per group but assumes hiv$nodes is ordered
# as all "p" labels, then all "e" labels, then all "AT" labels — confirm
# that edge2HPD() actually produces that ordering.
hiv$nodes$radius<-as.numeric(c(seq(1:length(hiv$nodes$axis[startsWith(hiv$nodes$lab,"p")])),
seq(1:length(hiv$nodes$axis[startsWith(hiv$nodes$lab,"e")])),
seq(1:length(hiv$nodes$axis[startsWith(hiv$nodes$lab,"AT")]))))
# node color (disabled)
#hiv$nodes$color[startsWith(hiv$nodes$lab,"p")]<-"red"
#hiv$nodes$color[startsWith(hiv$nodes$lab,"e")]<-"blue"
#hiv$nodes$color[!startsWith(hiv$nodes$lab,"p")&!startsWith(hiv$nodes$lab,"e")]<-"yellow"
## axis color
hiv$axis.cols<-"black" #c("#E41A1C", "#377EB8", "#4DAF4A")
## plot
plotHive(hiv,axLabs = c("Imprinted genes", "TFs in promoter","TFs in enhancer"),
ch = 5,
dr.nodes = FALSE, # do not draw the nodes themselves
bkgnd = "white",
axLab.gpar=gpar(col="black"),
axLab.pos = c(20, 80, 80),
rot = c(0, 25, -25))
|
e40b1870951f8e66e9e1a159bf09a41fb889487f
|
d8ed284412c99f0ca03491b6b8a65e2ae1ae3964
|
/R/FrEDI.R
|
8b2b48340b7b3ff63bf31a293f43c7c58282ec24
|
[
"MIT"
] |
permissive
|
jwillwerth/FrEDI
|
e3cd3bef0ee6d05cdc7c5f8518728bb35413fab7
|
1d698e41fe4e70f4e6d21eb2958702d0c77d6d01
|
refs/heads/main
| 2023-08-15T23:47:54.396764
| 2021-10-15T17:17:06
| 2021-10-15T17:17:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,357
|
r
|
FrEDI.R
|
### This file documents the R temperature binning package
#' README
#' FrEDI: The Framework for Evaluating Damages and Impacts
#'
#' [FrEDI] is an R package being developed by the U.S. Environmental Protection Agency (EPA). The functions and data provided by this package can be used to estimate climate change impacts for the contiguous United States (CONUS) using the Framework for Evaluating Damages and Impacts (FrEDI), developed as part of EPA's [Climate Change Impacts and Risk Analysis](https://epa.gov/cira/) (CIRA) project. [FrEDI] contains R code that implement FrEDI and allow users to project impacts from climate change and sea level rise for a selected set of sectors.
#'
#' For more information on the Framework and the CIRA project, visit https://epa.gov/cira/, especially https://epa.gov/cira/FrEDI.
#'
#' @section Overview of Functions:
#' The function [FrEDI::run_fredi()] provided in this package is the primary function implementing FrEDI. The main inputs to [FrEDI::run_fredi()] are climate scenarios (temperature in degrees Celsius, global mean sea level rise in centimeters) and socioeconomic scenarios (gross domestic product, regional population). [FrEDI::run_fredi()] projects climate impacts for these scenarios using the temperature binning framework. Users can also specify the levels at which to aggregate the climate impacts.
#'
#' [FrEDI] also contains functions to assist in the pre-processing of input scenarios and the post-processing of outputs. For instance, the pre-processing function [FrEDI::import_inputs()] helps users to import scenarios from user-specified comma-separated value (CSV) files, while the post-processing function [FrEDI::get_plots()] generates a couple of different types of images (creates heatmaps and ribbon plots of climate impacts over time) from the outputs of [FrEDI::run_fredi()].
#'
#' Additional helper functions provided by [FrEDI] include [FrEDI::convertTemps()], [FrEDI::temps2slr()], [FrEDI::get_sectorInfo()], and [FrEDI::aggregate_impacts()].
#'
#' [FrEDI::convertTemps()] helps users to convert between global mean temperature and temperatures for the contiguous United States (CONUS) (both in degrees Celsius). [FrEDI::temps2slr()] helps users to estimate global mean sea level rise (GMSL, in centimeters) from global mean temperature in degrees Celsius.
#'
#' [FrEDI::get_sectorInfo()] allows users to access a list of sectors within FrEDI and related sector information.
#'
#' The post-processing helper function [FrEDI::aggregate_impacts()] helps users to aggregate and/or summarize the outputs of temperature binning (calculates national totals, model averages, impact totals, and interpolations between multiple impact years (for applicable sectors). Users have the option to run [FrEDI::aggregate_impacts()] on the outputs of [FrEDI::run_fredi()]. However, users can alternatively specify aggregation levels as arguments to the main function, [FrEDI::run_fredi()], which will run [FrEDI::aggregate_impacts()] for specified levels before returning outputs (by default, [FrEDI::run_fredi()] returns results aggregated to all aggregation levels).
#'
#'
#' @section Overview of Package Contents:
#' [FrEDI] consists of files in the following directories:
#' * **R**. Contains function definitions (files ending in `".R"`) and configuration files (ending in `".rda"`).
#' * **data**. Contains R Data files ending in `".rdb"`, `".rds"`, and `".rdx"`, containing data included with the package.
#' * **help** and **html**. Contain documentation for functions available to the user, including function descriptions, lists of arguments and outputs, and examples. See `"html/00Index.html"` or the individual R help functions for more information about individual functions.
#' * **Meta**. Contains RDS files (ending in `".rds"`) with information about the package contents.
#' * **extdata/scenarios**. Contains three CSV files for users to test the function for importing data (`"pop_scenario.csv"`, `"slr_from_GCAM.csv"`, and `"GCAM_scenario.csv"` respectively contain examples of a population scenario, sea level rise scenario, and temperature scenario). For more information, refer to documentation for the function `"import_inputs()"`.
#'
#' The FrEDI R package contains a dataset with default results `defaultResults`, which contains annual impacts produced by [FrEDI::run_fredi()] for the default scenarios (i.e., default temperature, GDP and regional population trajectories), and can be loaded into the R workspace (`load(defaultResults)`).
#'
#' Typical use will involve `library(FrEDI)` or `require(FrEDI)`.
#'
#'
#' @section Status:
#' All code in this repository is being provided in a "draft" state and has not been reviewed or cleared by US EPA. This status will be updated as models are reviewed.
#'
#' @section Dependencies:
#' [FrEDI] requires R (>= 3.5.0).
#'
#' Installing [FrEDI] requires [devtools] (Tools to Make Developing R Packages Easier). More information on [devtools] can be found [here.](https://cran.r-project.org/web/packages/devtools/index.html) [devtools] can be installed using `install.packages("devtools")`, or see [link](https://www.r-project.org/nosvn/pandoc/devtools.html) for more information.
#'
#' [FrEDI] depends on:
#' * [tidyr] (Tidy Messy Data). The official documentation for [tidyr] can be found [here](https://cran.r-project.org/web/packages/tidyr/index.html). [tidyr] can be installed using `install.packages("tidyr")`, or see [link](https://tidyr.tidyverse.org/) for more information.
#' * [dplyr] (A Grammar of Data Manipulation). The official documentation for [dplyr] can be found [here](https://cran.r-project.org/web/packages/dplyr/index.html). dplyr can be installed using `install.packages("dplyr")`, or see [link](https://dplyr.tidyverse.org/) for more information.
#' * [ggplot2] (Create Elegant Data Visualisations Using the Grammar of Graphics). The official documentation for [ggplot2] can be found [here](https://cran.r-project.org/web/packages/ggplot2/index.html). [ggplot2] can be installed using `install.packages("ggplot2")`, or see [link](https://ggplot2.tidyverse.org/) for more information.
#'
#' @section License:
#' This repository is released under the MIT License.
#'
#' @section EPA Disclaimer:
#' The United States Environmental Protection Agency (EPA) GitHub project code is provided on an "as is" basis and the user assumes responsibility for its use. EPA has relinquished control of the information and no longer has responsibility to protect the integrity, confidentiality, or availability of the information. Any reference to specific commercial products, processes, or services by service mark, trademark, manufacturer, or otherwise, does not constitute or imply their endorsement, recommendation or favoring by EPA. The EPA seal and logo shall not be used in any manner to imply endorsement of any commercial product or activity by EPA or the United States Government.
#'
#' By submitting a pull request to the GitHub and/or by installing this package, you make an agreement with EPA that you will not submit a claim of compensation for services rendered to EPA or any other federal agency. Further, you agree not to charge the time you spend developing software code related to this project to any federal grant or cooperative agreement.
#'
#'
#' @docType package
#' @name FrEDI
#' @md
NULL
#> NULL
|
ab9cb711d8c3268a9251cba1b126748f57c8e131
|
c773007a1faad874a93f2597092c18077b9252d7
|
/server.R
|
048117b43d3e41cde2b7e02f92930bef294353e6
|
[] |
no_license
|
arunsri91/Chattalyzer
|
fa00952c63fd6be9ba4094123ae70f9428ef64a7
|
3bd62675b5c4dc099f1a63f05df02c8cd747903c
|
refs/heads/master
| 2020-03-18T21:34:41.121670
| 2018-05-29T11:56:14
| 2018-05-29T11:56:14
| 135,287,585
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 32,162
|
r
|
server.R
|
# This is the server logic for a Shiny web application.
# You can find out more about building applications with Shiny here:
#
# http://shiny.rstudio.com
#
library(shiny)
shinyServer(function(input, output) {
output$plot <- renderPlot({
# input$file1 will be NULL initially. After the user selects
# and uploads a file, head of that data file by default,
# or all rows if selected, will be shown.
req(input$file1)
WappChat <- readLines(input$file1$datapath)
#header = input$header,
#sep = input$sep,
#quote = input$quote)
#############DTM AND BOW BUILDING#############################
require(text2vec)||install.packages(text2vec)
require(data.table)||install.packages(data.table)
require(stringr)||install.packages(stringr)
require(tm)||install.packages(tm)
require(RWeka)||install.packages(RWeka)
require(tokenizers)||install.packages(tokenizers)
require(slam)||install.packages(slam)
require(wordcloud)||install.packages(wordcloud)
require(ggplot2)||install.packages(ggplot2)
require("ggmap")||install.packages("ggmap")
require("leaflet")||install.packages("leaflet")
require("plotGoogleMaps")||install.packages("plotGoogleMaps")
require("text2vec")||install.packages("text2vec")
require("ggmap")||require("ggmap")
require("gsubfn")||require("gsubfn")
require("dplyr")||install.packages("dplyr")
require(memoise)||install.packages(memoise)
require(ggplot2)||install.packages(ggplot2)
require(lubridate)||install.packages(lubridate)
require(reshape2)||install.packages(reshape2)
require(tm)||install.packages(tm)
require(SnowballC)||install.packages(SnowballC)
require(wordcloud)||install.packages(wordcloud)
require(RColorBrewer)||install.packages(RColorBrewer)
require(stringr)||install.packages(stringr)
require(syuzhet)||install.packages(syuzhet)
require(dplyr)||install.packages(dplyr)
library(text2vec)
library(data.table)
library(stringr)
library(tm)
library(RWeka)
library(tokenizers)
library(slam)
library(wordcloud)
library(ggplot2)
library("ggmap")
library("leaflet")
library("plotGoogleMaps")
library("text2vec")
require("ggmap")
require("gsubfn")
library("dplyr")
library(memoise)
################### SENTIMENT ANALYSIS############################
library(ggplot2)
library(lubridate)
#install.packages("Scale")
#library(Scale)
library(reshape2)
library(tm)
library(SnowballC)
library(wordcloud)
library(RColorBrewer)
library(stringr)
#install.packages("syuzhet")
#install.packages("syuzhet")
library(syuzhet)
library(dplyr )
wappDf = data.frame(NULL)
#wappDft
#l=5
for (l in 1:length(WappChat))
{
text = WappChat[l]
split = strsplit(text,": ") # Notice \\? in the pattern
#length(split[[1]])
if (length(split[[1]]) == 2 )
{
datestmp = split[[1]][1]
message = gsub("[^[:alnum:][:space:]]","",split[[1]][2])
message = gsub("Ã|â|â|ð|à|²|¹|à|³³|Â","",message)
}
else if ((length(split[[1]])) == 1)
{
datestmp =""
message = gsub("[^[:alnum:][:space:]]","",split[[1]][1])
message=gsub("Ã|â|â|ð|à|²|¹|à|³³","",message)
}
else if ((length(split) ==0))
{
datestmp=""
message=""
}
if (datestmp != "")
{
date = gsub("\\[","",strsplit(datestmp,',')[[1]][1])
name = gsub("[^[:alnum:][:space:]]","",strsplit(datestmp,']')[[1]][2])
name = gsub("Ã|â|â|ð|à|à|Â","",name)
name = gsub("\\s","",name)
#name = strsplit(datestmp,']')[[1]][2]
}
if (datestmp == '')
{
date = "Not Available"
name = "Not Available"
}
wappDft = data.frame(name,date,message,stringsAsFactors = F)
wappDf = rbind(wappDf,wappDft)
}
wappDf_new <- data.frame(NULL)
wappDf_new <- wappDf[!duplicated(wappDf), ]
if (input$disp == "Chatcount"){
par(mar = c(0,0,0,0))
plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n', xaxt = 'n', yaxt = 'n')
text(x = 0.5, y = 0.55, paste("Welcome to Whatsapp Chatter Analytics. \n",
length(wappDf_new$message), " Messages Uploaded & Ready for Analysis \n Please Select on different Radio Boxes under insights and \n Click on Track! to get the actual insights"),
cex = 1.2, col = "blue", family="serif", font=2, adj=0.5)
#text(x = 0.34, y = 0.9, paste(" Currently it contains : ",length(wappDf_new$message),
# "Chat Messages From The Whatsapp Group "),
# cex = 1.5, col = "gray30", family="serif", font=2, adj=0.5)
}
else if(input$disp == "UserStat") {
#setProgress(message = "Pulling Top 15 Active Users.........")
summarized_users <- wappDf_new %>%
group_by(wappDf_new$name) %>%
summarise(number = n())
head(summarized_users)
active_users <- summarized_users[order(summarized_users$number,decreasing = TRUE),]
active_users <- subset(active_users,active_users$`wappDf_new$name` != "Not Available")
Atleast_1post <- length(active_users$`wappDf_new$name`)
active_users_df <- as.data.frame(active_users)
if(nrow(active_users)>=15){
top_15_user <- active_users_df[1:15,]
}else{
top_15_user <- active_users_df[1:nrow(active_users),]
}
top_15contrib <- round(col_sums(as.matrix(top_15_user$number)) / colSums(as.matrix(active_users_df$number)),2)
active_users_df$inactive <- ifelse(active_users_df$number<=5,1,0)
#head(active_users_df)
#tail(active_users_df)
temp_inactive <- round(colSums(as.matrix(active_users_df$inactive)) / nrow(active_users_df),2)
display_text<-paste("=>A total of ",nrow(active_users_df),"users have been involved in the Conversation. <=\n","=>Among all the Users, ",temp_inactive*100,
"% of Users Have been very much inactive by not posting \n for more than 5 times in the entire whatsapp chat and the rest \n",
(1 - temp_inactive)*100, "% of population have posted more than 5 times. <=\n => Also the Top N (here N is = ",nrow(top_15_user),") Users contributed to ",
(top_15contrib)*100,"% of the overall chat conversation\n", "while the rest ",nrow(active_users_df)-nrow(top_15_user),
"Users contribute to ", (1 - top_15contrib)*100,"% of the conversations <=\n",
" \n",
" \n",
" Assumptions - This insight works well only if number of users in \n",
" the group is atleast 15 or above \n",
"=>Top N- Can be 15 if users > 15 in group or \n",
"Equal To Number of Users if Total Users is Less than 15<=\n")
par(mar = c(0,0,0,0))
plot(c(0, 1), c(0, 1), ann = F, bty = 'n', type = 'n', xaxt = 'n', yaxt = 'n')
text(x = 0.6, y = 0.6,display_text,
cex = 1.2, col = "blue", family="Arial", font=2, adj=0.5)
}
else if (input$disp =="Active_Days_Post_Count_Order") {
withProgress({
setProgress(message = "Pulling Top 15 Active Days.....")
summarized_date <- wappDf_new %>%
group_by(wappDf_new$date) %>%
summarise(number = n())
head(summarized_date)
date_post <- summarized_date[order(summarized_date$number,decreasing = TRUE),]
date_post <- subset(date_post,date_post$`wappDf_new$date`!="Not Available")
date_post_df <- as.data.frame(date_post)
top_15_active_days <- date_post_df[1:15,]
colnames(top_15_active_days) <- c("Days","Posts_Count")
#barplot(top_10$Count,main="Page Views", horiz=TRUE,names.arg=top_10$ContactName,las=1)
top_15_active_days$Days<-reorder(top_15_active_days$Days,-top_15_active_days$Posts_Count)
#############MOST ACTIVE DAYS IN WHAATSAPP GROUP######################
#output$plot <- renderplot({
top_day <- as.array(as.character(top_15_active_days$Days))
top_cnt <- as.array(as.numeric(top_15_active_days$Posts_Count))
dist_text <- paste("Most Active day of the Group is ",top_day[1]," with ",top_cnt[1]," Posts")
(ggplot(top_15_active_days,aes(Days,Posts_Count),colour='red')+
geom_bar(stat="identity")+
ggtitle("MOST 15 ACTIVE DAYS BASED ON NO OF POST", subtitle = dist_text)+
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5)))
})
#return(top_15_active_days)
}
else if (input$disp =="Active_Days_Chronological_Order") {
withProgress({
summarized_date <- wappDf_new %>%
group_by(wappDf_new$date) %>%
summarise(number = n())
head(summarized_date)
date_post <- summarized_date[order(summarized_date$number,decreasing = TRUE),]
date_post <- subset(date_post,date_post$`wappDf_new$date`!="Not Available")
date_post_df <- as.data.frame(date_post)
#plot(as.data.frame(active_users))
#?subset()
#install.packages("plot_ly")
top_15_active_days <- as.data.frame(NULL)
top_15_active_days <- date_post_df[1:15,]
colnames(top_15_active_days) <- c("Days","Count")
top_15_active_days$Days <- as.character(top_15_active_days$Days)
#top_15_active_days$Days <- as.Date(top_15_active_days$Days,"%d/%m/%Y")
top_15_active_days$Datetemp <- as.Date(top_15_active_days$Days,"%d/%m/%Y")
top_15_active_days$Datetemp <- as.character(top_15_active_days$Datetemp)
top_15_active_days$Datetemp <- str_replace_all(top_15_active_days$Datetemp,"-","")
top_15_active_days <- top_15_active_days[order(top_15_active_days$Datetemp,decreasing = F),]
#barplot(top_10$Count,main="Page Views", horiz=TRUE,names.arg=top_10$ContactName,las=1)
top_15_active_days$Days<-reorder(top_15_active_days$Days,as.numeric(top_15_active_days$Datetemp))
#############MOST ACTIVE DAYS IN WHAATSAPP GROUP######################
#plot(top_15_active_days,type="b")
ggplot(top_15_active_days,aes(Days,Count),colour='red')+
geom_bar(stat="identity")+
ggtitle("MOST ACTIVE DAYS",subtitle = "Top 15 Active Days pulled and arranged in Chronological Order")+
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5))
})
#return(top_15_active_days)
}
else if (input$disp =="ActiveUsers") {
withProgress({
setProgress(message = "Pulling Top 15 Active Users.........")
summarized_users <- wappDf_new %>%
group_by(wappDf_new$name) %>%
summarise(number = n())
#head(summarized_users)
active_users <- summarized_users[order(summarized_users$number,decreasing = TRUE),]
active_users <- subset(active_users,active_users$`wappDf_new$name` != "Not Available")
Atleast_1post <- length(active_users$`wappDf_new$name`)
active_users_df <- as.data.frame(active_users)
top_15 <- active_users_df[1:15,]
colnames(top_15) <- c("ContactName","Posts_Count")
top_15$ContactName<-reorder(top_15$ContactName,-top_15$Posts_Count)
top_per <- as.array(as.character(top_15$ContactName))
top_post <- as.array(as.character(top_15$Posts_Count))
#top_per[1]
dis_text <- paste("Most Active Person is ",top_per[1]," with ",top_post[1],
"posts ")
ggplot(top_15,aes(ContactName,Posts_Count),colour='red')+
geom_bar(stat="identity")+
ggtitle("ACTIVE USERS",subtitle = dis_text)+
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=0.5))
})
}
else if (input$disp =="InactiveUsers") {
withProgress({
setProgress(message = "Pulling Top 20 InActive Users.........")
summarized_users <- wappDf_new %>%
group_by(wappDf_new$name) %>%
summarise(number = n())
head(summarized_users)
active_users <- summarized_users[order(summarized_users$number,decreasing = FALSE),]
#head(active_users)
active_users <- subset(active_users,active_users$`wappDf_new$name` != "Not Available")
########### This displays the number of users who had posted atleast once inside group#
Atleast_1post <- length(active_users$`wappDf_new$name`)
active_users_df <- as.data.frame(active_users)
inactive_users <- summarized_users[order(summarized_users$number,decreasing = FALSE),]
inactive_users <- subset(inactive_users,inactive_users$`wappDf_new$name` != "Not Available")
inactive_users_df <- as.data.frame(inactive_users)
inactive_users_new <- subset(inactive_users_df,inactive_users_df$number <=5)
#head(inactive_users)
summarized_inusers <- inactive_users_new %>%
group_by(inactive_users_new$number) %>%
summarise(number = n())
inactive_user_bucket <- as.data.frame(summarized_inusers)
colnames(inactive_user_bucket) <- c("No_of_Posts","No_Of_Users")
ggplot(inactive_user_bucket,aes(No_of_Posts,No_Of_Users),colour='red')+
geom_bar(stat="identity")+
ggtitle("INACTIVE USERS ",subtitle = "No of Inactive Users Who have Posted a min of 1 post and Max of 5 posts")+
theme(axis.text.x=element_text(angle=90,hjust=1,vjust=1))
})
}
else if (input$disp =="FrequentWords") {
library("wordcloud")
library("tm")
l <- iconv(wappDf_new$message, to='ASCII//TRANSLIT')
#create corpus
lc <- Corpus(VectorSource(l))
#clean up
lc <- tm_map(lc, content_transformer(tolower))
lc <- tm_map(lc, removePunctuation)
lc <- tm_map(lc, function(x)removeWords(x,stopwords()))
lc <- tm_map(lc,removeWords,stopwords("en"))
lc <- tm_map(lc,removeWords,c('photojpg','attached','contact','admin','add','message',
'going','learning','walking','talking','talk','walk',
'again','find','disha','nayi','website','day','must',
'another','doesnt','years',
'can','when','she','he','him','her','them','may','has',
'91aa','make','been','would','has','might','pls','does','you',
'how','have','what','yes','no','your','their','etc','than','his','please',
'also','any','from','much','about','anyone','doing','why','where','too',
'dont','its','should','some','why','try','only','lot','number','group',
'very','more','even','take','different','sure','most','know','now','give',
'got','other','added','changed','true','many','well','same','lot','get',
'all','like','thats','our','both','were','new','see','here','used','both',
'never','did','had','back','need','needs','done','around','asd','year',
'days','keep','let','who','just','long','after','name','things','sometimes',
'understand','image','thanks','thank','one','help','other','others','thing',
'above','below','which','able','use','near','every','share','come',
'dear','since','pages','always','stop','omitted','someone','something',
'want','deleted','way','right','feel','think','suggest','using','nice',
'time','phone','out','stay','start','details','free','still','important',
'small','few','idea','life','ask','great','yrs','say',
'said','delay','senior','special','because','cure','read',
'work','once','words','touch','hand','body','friends',
'cant','wont','tell','friends','little','issues','yourself',
'themself','check','words','two','three','four','five',
'six','seven','eight','helpful','play','anything','home','old',
'first','early','while','those','input','video','better','part',
'cant','cannot','put','wrong','aba','available','makes',
'left','own','based','really','change','mother','address',
'being','age','giving','give','forward','taking','months',
'bemer','through','wish','else','giving','look','works',
'before','says','during','though','head','actually','month',
'looking','person'))
library(RColorBrewer)
pal2 <- brewer.pal(8,"Dark2")
wordcloud(lc,min.freq=2,max.words=75, random.order=F,random.color = T, colors=pal2)
}
else if (input$disp =="Rarewords") {
verbatimTextOutput("Under construction")
}
else if (input$disp =="Cluster") {
}
else if (input$disp =="Emotion") {
#texts <- readLines("_chat.txt")
texts <- wappDf_new$message
docs <- Corpus(VectorSource(texts))
#clean our chat data
trans <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
docs <- tm_map(docs, trans, "/")
docs <- tm_map(docs, trans, "@")
docs <- tm_map(docs, trans, "\\|")
docs <- tm_map(docs, content_transformer(tolower))
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, removeWords, stopwords("en"))
docs <- tm_map(docs, removeWords, c("„","â€â"))
#docs <- tm_map(docs, removewords, )
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, stripWhitespace)
#docs <- tm_map(docs, stemDocument)
#create the document term matrix
dtm <- TermDocumentMatrix(docs)
mat <- as.matrix(dtm)
v <- sort(rowSums(mat),decreasing=TRUE)
#Data frame
data <- data.frame(word = names(v),freq=v)
withProgress({
setProgress(message="Analyzing Emotions........")
Sentiment <- get_nrc_sentiment(texts)
})
#dim(Sentiment)
text_df <- as.data.frame(texts)
text <- cbind(texts,Sentiment)
#count the sentiment words by category
TotalSentiment <- data.frame(colSums(text[,c(2:9)]))
names(TotalSentiment) <- "count"
TotalSentiment <- cbind("sentiment" = rownames(TotalSentiment), TotalSentiment)
rownames(TotalSentiment) <- NULL
sentiment_array <- TotalSentiment[order(TotalSentiment$count,decreasing = T),]
temp_array <- as.array(as.character(sentiment_array$sentiment))
#temp_array[1]
new_display_text <- paste("THe Top Three Emotion across the Discussion is ",toupper(temp_array[1]) ,
" followed by " , toupper(temp_array[2]), " followed by ",
toupper(temp_array[3]))
#head(TotalSentiment)
#total sentiment score of all texts
ggplot(data = TotalSentiment, aes(x = sentiment, y = count)) +
geom_bar(aes(fill = sentiment), stat = "identity") +
theme(legend.position = "none") +
#geom_label(data = new_display_text)+
xlab("Emotion") + ylab("Total Count") + ggtitle("Emotion Analysis",subtitle=new_display_text)
}
else if (input$disp =="Sentiment") {
#withProgress()
#texts <- readLines("_chat.txt")
texts <- wappDf_new$message
docs <- Corpus(VectorSource(texts))
#clean our chat data
trans <- content_transformer(function (x , pattern ) gsub(pattern, " ", x))
docs <- tm_map(docs, trans, "/")
docs <- tm_map(docs, trans, "@")
docs <- tm_map(docs, trans, "\\|")
docs <- tm_map(docs, content_transformer(tolower))
docs <- tm_map(docs, removeNumbers)
docs <- tm_map(docs, removeWords, stopwords("en"))
docs <- tm_map(docs, removeWords, c("„","â€â"))
#docs <- tm_map(docs, removewords, )
docs <- tm_map(docs, removePunctuation)
docs <- tm_map(docs, stripWhitespace)
#docs <- tm_map(docs, stemDocument)
#create the document term matrix
dtm <- TermDocumentMatrix(docs)
mat <- as.matrix(dtm)
v <- sort(rowSums(mat),decreasing=TRUE)
#Data frame
data <- data.frame(word = names(v),freq=v)
withProgress({
setProgress(message="Analyzing Sentiments......")
Sentiment <- get_nrc_sentiment(texts)
})
#dim(Sentiment)
text_df <- as.data.frame(texts)
#dim(text_df)
#head(Sentiment)
text <- cbind(texts,Sentiment)
#head(text)
#text[2386,]
#write.csv(text,"Sentiments_classified.csv")
#count the sentiment words by category
TotalSentiment <- data.frame(colSums(text[,c(2:9)]))
names(TotalSentiment) <- "count"
TotalSentiment <- cbind("sentiment" = rownames(TotalSentiment), TotalSentiment)
rownames(TotalSentiment) <- NULL
nrc_data <- Sentiment
temp_sent <- NULL
temp_sent <- colSums(prop.table(nrc_data[, 9:10]))
temp_sentdf <- as.data.frame(temp_sent)
temp_sentd <- c("negative","positive")
temp_sentdf2 <- as.data.frame(temp_sentd)
new_df <- cbind(temp_sentdf,temp_sentdf2)
sored_df <- new_df[order(new_df$temp_sent,decreasing = T),]
display_text <- paste("This Group chat is ",round(as.numeric(temp_sent[1]),2), " % Negative and ",
round(as.numeric(temp_sent[2]),2), " % Positive",
" and overall ",toupper(as.character(sored_df[1,2])) , " Sentiment is Prevalent")
#paste("the most common one is ",temp_sent[1])
anger <- which(Sentiment$anger>0)
texts[anger]
barplot(
sort(colSums(prop.table(nrc_data[, 9:10]))),
horiz = TRUE,
cex.names = 0.7,
las = 1,
sub = display_text,
main = "Sentiment Analysis", xlab="Percentage"
)
}
}
)
# output$DateText <- renderPlot(
# {
# WappChat <- readLines(input$file1$datapath)
# #header = input$header,
# #sep = input$sep,
# #quote = input$quote)
# #############DTM AND BOW BUILDING#############################
# require(text2vec)||install.packages(text2vec)
# require(data.table)||install.packages(data.table)
# require(stringr)||install.packages(stringr)
# require(tm)||install.packages(tm)
# require(RWeka)||install.packages(RWeka)
# require(tokenizers)||install.packages(tokenizers)
# require(slam)||install.packages(slam)
# require(wordcloud)||install.packages(wordcloud)
# require(ggplot2)||install.packages(ggplot2)
# require("ggmap")||install.packages("ggmap")
# require("leaflet")||install.packages("leaflet")
# require("plotGoogleMaps")||install.packages("plotGoogleMaps")
# require("text2vec")||install.packages("text2vec")
# require("ggmap")||require("ggmap")
# require("gsubfn")||require("gsubfn")
# require("dplyr")||install.packages("dplyr")
# require(memoise)||install.packages(memoise)
# require(ggplot2)||install.packages(ggplot2)
# require(lubridate)||install.packages(lubridate)
# require(reshape2)||install.packages(reshape2)
# require(tm)||install.packages(tm)
# require(SnowballC)||install.packages(SnowballC)
# require(wordcloud)||install.packages(wordcloud)
# require(RColorBrewer)||install.packages(RColorBrewer)
# require(stringr)||install.packages(stringr)
# require(syuzhet)||install.packages(syuzhet)
# require(dplyr)||install.packages(dplyr)
# library(text2vec)
# library(data.table)
# library(stringr)
# library(tm)
# library(RWeka)
# library(tokenizers)
# library(slam)
# library(wordcloud)
# library(ggplot2)
# library("ggmap")
# library("leaflet")
# library("plotGoogleMaps")
# library("text2vec")
# require("ggmap")
# require("gsubfn")
# library("dplyr")
# library(memoise)
# ################### SENTIMENT ANALYSIS############################
# library(ggplot2)
# library(lubridate)
# #install.packages("Scale")
# #library(Scale)
# library(reshape2)
# library(tm)
# library(SnowballC)
# library(wordcloud)
# library(RColorBrewer)
# library(stringr)
# #install.packages("syuzhet")
# #install.packages("syuzhet")
# library(syuzhet)
# library(dplyr )
# wappDf = data.frame(NULL)
# #wappDft
# #l=5
#
# for (l in 1:length(WappChat))
# {
# text = WappChat[l]
#
# split = strsplit(text,": ") # Notice \\? in the pattern
# #length(split[[1]])
# if (length(split[[1]]) == 2 )
# {
# datestmp = split[[1]][1]
#
# message = gsub("[^[:alnum:][:space:]]","",split[[1]][2])
# message = gsub("Ã|â|â|ð|à|²|¹|à|³³|Â","",message)
# }
# else if ((length(split[[1]])) == 1)
# {
# datestmp =""
#
# message = gsub("[^[:alnum:][:space:]]","",split[[1]][1])
# message=gsub("Ã|â|â|ð|à|²|¹|à|³³","",message)
#
# }
# else if ((length(split) ==0))
# {
# datestmp=""
#
# message=""
# }
#
#
# #latitude = strsplit(message,",")[[1]][1]
#
# #longitude = strsplit(message,",")[[1]][2]
# if (datestmp != "")
# {
# date = gsub("\\[","",strsplit(datestmp,',')[[1]][1])
# name = gsub("[^[:alnum:][:space:]]","",strsplit(datestmp,']')[[1]][2])
# name = gsub("Ã|â|â|ð|à|à|Â","",name)
# name = gsub("\\s","",name)
# #name = strsplit(datestmp,']')[[1]][2]
# }
# if (datestmp == '')
# {
# date = "Not Available"
# name = "Not Available"
# }
# wappDft = data.frame(name,date,message,stringsAsFactors = F)
# wappDf = rbind(wappDf,wappDft)
# }
# wappDf_new <- data.frame(NULL)
# wappDf_new <- wappDf[!duplicated(wappDf), ]
#
#
# library("wordcloud")
# library("tm")
# l <- iconv(wappDf_new$message, to='ASCII//TRANSLIT')
# #create corpus
# lc <- Corpus(VectorSource(l))
#
# #clean up
#
# lc <- tm_map(lc, content_transformer(tolower))
# lc <- tm_map(lc, removePunctuation)
# lc <- tm_map(lc, function(x)removeWords(x,stopwords()))
# lc <- tm_map(lc,removeWords,stopwords("en"))
# lc <- tm_map(lc,removeWords,c('photojpg','attached','contact','admin','add','message',
# 'going','learning','walking','talking','talk','walk',
# 'again','find','disha','nayi','website','day','must',
# 'another','doesnt','years',
# 'can','when','she','he','him','her','them','may','has',
# '91aa','make','been','would','has','might','pls','does','you',
# 'how','have','what','yes','no','your','their','etc','than','his','please',
# 'also','any','from','much','about','anyone','doing','why','where','too',
# 'dont','its','should','some','why','try','only','lot','number','group',
# 'very','more','even','take','different','sure','most','know','now','give',
# 'got','other','added','changed','true','many','well','same','lot','get',
# 'all','like','thats','our','both','were','new','see','here','used','both',
# 'never','did','had','back','need','needs','done','around','asd','year',
# 'days','keep','let','who','just','long','after','name','things','sometimes',
# 'understand','image','thanks','thank','one','help','other','others','thing',
# 'above','below','which','able','use','near','every','share','come',
# 'dear','since','pages','always','stop','omitted','someone','something',
# 'want','deleted','way','right','feel','think','suggest','using','nice',
# 'time','phone','out','stay','start','details','free','still','important',
# 'small','few','idea','life','ask','great','yrs','say',
# 'said','delay','senior','special','because','cure','read',
# 'work','once','words','touch','hand','body','friends',
# 'cant','wont','tell','friends','little','issues','yourself',
# 'themself','check','words','two','three','four','five',
# 'six','seven','eight','helpful','play','anything','home','old',
# 'first','early','while','those','input','video','better','part',
# 'cant','cannot','put','wrong','aba','available','makes',
# 'left','own','based','really','change','mother','address',
# 'being','age','giving','give','forward','taking','months',
# 'bemer','through','wish','else','giving','look','works',
# 'before','says','during','though','head','actually','month',
# 'looking','person'))
# library(RColorBrewer)
#
# pal2 <- brewer.pal(8,"Dark2")
# wordcloud(lc,min.freq=2,max.words=75, random.order=F,random.color = T, colors=pal2)
#
# }
# )
})
|
429c76c6d5c6e6fd9acdcc1d6af0edb51c00bf41
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/HPbayes/examples/mp8.ll.Rd.R
|
70f95d98c8eecd053393d086fe66bcad0fabbf7c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 593
|
r
|
mp8.ll.Rd.R
|
library(HPbayes)
### Name: mp8.ll
### Title: Binomial likelihood for a set of Heligman-Pollard Parameters
### Aliases: mp8.ll
### Keywords: misc
### ** Examples
##A set of parameters##
# Eight Heligman-Pollard model parameters (length 8, one per model term).
theta <- c(0.06008, 0.31087, 0.34431, 0.00698,
1.98569, 26.71071, 0.00022, 1.08800)
##Deaths and persons at risk##
# lx: persons at risk per age class (passed as nrisk);
# dx: deaths per age class (passed as ndeath).  Same length as each other.
lx <- c(1974, 1906, 1860, 1844, 1834, 1823, 1793, 1700, 1549, 1361,
1181, 1025, 870, 721, 571, 450, 344, 256, 142, 79, 41, 8)
dx <- c(68, 47, 16, 10, 13, 29, 92, 151, 188, 179, 156, 155, 147, 150,
122, 106, 88, 113, 63, 38, 32, 8)
# Evaluate the binomial likelihood of the data under these parameters.
mp8.ll(theta=theta, nrisk=lx, ndeath=dx)
73bcf7845972f7c28a82fc9de7cea1f6a7095e8d
|
142ac9941ab626c0523ede00777bee280d95c2f0
|
/shinyKmeans2/ui.R
|
e10c59973503358cd3457df8ecf0796929a5c67d
|
[
"MIT"
] |
permissive
|
caleblareau/shinyTeach
|
66e7c86f3fdf6425754ee7645e427cf711b96328
|
0ffef423431298087bfd30b668b670fd59eb50c2
|
refs/heads/master
| 2021-01-12T11:55:16.787504
| 2017-05-21T18:02:31
| 2017-05-21T18:02:31
| 69,316,315
| 6
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,654
|
r
|
ui.R
|
# Shiny UI definition for the "Kmeans2" teaching app.
# startup.R is expected to load shiny/shinythemes/plotly and define `df`,
# the data frame whose columns feed the variable selectors below.
source("extRa/startup.R")
shinyUI(
navbarPage(
# Navbar brand: logo image served from the app's www/ directory.
HTML("<img src='harvard-logo.png'/>"),
# Main tab: scatterplot with a k-means style sidebar of controls.
tabPanel("Visualize",
fluidPage(
pageWithSidebar(
headerPanel('ADVANCED'),
sidebarPanel(
# X/Y variable pickers; columns 1, 2, 3 and 5 of `df` are excluded,
# with defaults taken from columns 13 and 19.
selectInput('xcol', 'X Variable', names(df)[c(-1,-2,-3,-5)],
selected=names(df)[[13]], selectize = TRUE),
selectInput('ycol', 'Y Variable', names(df)[c(-1,-2,-3,-5)],
selected=names(df)[[19]], selectize = TRUE),
numericInput('clusters', 'Cluster count', 4, min = 1, max = 9),
# Server-rendered controls (see matching output ids on the server side).
uiOutput('xmean'),
# The 'ymean' control only appears when exactly 3 clusters are chosen.
conditionalPanel( condition = "input.clusters == 3", # This is Javascript
uiOutput('ymean')
)
),
mainPanel(
# Interactive plots rendered by plotly on the server.
plotlyOutput('plot1'),
plotlyOutput('brush')
)
)
)
),
# Secondary tab: static usage guide rendered from markdown.
tabPanel("Guide",
includeMarkdown("www/guide.Rmd")
),
##########
# FOOTER
##########
theme = shinytheme("cerulean"),
footer = HTML(paste0('<P ALIGN=Center>Kmeans2 © <A HREF="mailto:caleblareau@g.harvard.edu">Caleb Lareau</A>')),
collapsible = TRUE,
fluid = TRUE,
windowTitle = "Kmeans2"
)
)
|
aa5783368ad8ba2ce800850e2610592f10b38f78
|
897f0581bfc3403318f56072f7af1163b8189733
|
/Thaps/ttest.R
|
7a0849b88e76d39b63cd25122d06a1c41a555d9e
|
[] |
no_license
|
jashworth-UTS/ja-scripts
|
2985891e628bae59b1f4b8696739cbf63b5a2dc2
|
ac837ac0fee63c27b3b8ac4d9a5022810fb31976
|
refs/heads/master
| 2021-05-28T18:39:20.272694
| 2015-02-04T02:35:17
| 2015-02-04T02:35:17
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,303
|
r
|
ttest.R
|
# Justin Ashworth
# Institute for Systems Biology
# 2011
# Pattern identifying the ratio (sample) columns in the input table.
ratiocolregex <- 'Day'

# Per-probe Welch t-test of Day1 vs. Day4 ratio columns.
#
# Reads the tab-delimited ratio table `ratiosfile`, tests each row for a
# Day1/Day4 difference, appends the raw p-value (pvs), the adjusted
# p-value (p.adj), the mean Day4 change and its sd, sorts by raw p-value,
# moves the ratio columns to the end, writes the table out via tsv(), and
# returns it.
do.ttest <-
	function(ratiosfile='probe.ratios.mean.tsv')
{
	tbl <- read.delim(ratiosfile)
	ratiocols <- grep(ratiocolregex, names(tbl), value=TRUE)
	ratios <- as.matrix(tbl[, ratiocols])
	colnames(ratios) <- ratiocols
	day1 <- grep('Day1', colnames(ratios))
	day4 <- grep('Day4', colnames(ratios))
	# Row-wise two-sample test: Day1 columns against Day4 columns.
	tbl$pvs <- apply(ratios, 1, function(row) t.test(row[day1], row[day4])$p.value)
	tbl$p.adj <- p.adjust(tbl$pvs)
	tbl$change <- apply(ratios, 1, function(row) mean(as.numeric(row[day4])))
	tbl$sd <- apply(ratios, 1, function(row) sd(as.numeric(row[day4])))
	# Most significant probes first; annotation columns before ratio columns.
	tbl <- tbl[order(tbl$pvs), ]
	is.ratio <- grepl('Day', names(tbl))
	tbl <- tbl[, c(which(!is.ratio), which(is.ratio))]
	tsv(tbl, paste(gsub('.tsv','',ratiosfile), 'ttest.tsv', sep='.'))
	tbl
}
# Plot the t-test results produced by do.ttest().
#
# Writes three PNG files to the working directory:
#  - ttest.day1.vs.day4.png:       heatmap of the top 80 probes
#  - ttest.day1.vs.day4.known.png: heatmap restricted to annotated genes
#  - ttest.day1.vs.day4.top.png:   stripcharts of the 9 most significant probes
#
# d: data frame returned by do.ttest() (already sorted by p-value) with
#    'Day1*'/'Day4*' ratio columns plus 'geneid', 'desc' and 'p.adj'.
plot.ttest =
function(d)
{
	library(gplots)
	# Top 80 rows; d is assumed sorted by p-value (do.ttest() does this).
	dd=d[ 1:80, ]
	ratiocols = grep(ratiocolregex,names(dd))
	sig.mat = as.matrix( dd[ , ratiocols ] )
	rownames(sig.mat) = dd$geneid
	png('ttest.day1.vs.day4.png',width=1000,height=1000)
	op=par(oma=c(12,0,0,16))
	heatmap.2( sig.mat, trace='none', scale='none', col=colorpanel(32,'blue','black','yellow'), dendrogram='none', labRow='', Rowv='none', Colv='none', main = 'Gwenn\'s chemostats, CO2 increasing\nttest, Day 1 vs. Day 4' )
	dev.off()
	# Known genes only: drop empty and 'hypothetical' annotations.
	ddd=dd[ !is.na(dd$desc) & !dd$desc=='' & !grepl('hypothetical',dd$desc), ]
	ratiocols = grep(ratiocolregex,names(ddd))
	sig.mat = as.matrix( ddd[ , ratiocols ] )
	# FIX: use rownames() consistently (was row.names() here, rownames() above).
	rownames(sig.mat) = ddd$desc
	png('ttest.day1.vs.day4.known.png',width=1000,height=1000)
	op=par(oma=c(12,0,0,16))
	heatmap.2( sig.mat, trace='none', col=colorpanel(32,'blue','black','yellow'), dendrogram='none', Colv='none', main = 'Gwenn\'s chemostats, CO2 increasing\nt-test, Day 1 vs. Day 4' )
	dev.off()
	# Stripcharts of the 9 most significant probes, Day1 vs. Day4.
	day1day4 = c( rep('Day1',4), rep('Day4',4) )
	day1 = grep( 'Day1', colnames(d) )
	day4 = grep( 'Day4', colnames(d) )
	png('ttest.day1.vs.day4.top.png',width=1000,height=1000)
	par(mfrow=c(3,3))
	par(cex=1.5)
	par(cex.main=0.75)
	# FIX: removed the stray empty argument (', ,') that passed a missing
	# positional argument to stripchart(), and spelled out TRUE (was T).
	apply( d[1:9,], 1, function(x) { stripchart( as.numeric(c(x[day1],x[day4]))~day1day4, main=paste('p.adj=',x[colnames(d)=='p.adj'],sep=''), vertical=TRUE, xlab=x[1], ylab='log ratio',pch=21 ) } )
	dev.off()
}
#d = do.ttest()
#plot.ttest(d)
|
46f1520717bb38412b7b2883e616b556afce9bd2
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/IDE/examples/constant_basis.Rd.R
|
e4ec92ca3984b1d8710fa102e69dbc885c741449
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 168
|
r
|
constant_basis.Rd.R
|
library(IDE)
### Name: constant_basis
### Title: Create a single, constant basis function
### Aliases: constant_basis
### ** Examples
# Construct the single constant basis function provided by IDE.
basis1 <- constant_basis()
3362b7d1a8deb35951775bed4d6a1092a7837d17
|
1a1ff0d19b5cd9c2fb6d3965f8a4771c306b4e45
|
/demographics.R
|
99f74a1d8e9a008b1f63f0f328cf483fa739da64
|
[] |
no_license
|
zkpt-org/intervention
|
bab4d011292a34d74769f9ae8b41e819ef3f2cdc
|
af600d5549a089892bc5eb880d2c5e0ebcbb4ec3
|
refs/heads/master
| 2020-05-21T00:36:06.168394
| 2013-07-10T20:42:49
| 2013-07-10T20:42:49
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,198
|
r
|
demographics.R
|
#Add columns for pre-existing conditions
# Each new column is a 0/1 indicator; all start at 0 and are set to 1 once
# the per-condition UserId tables extracted below are merged back in.
user_data['Diabetes']<-0
user_data['HighBloodPressure']<-0
user_data['HighCholesterol']<-0
user_data['HeartDisease']<-0
user_data['MetabolicSyndrome']<-0
user_data['Other']<-0
#Extract pre-existing conditions
# The `condition` table (in scope for sqldf) holds one answer per row; a
# condition may be recorded under any of the QUESTION_CHRONIC{,_2,_3} or
# QUESTION_HYPERTENSION{,_2,_3} keys, hence the repeated OR clauses.
# Each query returns the UserIds reporting one condition, and a constant-1
# marker column is added afterwards for use as a join flag.
hiblood=sqldf("SELECT UserId FROM condition WHERE Key = 'QUESTION_CHRONIC' AND Value LIKE '%High Blood Pressure'
OR Key = 'QUESTION_CHRONIC_2' AND Value LIKE '%High Blood Pressure'
OR Key = 'QUESTION_CHRONIC_3' AND Value LIKE '%High Blood Pressure'
OR Key = 'QUESTION_HYPERTENSION' AND Value LIKE '%High Blood Pressure'
OR Key = 'QUESTION_HYPERTENSION_2' AND Value LIKE '%High Blood Pressure'
OR Key = 'QUESTION_HYPERTENSION_3' AND Value LIKE '%High Blood Pressure'")
hiblood['hiblood']<-1
diabetes=sqldf("SELECT UserId FROM condition WHERE Key = 'QUESTION_CHRONIC' AND Value LIKE '%Diabetes'
OR Key = 'QUESTION_CHRONIC_2' AND Value LIKE '%Diabetes'
OR Key = 'QUESTION_CHRONIC_3' AND Value LIKE '%Diabetes'
OR Key = 'QUESTION_HYPERTENSION' AND Value LIKE '%Diabetes'
OR Key = 'QUESTION_HYPERTENSION_2' AND Value LIKE '%Diabetes'
OR Key = 'QUESTION_HYPERTENSION_3' AND Value LIKE '%Diabetes'")
diabetes['diabetes']<-1
highcol=sqldf("SELECT UserId FROM condition WHERE Key = 'QUESTION_CHRONIC' AND Value LIKE '%High Cholesterol'
OR Key = 'QUESTION_CHRONIC_2' AND Value LIKE '%High Cholesterol'
OR Key = 'QUESTION_CHRONIC_3' AND Value LIKE '%High Cholesterol'
OR Key = 'QUESTION_HYPERTENSION' AND Value LIKE '%High Cholesterol'
OR Key = 'QUESTION_HYPERTENSION_2' AND Value LIKE '%High Cholesterol'
OR Key = 'QUESTION_HYPERTENSION_3' AND Value LIKE '%High Cholesterol'")
# NOTE: the flag column is named 'hicol' (not 'highcol'); the merge step
# below depends on this exact name.
highcol['hicol']<-1
heartdis=sqldf("SELECT UserId FROM condition WHERE Key = 'QUESTION_CHRONIC' AND Value LIKE '%Heart Disease'
OR Key = 'QUESTION_CHRONIC_2' AND Value LIKE '%Heart Disease'
OR Key = 'QUESTION_CHRONIC_3' AND Value LIKE '%Heart Disease'
OR Key = 'QUESTION_HYPERTENSION' AND Value LIKE '%Heart Disease'
OR Key = 'QUESTION_HYPERTENSION_2' AND Value LIKE '%Heart Disease'
OR Key = 'QUESTION_HYPERTENSION_3' AND Value LIKE '%Heart Disease'")
heartdis['heartdis']<-1
metabo=sqldf("SELECT UserId FROM condition WHERE Key = 'QUESTION_CHRONIC' AND Value LIKE '%Metabolic Syndrome'
OR Key = 'QUESTION_CHRONIC_2' AND Value LIKE '%Metabolic Syndrome'
OR Key = 'QUESTION_CHRONIC_3' AND Value LIKE '%Metabolic Syndrome'
OR Key = 'QUESTION_HYPERTENSION' AND Value LIKE '%Metabolic Syndrome'
OR Key = 'QUESTION_HYPERTENSION_2' AND Value LIKE '%Metabolic Syndrome'
OR Key = 'QUESTION_HYPERTENSION_3' AND Value LIKE '%Metabolic Syndrome'")
metabo['metabo']<-1
# 'Other' is a catch-all covering several literal answer strings (the odd
# spellings such as 'gestatiol' match the raw survey data as stored).
other=sqldf("SELECT UserId FROM condition WHERE Key = 'QUESTION_CHRONIC' AND Value LIKE '%Other'
OR Key = 'QUESTION_CHRONIC' AND Value LIKE '%pre- or gestatiolHypertension'
OR Key = 'QUESTION_CHRONIC' AND Value LIKE '%Depression anxiety or stress related'
OR Key = 'QUESTION_CHRONIC' AND Value LIKE '%pre- or gestatiolOsteoarthritis'
OR Key = 'QUESTION_CHRONIC' AND Value LIKE '%Osteoarthritis'
OR Key = 'QUESTION_CHRONIC_2' AND Value LIKE '%Other'
OR Key = 'QUESTION_CHRONIC_2' AND Value LIKE '%pre- or gestatiolHypertension'
OR Key = 'QUESTION_CHRONIC_2' AND Value LIKE '%Depression anxiety or stress related'
OR Key = 'QUESTION_CHRONIC_2' AND Value LIKE '%pre- or gestatiolOsteoarthritis'
OR Key = 'QUESTION_CHRONIC_2' AND Value LIKE '%Osteoarthritis'
OR Key = 'QUESTION_CHRONIC_3' AND Value LIKE '%Other'
OR Key = 'QUESTION_CHRONIC_3' AND Value LIKE '%pre- or gestatiolHypertension'
OR Key = 'QUESTION_CHRONIC_3' AND Value LIKE '%Depression anxiety or stress related'
OR Key = 'QUESTION_CHRONIC_3' AND Value LIKE '%pre- or gestatiolOsteoarthritis'
OR Key = 'QUESTION_CHRONIC_3' AND Value LIKE '%Osteoarthritis'
OR Key = 'QUESTION_HYPERTENSION' AND Value LIKE '%Other'
OR Key = 'QUESTION_HYPERTENSION' AND Value LIKE '%pre- or gestatiolHypertension'
OR Key = 'QUESTION_HYPERTENSION' AND Value LIKE '%Depression anxiety or stress related'
OR Key = 'QUESTION_HYPERTENSION' AND Value LIKE '%pre- or gestatiolOsteoarthritis'
OR Key = 'QUESTION_HYPERTENSION' AND Value LIKE '%Osteoarthritis'
OR Key = 'QUESTION_HYPERTENSION_2' AND Value LIKE '%Other'
OR Key = 'QUESTION_HYPERTENSION_2' AND Value LIKE '%pre- or gestatiolHypertension'
OR Key = 'QUESTION_HYPERTENSION_2' AND Value LIKE '%Depression anxiety or stress related'
OR Key = 'QUESTION_HYPERTENSION_2' AND Value LIKE '%pre- or gestatiolOsteoarthritis'
OR Key = 'QUESTION_HYPERTENSION_2' AND Value LIKE '%Osteoarthritis'
OR Key = 'QUESTION_HYPERTENSION_3' AND Value LIKE '%Other'
OR Key = 'QUESTION_HYPERTENSION_3' AND Value LIKE '%pre- or gestatiolHypertension'
OR Key = 'QUESTION_HYPERTENSION_3' AND Value LIKE '%Depression anxiety or stress related'
OR Key = 'QUESTION_HYPERTENSION_3' AND Value LIKE '%pre- or gestatiolOsteoarthritis'
OR Key = 'QUESTION_HYPERTENSION_3' AND Value LIKE '%Osteoarthritis'")
other['others']<-1
# Merge pre-existing conditions.
# Each flag table holds the UserIds with one condition plus a constant-1
# marker column; after a left join the marker is 1 for matches and NA for
# everyone else.
# FIX: the original indexed assignments with `user_data$<flag> == 1`, which
# contains NA for non-matching rows; logical subscripts with NA are not
# allowed in subscripted assignments in R, so we index through which(),
# which drops the NAs.  The six copy-pasted merge blocks are also collapsed
# into one loop (same merge order, so the resulting column order is
# unchanged).
condition.flags <- list(
	list(src = hiblood,  flag = "hiblood",  target = "HighBloodPressure"),
	list(src = diabetes, flag = "diabetes", target = "Diabetes"),
	list(src = highcol,  flag = "hicol",    target = "HighCholesterol"),
	list(src = heartdis, flag = "heartdis", target = "HeartDisease"),
	list(src = metabo,   flag = "metabo",   target = "MetabolicSyndrome"),
	list(src = other,    flag = "others",   target = "Other")
)
for (cf in condition.flags) {
	user_data <- merge(user_data, cf$src, by = "UserId", all.x = TRUE)
	user_data[[cf$target]][which(user_data[[cf$flag]] == 1)] <- 1
}
# Drop the temporary marker columns and the per-condition tables.
user_data <- subset(user_data,
	select = -c(hiblood, diabetes, hicol, heartdis, metabo, others))
rm(hiblood, diabetes, highcol, heartdis, metabo, other, condition.flags, cf)
user_data <- unique(user_data)
# Lifestyle / self-reported health answers, one value column per question.
stress=sqldf("SELECT UserId, Value AS Stress FROM condition WHERE Key = 'QUESTION_STRESS'")
smoking=sqldf("SELECT UserId, Value AS Smoking FROM condition WHERE Key = 'QUESTION_TOBACCO'")
health=sqldf("SELECT UserId, Value AS Health FROM condition WHERE Key = 'QUESTION_HEALTH'")
# Left-join each answer set, de-duplicating after every merge (as before).
for (extra in list(stress, smoking, health)) {
	user_data <- unique(merge(user_data, extra, by = "UserId", all.x = TRUE))
}
rm(stress, smoking, health, extra)
# Keep one row per user and rename the id/state columns for downstream use.
user_data <- user_data[!duplicated(user_data[, 1]), ]
colnames(user_data)[1] <- "user"
colnames(user_data)[2] <- "state"
|
2f10c8e41d76b2b1277bf805e1761bbfef3a244c
|
dc284fe45eea59ade9e1a75095af6285be51af3c
|
/C/plot_sing.R
|
ac29016b63e41b9dee08f975193b3e5c565c586e
|
[] |
no_license
|
gui11aume/analytic_combinatorics_seeding
|
c6d47ecd4a7df4428e0d0a24a578bc276bb01004
|
f6dabd7cf074f7069bcf12bca996fe35530b19f0
|
refs/heads/master
| 2020-05-26T00:29:23.183443
| 2017-10-18T16:09:52
| 2017-10-18T16:09:52
| 84,981,428
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 572
|
r
|
plot_sing.R
|
# Plot the generating function Q(z) = 1 - p z (1 - (q z)^d) / (1 - q z) on
# the real line, and the modulus of 1/Q over a square of the complex plane
# (visualizing its singularities).  Writes Q.pdf and singS.png to the
# working directory.
d = 17       # exponent in the generating function
p = .1       # weight parameter
q = 1-p
# Q is built from elementwise arithmetic only, so it is vectorized in z
# (works for vectors, matrices and complex input alike).
Q = function(z) 1-p*z*(1-(q*z)^d)/(1-q*z)
pdf("Q.pdf", width=5, height=5)
z = seq(-1.5, 1.5, 0.01)
# FIX: the plot.default argument is 'panel.first', not 'plot.first'; with
# the wrong name the grid() call was swept into '...' instead of being
# drawn as the plot background.
plot(z, Q(z), type="l", ylim=c(-2,2), lwd=2, panel.first=grid())
abline(h=0, col="grey50")
dev.off()
# |1/Q| sampled on a 512 x 512 grid over [-1.5, 1.5]^2.
# FIX/perf: the original filled the matrix with a double for-loop; outer()
# builds the same complex grid (mat[x, y] corresponds to
# z = 1.5*(x-256)/256 + 1.5i*(y-256)/256) in one vectorized step.
coord = 1.5 * ((1:512) - 256) / 256
mat = Mod(1 / Q(outer(coord, coord, function(re, im) re + (0+1i) * im)))
bw = colorRampPalette(c("white", "black"))(1024)
mat[mat > 7] = 7   # clip large values near the poles for display
png("singS.png", width=512, height=512)
par(mar=c(0,0,0,0))
image(mat, col=bw, bty="n", xaxt="n", yaxt="n")
dev.off()
|
4948a1101e6ba58e037cabae185976f75d5314a8
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/bayesDP/examples/bdpnormal.Rd.R
|
17d8537ebcbef88bfede16deebf7e377bb06d851
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 736
|
r
|
bdpnormal.Rd.R
|
library(bayesDP)
### Name: bdpnormal
### Title: Bayesian Discount Prior: Gaussian mean values
### Aliases: bdpnormal bdpnormal,ANY-method
### ** Examples
# One-arm trial (OPC) example
# Current treatment-arm summary (mu_t, sigma_t, N_t) augmented with
# historical treatment data (mu0_t, sigma0_t, N0_t), fixed discount method.
fit <- bdpnormal(mu_t = 30, sigma_t = 10, N_t = 50,
mu0_t = 32, sigma0_t = 10, N0_t = 50,
method = "fixed")
summary(fit)
## Not run:
##D plot(fit)
## End(Not run)
# Two-arm (RCT) example
# Same as above, plus a control arm (mu_c/sigma_c/N_c) with its own
# historical data (mu0_c/sigma0_c/N0_c).
fit2 <- bdpnormal(mu_t = 30, sigma_t = 10, N_t = 50,
mu0_t = 32, sigma0_t = 10, N0_t = 50,
mu_c = 25, sigma_c = 10, N_c = 50,
mu0_c = 25, sigma0_c = 10, N0_c = 50,
method = "fixed")
summary(fit2)
## Not run:
##D plot(fit2)
## End(Not run)
|
a71988b220d9426747cfdd5b9b2505059a2b1c2d
|
10fbd1788ed37fd0c61403f40e8233853bc00cfc
|
/man/QAW_eff.Rd
|
36c5e505b66418728152a60db21b77d67bf3f727
|
[] |
no_license
|
lmmontoya/SL.ODTR
|
50dafaa45376dc1f7da74816585ce77047e03a02
|
9ffe0a3021f7c248f59f038a2f82fab135887da2
|
refs/heads/master
| 2023-03-06T12:48:47.929749
| 2023-02-20T22:14:59
| 2023-02-20T22:14:59
| 214,280,056
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 365
|
rd
|
QAW_eff.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/1DGPfunctions.R
\name{QAW_eff}
\alias{QAW_eff}
\title{Simulate the conditional mean outcome under the "eff" data-generating process}
\usage{
QAW_eff(A, W)
}
\arguments{
\item{A}{Vector of treatment}
\item{W}{Data frame of observed baseline covariates}
}
\value{
conditional mean of Y given A and W
}
\description{
Generate QAW according to eff
}
|
2f99420dfb746bc49c24768d6a001e4e4e417018
|
2d1655d6f0cf1d00b9cd470a95e06245311e4c89
|
/Problem_2.R
|
9658060d4fa6798d521eb9bdc61101fab8c7426b
|
[] |
no_license
|
feb-uni-sofia/homework-1-r-basics-elidakacheva
|
626644402f4ac0081d4e6a35ffc8dfea32b4119d
|
1c1fcc0fa3ecc5bd7a9aec9f1c5670a68e777959
|
refs/heads/master
| 2021-09-11T01:09:33.274081
| 2018-03-31T20:57:38
| 2018-03-31T20:57:38
| 126,045,589
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 955
|
r
|
Problem_2.R
|
# Problem 2 -- one week of daily minimum and maximum temperatures.

# a) record the observed minima and maxima
xmin <- c(23.0, 20.5, 28.2, 20.3, 22.4, 17.2, 18.2)
xmax <- c(25.0, 22.8, 31.2, 27.3, 28.4, 20.2, 24.1)

# b) daily temperature range (max minus min)
print(xmax - xmin)

# c) weekly average minimum and maximum
avgxmin <- mean(xmin)
avgxmax <- mean(xmax)

# d) minima that fall below the weekly average minimum
print(xmin[xmin < avgxmin])

# e) minima on days whose maximum exceeded the weekly average maximum
print(xmin[xmax > avgxmax])

# f) attach date labels to both vectors
dates <- c('03Mon18', '04Tue18', '05Wed18', '06Thu18', '07Fri18', '08Sat18', '09Sun18')
names(xmin) <- dates
print(xmin)
names(xmax) <- dates
print(xmax)

# g) combine the two series into a data frame
temperatures <- data.frame(xmin = xmin, xmax = xmax)

# h) convert the minima to Fahrenheit and add them as a column
xminFahrenheit <- 9/5 * xmin + 32
temperatures <- within(temperatures, xminFahrenheit <- 9/5 * xmin + 32)

# i) the same conversion for the maxima, collected in their own data frame
xmaxFahrenheit <- 9/5 * xmax + 32
temperaturesFahrenheit <- data.frame(xminFahrenheit = xminFahrenheit,
                                     xmaxFahrenheit = xmaxFahrenheit)

# j) Monday-Friday subsets, once via head() and once via negative indexing
iMonFri <- data.frame(xminFahrenheit = head(xminFahrenheit, 5),
                      xmaxFahrenheit = head(xmaxFahrenheit, 5))
iiMonFri <- data.frame(xminFahrenheit = xminFahrenheit[-(6:7)],
                       xmaxFahrenheit = xmaxFahrenheit[-(6:7)])
d7b7ab490d8ca7696c59ad76e908ff434d58cfe2
|
6f4432677678937ade732bbbae9829a4113a5e96
|
/man/costfunct.Rd
|
6f8e06cc8e8e7b7ca7922331373e91689b37ad07
|
[
"MIT"
] |
permissive
|
tan92327/nempack2
|
5584aa3f23645c402c8d76cd94893bc99937b6e2
|
03e8d6d271e7a3d969f4f52829a7cd1690a0ae03
|
refs/heads/main
| 2023-01-18T16:29:11.435739
| 2020-11-25T01:04:22
| 2020-11-25T01:04:22
| 315,790,800
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 477
|
rd
|
costfunct.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/costfunct.R
\name{costfunct}
\alias{costfunct}
\title{Cost function implementing C2 in the binary segmentation article}
\usage{
costfunct(data.vec)
}
\arguments{
\item{data.vec}{Input data vector}
}
\value{
The sum of squares of the data vector
}
\description{
Cost function implementing C2 in the binary segmentation article
}
\examples{
test.vec = c(1,2,3,5,7,9,11)
sum.sq = costfunct(test.vec)
}
|
c03e40a511e09e9c97aed7fb457fda3b43891a9a
|
1004816de8f435d930167ec03e7196f3d033db1f
|
/Rpkg/R/KMeans.R
|
00ed6f2a996b87cede57e03bfd9f0acba50b054d
|
[
"Apache-2.0"
] |
permissive
|
zheng-da/FlashX
|
04e0eedb894f8504a51f0d88a398d766909b2ad5
|
3ddbd8e517d2141d6c2a4a5f712f6d8660bc25c6
|
refs/heads/release
| 2021-01-21T09:38:24.454071
| 2016-12-28T08:51:44
| 2016-12-28T08:51:44
| 19,077,386
| 22
| 11
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,878
|
r
|
KMeans.R
|
# K-means clustering on a FlashX matrix.
#
# data:  a FlashX matrix; rows are data points, columns are features.
# K:     number of clusters.
# debug: if TRUE, print per-iteration timing and the number of moved points.
#
# Points are assigned to the nearest center (Euclidean distance via
# fm.inner.prod) and centers are recomputed as cluster means until no point
# changes cluster.  Returns, invisibly, a list with the final 0-based
# cluster assignment (`cluster`), the K x m center matrix (`centers`) and
# the iteration count (`iter`); previously nothing was returned, so the
# invisible return is backward compatible.
fm.KMeans <- function(data, K, debug=FALSE)
{
	data <- fm.conv.layout(data, TRUE)
	data <- fm.materialize(data)
	n <- dim(data)[1]
	m <- dim(data)[2]
	agg.sum <- fm.create.agg.op(fm.bo.add, fm.bo.add, "sum")
	agg.which.min <- fm.create.agg.op(fm.bo.which.min, NULL, "which.min")
	# Recompute the K cluster centers as the per-cluster mean of `data`.
	cal.centers <- function(data, parts) {
		centers1 <- fm.groupby(data, 2, parts, agg.sum)
		centers1 <- as.matrix(centers1)
		cnts <- fm.table(parts)
		centers.idx <- as.vector(cnts$val) + 1
		# Some centers may not have been initialized if no data points
		# are assigned to them. I need to reset those data centers.
		centers <- matrix(rep.int(0, length(centers1)), dim(centers1)[1],
				  dim(centers1)[2])
		centers[centers.idx,] <- centers1[centers.idx,]
		# Calculate the mean for each cluster, which is the cluster center
		# for the next iteration.
		sizes <- rep.int(1, dim(centers)[1])
		sizes[centers.idx] <- as.vector(cnts$Freq)
		centers <- diag(1/sizes) %*% centers
		fm.as.matrix(centers)
	}
	# Random initial assignment of every point to one of the K clusters.
	parts <- fm.as.integer(floor(fm.runif(n, min=0, max=K)))
	new.centers <- cal.centers(data, fm.as.factor(parts, K))
	centers <- fm.matrix(fm.rep.int(0, K * m), K, m)
	old.parts <- fm.rep.int(0, n)
	iter <- 0
	start.time <- Sys.time()
	num.moves <- length(parts)
	while (num.moves > 0) {
		if (debug)
			iter.start <- Sys.time()
		centers <- new.centers
		old.parts <- parts
		# Distance of every point to every center; pick the nearest.
		# FIX: renamed from `m`, which shadowed the column count above.
		dist.mat <- fm.inner.prod(data, t(centers), fm.bo.euclidean, fm.bo.add)
		parts <- fm.as.integer(fm.agg.mat(dist.mat, 1, agg.which.min) - 1)
		new.centers <- cal.centers(data, fm.as.factor(parts, K))
		num.moves <- sum(old.parts != parts)
		iter <- iter + 1
		if (debug) {
			iter.end <- Sys.time()
			cat("iteration", iter, "takes", iter.end - iter.start,
			    "seconds and moves", num.moves, "data points\n")
		}
	}
	end.time <- Sys.time()
	cat("KMeans takes", iter , "iterations and", end.time - start.time, "seconds\n")
	invisible(list(cluster=parts, centers=centers, iter=iter))
}
|
634e43aa1cbb068e783336d6e499acdc21bcac42
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/yuima/examples/ybook.Rd.R
|
e42c0d37805a4715a1ce224f476b5f37c5a5685c
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 138
|
r
|
ybook.Rd.R
|
library(yuima)
### Name: ybook
### Title: R code for the Yuima Book
### Aliases: ybook
### Keywords: misc
### ** Examples
# Run the companion code for chapter 1 of the Yuima book.
ybook(1)
|
308d2abf70c5087b23c6c83aed0204501277bfde
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/SSN/examples/putSSNdata.frame.Rd.R
|
19d490b7647aa2dd36b701844e25a234b774174d
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 794
|
r
|
putSSNdata.frame.Rd.R
|
library(SSN)
### Name: putSSNdata.frame
### Title: putSSNdata.frame
### Aliases: putSSNdata.frame
### ** Examples
library(SSN)
#for examples, copy MiddleFork04.ssn directory to R's temporary directory
copyLSN2temp()
# NOT RUN
# Create a SpatialStreamNetork object that also contains prediction sites
#mf04 <- importSSN(paste0(tempdir(),'/MiddleFork04.ssn', o.write = TRUE))
#use mf04 SpatialStreamNetwork object, already created
data(mf04)
#for examples only, make sure mf04p has the correct path
#if you use importSSN(), path will be correct
mf04 <- updatePath(mf04, paste0(tempdir(),'/MiddleFork04.ssn'))
# Take out the data.frame, make a change and put it back
obs.df <- getSSNdata.frame(mf04)
# Recode SampleYear as a factor (new Year_cat column), then write the
# modified data.frame back into the SpatialStreamNetwork object.
obs.df$Year_cat <- as.factor(obs.df$SampleYear)
mf04 <- putSSNdata.frame(obs.df, mf04)
|
2a39597fa2b5ba81c44b1114052fdf90927e453b
|
e5b8abb99aa8d5e0d6f6679039a777585d982ca5
|
/R/rcbdCheck.R
|
7157bb4132cc29994ac45a3aba6a9938690c626e
|
[] |
no_license
|
Prof-ThiagoOliveira/planExp
|
88ae76a045bb2d59b2d45f7bbe77cc602cc9caf6
|
a65d2efca012eedfaddb56766ac7c5e03ac44c8b
|
refs/heads/master
| 2023-04-10T08:19:51.778213
| 2020-07-02T07:37:04
| 2020-07-02T07:37:04
| 273,422,811
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,273
|
r
|
rcbdCheck.R
|
#######################################################################
# #
# Package: planExp #
# #
# File: rcbdCheck.R #
# Contains: rcbdCheck function #
# #
# Written by Thiago de Paula Oliveira #
# copyright (c) 2020, Thiago P. Oliveira #
# #
# License: GNU General Public License version 2 (June, 1991) or later #
# #
#######################################################################
##' @title Internal Function to check the arguments
##' @description Internally called helper that validates the \code{treat} and
##'   \code{blocks} arguments: a single numeric value must be a strictly
##'   positive whole number, otherwise an error is raised.
##' @usage NULL
##' @author Thiago de Paula Oliveira,
##' \email{thiago.paula.oliveira@@usp.br}
##' @keywords internal
rcbdCheck <- function(treat, blocks) {
  # TRUE when the formatted value consists only of digits, i.e. it is a
  # non-negative whole number (a "-" or "." makes this FALSE).
  is_whole_number <- function(value) {
    formatted <- format(value, digits = 20, scientific = FALSE)
    !grepl("[^[:digit:]]", formatted)
  }
  # Raise 'msg' when 'value' is a single numeric that is not a strictly
  # positive integer; any other shape/type of input passes through untouched.
  assert_positive_integer <- function(value, msg) {
    if (is.numeric(value) && length(value) == 1 &&
        (value <= 0 || !is_whole_number(value))) {
      stop(msg, call. = FALSE)
    }
  }
  assert_positive_integer(treat, "'treat' must be coercible to positive integers")
  assert_positive_integer(blocks, "'blocks' must be coercible to positive integers")
}
|
6962e27fc49295912eac4767db2a30cdd1ab29f7
|
c521edcb2e192a222407de84c82f649c431e4c4f
|
/affairs.R
|
69502d38a5ed600361e0ab933a8c47ace82a5f12
|
[] |
no_license
|
arunailani/DATA-SCIENCE-ASSIGNMENTS
|
5d7e2cb97701689116868b4eac1b643623be515c
|
e04517ba14ce690d038aa7ee0ac819a57c009b12
|
refs/heads/master
| 2022-09-12T04:49:19.735641
| 2020-05-15T13:11:18
| 2020-05-15T13:11:18
| 264,187,158
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 896
|
r
|
affairs.R
|
# Logistic regression on the 'affairs' data set: model whether a person had
# at least one affair from demographic and attitudinal predictors.
affairs = read.csv(file.choose())
View(affairs)
attach(affairs)
detach(affairs)
summary(affairs)
str(affairs)
factor(affairs$affairs)
#since the affairs is dependent column so making its value to 0 and 1 for building the confusion matrix
affairs$affairs = ifelse(affairs$affairs > 0, 1, 0)
#model building
model1 = glm(factor(affairs)~gender+age+yearsmarried+children+factor(religiousness)+factor(rating), family = "binomial", data = affairs)
summary(model1)
model2 = glm(affairs~gender+age+yearsmarried+children+religiousness+education+occupation+rating, family = "binomial", data = affairs)
summary(model2)
# Predicted probabilities and confusion matrix for model1 (0.5 cutoff).
prob1 = predict(model1,type="response")
confusion1 = table(prob1>0.5,affairs$affairs)
confusion1
# FIX: the accuracy of model1 must be computed from confusion1; the original
# code referenced confusion2 here, before confusion2 was created, which
# errored at run time.
Accuracy <- sum(diag(confusion1)) / sum(confusion1)
Accuracy
# Predicted probabilities and confusion matrix for model2 (0.5 cutoff).
prob2 = predict(model2,type="response")
confusion2 = table(prob2>0.5,affairs$affairs)
confusion2
Accuracy2 <- sum(diag(confusion2)) / sum(confusion2)
Accuracy2
|
2007f4401a91ab868c6be39989ee96adcb605982
|
18df2d7c536789ad87d82d13145d46a6b262671f
|
/quake_plot_app.R
|
41c74c1403e76c5acc0c7d84c3a6bd6ff2e54b4e
|
[] |
no_license
|
putt-ad/quakes_shiny
|
ca96932f68a9ae96bbedfb22ac2b5c25a97df6f9
|
4e851a5710a667e260b3c7737d7492071f45d0ea
|
refs/heads/master
| 2020-08-31T21:10:04.284552
| 2019-11-01T14:33:09
| 2019-11-01T14:33:09
| 218,786,963
| 0
| 2
| null | 2019-11-01T14:33:10
| 2019-10-31T14:36:26
|
R
|
UTF-8
|
R
| false
| false
| 3,350
|
r
|
quake_plot_app.R
|
########
# shiny quakes app
# A. Putt
# GEOL590 | 2019.10.31
########
library(shiny)
library(tidyverse)
library(ggplot2)
#quakes dataset is a list of earth quake site location lat and long, station location, depth, magnitude (from richter scale)
# We'll limit the range of selectable magnitudes to the actual range of earthquake magnitudes
min.mag <- min(quakes$mag)
max.mag <- max(quakes$mag)
# Need a vector of axis variables as characters
axis_vars <- names(quakes)
# Create a character vector of those columns of the quakes dataset that are factors
# NOTE(review): quakes has no factor columns, so factor.columns is empty and
# currently unused — confirm whether this was intended for a later feature.
factor.indices <- vapply(quakes, is.factor, TRUE)
factor.columns <- axis_vars[factor.indices]
# Define UI for application that creates the page users interact with
ui <- fluidPage(
  # add title to app
   titlePanel("Earth Quake Grapher"),
   # Sidebar with sliders placed into the side panel
   sidebarLayout(
      sidebarPanel(
        # This is a range slider (i.e. there's a max and min). It is set that way by "value" (the starting value), which is a 2-element vector
         sliderInput("magrange",
                     "Earth Quake Magnitude",
                     min = min.mag,
                     max = max.mag,
                     value = c(min.mag, max.mag)),
         # Select x and y variables
         selectInput(inputId = "xvar",
                     label = "X axis",
                     choices = axis_vars,
                     selected = "x"),
         selectInput(inputId = "yvar",
                     label = "Y axis",
                     choices = axis_vars,
                     selected = "y"),
         #will not run until you hit the go button. this displays the word "Run"
         actionButton("go",
                      "Run",
                      icon = icon("globe-asia"))
      ),
      # Show a plot of the quakes data frame. This output doesn't care what that plot is, only that it will be associated with output$quake_plot
      mainPanel(
         plotOutput("quake_plot")
      )
   )
)
# Define server logic required to draw the scatter plot
server <- function(input, output) {
  # Filter quakes based on magnitude - this doesn't strictly need to be in reactive,
  # ...but maybe we want to expand the app later, in which case it'll be useful to have it in reactive()
  filt_mag <- reactive({
    quakes %>%
      filter(mag >= min(input$magrange)) %>%
      filter(mag <= max(input$magrange))
  })
  # Make the plot
  # eventReactive listens for a change in state of input$go, and only runs the code when the state changes
  # Note use of aes_string, since input$xvar returns a string, not a reference to the object ("mag" not mag)
  p_quake <- eventReactive(input$go, {
    ggplot(filt_mag(), aes_string(x = input$xvar, y = input$yvar, colour = "depth")) + # Note that you need () after filt_mag, since filt_mag() is a function to get the object you want, not the actual object
      geom_point()
  })
  # Create diagnostic output window to show what kind of output the double slider creates
  # NOTE(review): the UI has no textOutput("diagnostic"), so this render is
  # never displayed — confirm whether it should be removed or wired up.
  output$diagnostic <- renderText(
    input$magrange
  )
  # Create a dynamic plot
  # I moved the ggplot into its own reactive context.
  # Note that creating a ggplot object is very fast - it is actually drawing the plot that is slow.
  output$quake_plot <- renderPlot(
    p_quake()
  )
}
# Run the application
shinyApp(ui = ui, server = server)
|
831661474faad92a7a667cfc54f0b74b0ecdc2b3
|
59debac2c846d50901fbcf74d8487237cdf8d77f
|
/man/itcadd.Rd
|
3daf0d9f91716d144b79654347b0d96c5e1ec0ee
|
[] |
no_license
|
trooper197/tccox
|
bcc306c1b8e9d1fddad3d1fd48f4ae248957220a
|
c3da04d5e0a363671340cc75e03aca621eb2644e
|
refs/heads/master
| 2021-08-23T09:17:45.528914
| 2017-12-01T20:23:45
| 2017-12-01T20:23:45
| 112,348,548
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,226
|
rd
|
itcadd.Rd
|
\name{itcadd}
\alias{itcadd}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Determines potential ITC intervals according to expected event criteria or as fixed
}
\description{
Either specifies the ITC interval endpoints as fixed or else determines candidates based on expected event criteria
}
\usage{
itcadd(dataset, nmaxint, interval_width, min_exp_events, nitc_fixed, n_start_fixed,
n_stop_fixed, interval_stop_beginning)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
\item{dataset}{
data.frame organized as expected by tc()
}
\item{nmaxint}{
maximum number of TC intervals allowed
}
\item{interval_width}{
width of the TC intervals
}
\item{min_exp_events}{
minimum number of events expected of subjects in each cell for determining ITC intervals
}
\item{nitc_fixed}{
indicator that potential ITC intervals are fixed
}
\item{n_start_fixed}{
number of fixed ITC starting intervals (only applicable if nitc_fixed=1)
}
\item{n_stop_fixed}{
number of fixed ITC stopping intervals (only applicable if nitc_fixed=1)
}
\item{interval_stop_beginning}{
smallest ITC stopping interval endpoint (only applicable if nitc_fixed=1)
}
}
\value{
\item{nitc_start }{number of ITC starting intervals}
\item{itc_start_endpoint }{vector containing the ITC starting interval endpoints}
\item{nitc_stop }{number of ITC stopping intervals}
\item{itc_stop_endpoint }{vector containing the ITC stopping interval endpoints}
}
\references{
Troendle, JF, Leifer, E, Zhang Z, Yang, S, and Tewes H (2017) How to Control for Unmeasured Confounding in an Observational Time-To-Event Study With Exposure Incidence Information: the Treatment Choice Cox Model. Statistics in Medicine 36: 3654-3669.
}
\author{
James F. Troendle
}
\examples{
##---- Should be DIRECTLY executable !! ----
##-- ==> Define data, use random,
##-- or do help(data=index) for the standard data sets.
## The function is currently defined as
function (x)
{
}
}
% Add one or more standard keywords, see file 'KEYWORDS' in the
% R documentation directory.
\keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS")
\keyword{ ~kwd2 }% __ONLY ONE__ keyword per line
|
5323335f47a2b3c9795470f04943d1e4c5dc7bae
|
2b9d185f6992e663a9e1eb4890c5e1a23e78e778
|
/docs2/lib/myown-1.0/R/scriptTest.R
|
7468809ba85b19d414d9ab1edf3e5845aaf45037
|
[
"MIT"
] |
permissive
|
Juju1OO1/usr_lobby
|
5930ae17b47fd85f936d61887d48b9d9aef34498
|
eb4f45f3bc0273bb68dc2e8dc75f92411ecb00e5
|
refs/heads/main
| 2023-06-05T06:56:19.789997
| 2021-06-23T12:18:40
| 2021-06-23T12:18:40
| 364,794,159
| 0
| 0
|
MIT
| 2021-06-23T12:18:41
| 2021-05-06T05:24:26
|
JavaScript
|
UTF-8
|
R
| false
| false
| 851
|
r
|
scriptTest.R
|
# Build a materializecss "card reveal" component using shiny tag helpers:
# an image with an activator, a content section with a link, and a hidden
# reveal section that is shown when the card title is clicked.
card_reveal <- function(){
  image_section <- tags$div(
    class = "card-image waves-effect waves-block waves-light",
    tags$img(class = "activator", src = "https://materializecss.github.io/materialize/images/office.jpg")
  )
  content_section <- tags$div(
    class = "card-content",
    tags$span(class = "card-title activator grey-text text-darken-4", "Card Title", tags$i(class = "material-icons right", "more_vert")),
    tags$p(tags$a(href = "#", " This is a link"))
  )
  reveal_section <- tags$div(
    class = "card-reveal",
    tags$span(class = "card-title grey-text text-darken-4", "Card Title", tags$i(class = "material-icons right", "close")),
    tags$p("Here is some more information about this product that is only revealed once clicked on.")
  )
  tags$div(class = "card", image_section, content_section, reveal_section)
}
|
dd8f1b4cf5200b41012cf87465a762a9f8c7393f
|
1bb4a3b57f8de59e66a325a70d4b30c82b3da0db
|
/plotVI.R
|
0a02174e7e921dfa13903f0823848c1f04be119d
|
[] |
no_license
|
pegah-hafiz/CDSS-infertility
|
2b49fce95262e8eaa48411c0bce19df7b8387327
|
f9b29321f49006cafa945a28b48a91e58af92426
|
refs/heads/main
| 2023-08-30T18:53:16.685612
| 2021-11-06T15:10:37
| 2021-11-06T15:10:37
| 415,905,845
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 550
|
r
|
plotVI.R
|
# Fit a random forest on the IVF data set, write a variable-importance plot
# to "VI.pdf", and open the PDF for viewing.
# NOTE(review): the 'button' argument is never used in the body — presumably a
# UI callback parameter; confirm against the caller.
plotVI <- function(button){
  library(randomForest)
  #set.seed(2)
  filename <- "VI.pdf"
  # NOTE(review): f.address is empty, so read.csv("") below will fail; the
  # path to the input CSV must be filled in before this function is usable.
  f.address <- ""
  main.data<-read.csv(f.address, header=T)
  # Random forest predicting 'result' from all other columns; rows with
  # missing values are dropped via na.action.
  ivf.model <- randomForest(result ~ ., data=main.data, ntree=500,
                            keep.forest=T, importance=TRUE,
                            na.action='na.omit')
  pdf(filename)
  # NOTE(review): 'imp' (type-1 importance) is computed but never used; the
  # plot below uses type-2 importance directly.
  imp <- importance(ivf.model, type=1, scale=T)
  varImpPlot(ivf.model,type=2)
  dev.off()
  file.show(filename, title="Variable Importance for IVF")
}
|
238abe0fd2380451d5181ae359b23f262f8877b8
|
33a76f0df4c925f21d3d2b986c1181bd8a866c84
|
/Practice/NeuralPrac.R
|
7124135b1f1423de4d1ac130a69efda577f5c01d
|
[] |
no_license
|
sjngjang/SJ_MaC_P
|
6a555d1cce7ba4ef306238595a61539ce4af5a73
|
f7e1d1dc26ea70f062ed6d6319184ef9767d7bbf
|
refs/heads/master
| 2021-01-09T05:48:16.090882
| 2017-02-03T09:46:11
| 2017-02-03T09:46:11
| 80,811,282
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 926
|
r
|
NeuralPrac.R
|
# Practice script: fit neural networks ('nnet' and 'neuralnet') to classify
# accident type (suicide/violence vs. general) from 30 answer variables.
getwd()
prob<- read.csv("Practice/problem.csv", header=T, stringsAsFactors = F)
head(prob)
#---------- 1ver to normalize
# NOTE(review): dividing by 5 maps values into [0, 1] only if the answers lie
# in [0, 5] — confirm the scale of ans1..ans30 in the source data.
for (i in 1:30) {
  #change variables to 0 to 1
  prob[i]<-prob[i]*(1/5)
}
head(prob)
#------ 2ver to normalize
#normalization function
# NOTE(review): 'normalize' is defined but never applied below; only the
# divide-by-5 version above is actually used.
normalize<-function(x){
  return ((x-min(x))/diff(range(x)))
}
#change the variables 'general, suicide, violence' --> 'yes, no'
prob$accident2<-with(prob, ifelse(accident=="suicide" | accident=="violence", 1,0))
head(prob)
#test the model with 'nnet'
# NOTE(review): install.packages() inside a script re-installs on every run;
# normally installation is done once outside the script.
install.packages("nnet")
library(nnet)
# Drop column 31 — assumes the original 'accident' label is the 31st column,
# leaving 'accident2' as the outcome; confirm against the CSV layout.
prob<-prob[-31]
m1 <-nnet(accident2 ~ ., data=prob, size=10)
r1<-predict(m1, prob)
head(r1)
cbind(prob$accident2, r1>0.5)
# Count of misclassified observations at the 0.5 cutoff (training data).
sum(as.numeric(r1>0.5) != prob$accident2)
#diff Pack
install.packages("neuralnet")
library(neuralnet)
# Build the formula accident2 ~ ans1 + ... + ans30 programmatically, since
# neuralnet (older versions) did not accept the '.' shorthand.
xnam<-paste0("ans", 1:30)
fmla<-as.formula(paste("accident2~ ", paste(xnam, collapse="+")))
m2<-neuralnet(fmla, data=prob, hidden=10)
plot(m2)
|
b1aee03b9c7b6eea9fcb9c600f92419cb617675a
|
fbca0cb26d06e18dd5ff84233b6378633a4340fd
|
/R/scales.R
|
b44a151be42d30cb9b7b983e63633683dc7d673f
|
[] |
no_license
|
bvancil/paletti
|
894381b0d90b869e7278f905d61aca44ff3c4ae8
|
a557e7684e908135c1cc9b711164ce4a9c41afb6
|
refs/heads/master
| 2022-11-03T06:34:57.247058
| 2020-06-23T14:17:44
| 2020-06-23T14:17:44
| 274,422,324
| 0
| 0
| null | 2020-06-23T14:07:25
| 2020-06-23T14:07:24
| null |
UTF-8
|
R
| false
| false
| 6,780
|
r
|
scales.R
|
#' Create the _pal function for ramped colors
#'
#' This wil create a function, that makes the palette with ramped colours.
#'
#' @param hex_object Either a character vector with hex code, or a list that
#' contains the character vectors with hex codes. List elements should be
#' named.
#'
#' @return A function that will create the ramped colors from the color vector,
#' or of one of the vectors in the list. The function has the following
#' parameters:
#'
#' `palette`, a character that indicates which of the color vectors to use.
#'
#' `alpha`, the desired transparancy.
#'
#' `reverse`, if TRUE, the direction of the colours is reversed.
#'
#' @examples
#' # devtools::install_github("RopenScilabs/ochRe")
#' # devtools::install_github("EdwinTh/dutchmasters")
#'
#' # use get_pal on a single character vector
#' my_pal <- get_pal(c("#701B06", "#78A8D1", "#E3C78F"))
#' filled.contour(volcano,
#' color.palette = my_pal(),
#' asp=1)
#'
#' # or on a list with multiple vectors
#' ochRe_pal <- get_pal(ochRe::ochre_palettes)
#' dutchmasters_pal <- get_pal(dutchmasters::dutchmasters)
#'
#' filled.contour(volcano,
#' color.palette = ochRe_pal(),
#' asp=1)
#'
#' filled.contour(volcano,
#' color.palette = dutchmasters_pal("anatomy", reverse = TRUE),
#' asp=1)
#' @export
get_pal <- function(hex_object) {
  if (is.list(hex_object)) {
    # A named list of palettes: validate structure and colour codes, then
    # return a "pal_list" object bundling the generator with the palettes.
    check_valid_list(hex_object)
    check_valid_color_list(hex_object)
    f <- function(palette,
                  palette_list,
                  alpha = 1,
                  reverse = FALSE) {
      pal <- palette_list[[palette]]
      if (reverse){
        pal <- rev(pal)
      }
      # NOTE(review): 'alpha' is passed positionally to colorRampPalette(),
      # whose second argument (forwarded to colorRamp) is 'bias', not a
      # transparency level — confirm the intended alpha behaviour.
      return(colorRampPalette(pal, alpha))
    }
    ret <- list(pal_func = f,
                palette_list = hex_object)
    class(ret) <- "pal_list"
    ret
  } else if (is.character(hex_object)) {
    # A single vector of hex codes: return a "pal_vec" generator closing
    # over the vector.
    check_valid_color_vec(hex_object)
    f <- function(alpha = 1,
                  reverse = FALSE) {
      if (reverse){
        hex_object <- rev(hex_object)
      }
      # NOTE(review): same positional alpha/bias concern as above.
      return(colorRampPalette(hex_object, alpha))
    }
    class(f) <- "pal_vec"
    f
  } else {
    stop("hex_object should be either a list or a character vector",
         call. = FALSE)
  }
}
#' Create the scale_color_ and scale_fill_ functions
#'
#' This wil create the `scale_color_` and `scale_fill_` functions, to be applied
#' in `ggplot2`. It sets up the color palettes.
#'
#' @param pal_object The output of the `get_pal` function.
#' @param palette_list The list that contains the character vectors with color
#' codes. List elements should be named.
#'
#' @return A function that can be used to create color scale in an object of
#' class `ggplot2`. The function has the following parameters:
#'
#' `palette`, a character that indicates which of the color vectors to use.
#'
#' `alpha`, the desired transparancy.
#'
#' `reverse`, if TRUE, the direction of the colours is reversed.
#'
#' `discrete`, whether to use a discrete colour palette.
#'
#' `...`` additional arguments to pass to scale_color_gradientn
#'
#' @importFrom ggplot2 scale_colour_manual
#'
#' @examples
#' # devtools::install_github("RopenScilabs/ochRe")
#' # devtools::install_github("EdwinTh/dutchmasters")
#' library(ggplot2)
#'
#' ochRe_pal <- get_pal(ochRe::ochre_palettes)
#' scale_colour_ochRe <- get_scale_colour(ochRe::ochre_palettes,
#' ochRe_pal)
#'
#' dutchmasters_pal <- get_pal(dutchmasters::dutchmasters)
#' scale_fill_dutchmasters <- get_fill_colour(dutchmasters::dutchmasters,
#' dutchmasters_pal)
#'
#' ggplot(mtcars, aes(mpg, wt)) +
#' geom_point(aes(colour = factor(cyl)), size = 4) +
#' scale_colour_ochRe()
#' ggplot(mtcars, aes(mpg, wt)) +
#' geom_point(aes(colour = hp)) +
#' scale_colour_ochRe(palette = "lorikeet", discrete = FALSE)
#' ggplot(data = mpg) +
#' geom_point(mapping = aes(x = displ, y = hwy, color = class)) +
#' scale_colour_dutchmasters(palette="view_of_Delft")
#' ggplot(diamonds) + geom_bar(aes(x = cut, fill = clarity)) +
#' scale_fill_dutchmasters(palette = "anatomy")
#' @export
#'
#' @importFrom ggplot2 discrete_scale scale_color_gradientn
# Dispatch on the palette object's class to build the matching colour-scale
# constructor; error out for anything that is not a pal_vec/pal_list.
get_scale_color <- function(pal_object) {
  if (inherits(pal_object, "pal_vec")) {
    return(get_scale_vec(pal_object))
  }
  if (inherits(pal_object, "pal_list")) {
    return(get_scale_list(pal_object))
  }
  stop("`pal_object` should be of class 'pal_vec' or 'pal_list'")
}
# Build a ggplot2 scale function (colour or fill) from a 'pal_vec' palette
# object (a generator closing over a single colour vector).
#
# Returns a function with arguments: discrete (use a discrete scale?),
# alpha, reverse (forwarded to the palette generator), and ... (forwarded
# to the gradient scale constructor when discrete = FALSE).
get_scale_vec <- function(pal_object,
                          scale_type = "colour") {
  function(discrete = TRUE,
           alpha = 1,
           reverse = FALSE,
           ...) {
    if (discrete) {
      discrete_scale(scale_type,
                     "thank_you_ochRe_team",
                     palette = pal_object(alpha = alpha,
                                          reverse = reverse))
    } else {
      # FIX: ifelse() cannot select between closures (rep() on a closure
      # errors at run time); use a plain if/else instead.
      func <- if (scale_type == "colour") {
        scale_color_gradientn
      } else {
        scale_fill_gradientn
      }
      # FIX: forward '...' to the scale constructor, as documented; the
      # palette generator only accepts 'alpha' and 'reverse'.
      func(colours = pal_object(alpha = alpha,
                                reverse = reverse)(256),
           ...)
    }
  }
}
# Build a ggplot2 scale function (colour or fill) from a 'pal_list' palette
# object (a generator plus a named list of palettes).
#
# Returns a function with arguments: palette (name within the list; defaults
# to the first entry), discrete, alpha, reverse, and ... (forwarded to the
# gradient scale constructor when discrete = FALSE).
get_scale_list <- function(pal_object,
                           scale_type = "colour") {
  palette_func <- pal_object$pal_func
  palette_list <- pal_object$palette_list
  function(palette = NULL,
           discrete = TRUE,
           alpha = 1,
           reverse = FALSE,
           ...) {
    # Fall back to the first palette in the list when none is requested.
    if (is.null(palette)) palette <- names(palette_list)[1]
    if (discrete) {
      discrete_scale(scale_type,
                     "thank_you_ochRe_team",
                     palette = palette_func(palette = palette,
                                            palette_list = palette_list,
                                            alpha = alpha,
                                            reverse = reverse))
    } else {
      # FIX: ifelse() cannot select between closures (rep() on a closure
      # errors at run time); use a plain if/else instead.
      func <- if (scale_type == "colour") {
        scale_color_gradientn
      } else {
        scale_fill_gradientn
      }
      # FIX: forward '...' to the scale constructor, as documented; the
      # palette generator does not accept extra arguments.
      func(colours = palette_func(palette = palette,
                                  palette_list = palette_list,
                                  alpha = alpha,
                                  reverse = reverse)(256),
           ...)
    }
  }
}
get_scale_colour <- get_scale_color
#' @rdname get_scale_color
#' @export
get_scale_fill <- function(pal_object) {
  # Same dispatch as get_scale_color(), but producing fill scales.
  if (inherits(pal_object, "pal_vec")) {
    return(get_scale_vec(pal_object, "fill"))
  }
  if (inherits(pal_object, "pal_list")) {
    return(get_scale_list(pal_object, "fill"))
  }
  stop("`pal_object` should be of class 'pal_vec' or 'pal_list'")
}
|
92aea6149c8dc8b0e13a24a94aceb13bf52c4082
|
2a7e77565c33e6b5d92ce6702b4a5fd96f80d7d0
|
/fuzzedpackages/PReMiuM/man/heatDissMat.Rd
|
59655f5d11064eb409db68d03625aa5e8323f6d9
|
[] |
no_license
|
akhikolla/testpackages
|
62ccaeed866e2194652b65e7360987b3b20df7e7
|
01259c3543febc89955ea5b79f3a08d3afe57e95
|
refs/heads/master
| 2023-02-18T03:50:28.288006
| 2021-01-18T13:23:32
| 2021-01-18T13:23:32
| 329,981,898
| 7
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,670
|
rd
|
heatDissMat.Rd
|
\name{heatDissMat}
\alias{heatDissMat}
\title{Plot the heatmap of the dissimilarity matrix}
\description{Function to plot the heatmap of the dissimilarity matrix}
\usage{
heatDissMat(dissimObj, main=NULL, xlab=NULL, ylab=NULL)
}
\arguments{
\item{dissimObj}{An object of class dissimObj.}
\item{main}{The usual plot option, to be passed to the heatmap function.}
\item{ylab}{The usual plot option, to be passed to the heatmap function.}
\item{xlab}{The usual plot option, to be passed to the heatmap function.}
}
\value{
Plot of the heatmap of the dissimilarity matrix. This function uses the function 'heatmap' of package 'stats'. Note that this function has not been optimised for large datasets.
}
\section{Authors}{
Silvia Liverani, Department of Epidemiology and Biostatistics, Imperial College London and MRC Biostatistics Unit, Cambridge, UK
Maintainer: Silvia Liverani <liveranis@gmail.com>
}
\references{
Silvia Liverani, David I. Hastie, Lamiae Azizi, Michail Papathomas, Sylvia Richardson (2015). PReMiuM: An R Package for Profile Regression Mixture Models Using Dirichlet Processes. Journal of Statistical Software, 64(7), 1-30. URL http://www.jstatsoft.org/v64/i07/.
}
\examples{
\dontrun{
# generate simulated dataset
generateDataList <- clusSummaryBernoulliDiscreteSmall()
inputs <- generateSampleDataFile(generateDataList)
# run profile regression
runInfoObj<-profRegr(yModel=inputs$yModel, xModel=inputs$xModel,
nSweeps=10, nBurn=2000, data=inputs$inputData, output="output",
covNames=inputs$covNames,nClusInit=15)
# compute dissimilarity matrix
dissimObj<-calcDissimilarityMatrix(runInfoObj)
# plot heatmap
heatDissMat(dissimObj)
}
}
|
cf4a394db78efd80715f7def271f65c3f6adf059
|
28f0bd2591c656d5fd2b7cc71b0490d50873aae4
|
/man/jobRemove.Rd
|
dc8ef24a09611363977a17cb0cbb45942ff277df
|
[] |
no_license
|
JiaxiangBU/rstudioapi
|
6c05581a4fb03be25fd31d147a18387141da9784
|
e49222ab99dee37e7c7e33827f13dbea47c07693
|
refs/heads/master
| 2020-04-14T00:32:19.794396
| 2018-12-19T17:55:20
| 2018-12-19T17:55:20
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 276
|
rd
|
jobRemove.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/jobs.R
\name{jobRemove}
\alias{jobRemove}
\title{Remove a Job}
\usage{
jobRemove(job)
}
\arguments{
\item{job}{The ID of the job to remove.}
}
\description{
Remove a job from RStudio's Jobs pane.
}
|
4cd51b8505be5c7b96130a3e035198b0803fbbe4
|
7474309f742266ed7a7e120a1888e8f5e4f36aba
|
/project-2/KNN_R_Project.R
|
20a56905dab1b8d0809071e7c1b368cca5026262
|
[] |
no_license
|
hcmora/knn-classification-exercises
|
e9ad4854d495a44ffa3a401060732e73273620b1
|
0fc436fe4b3fdb3fd09d3a72609447e176852809
|
refs/heads/master
| 2020-04-27T14:52:32.000416
| 2019-03-11T21:49:54
| 2019-03-11T21:49:54
| 174,423,694
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,704
|
r
|
KNN_R_Project.R
|
setwd('R Studio/KNN Project')
project = read.csv('KNN_Project_Data',header = TRUE)
head(project,1)
# Still haven't worked out how to make the pairs plot differentiate on the target class, for some reason
# it only colors the first argument
# pairs(project, main = "Project Database", pch=21,bg = c("green3","blue")[unclass(project$TARGET.CLASS)])
# Workaround for colors
project.plot = project
project.plot[11] = project[11]+1
pairs(project.plot, main = "Project Database", pch=21,bg = c("blue","orange")[unclass(project.plot$TARGET.CLASS)])
# We proceed to scale the predictors
project2 = project[-11]
project2 = scale(project2)
set.seed(5)
train = sample(1:1000,700,replace = FALSE)
train.X = cbind(project2)[train,]
test.X = cbind(project2)[-train,]
train.TC = project$TARGET.CLASS[train]
test.TC = project$TARGET.CLASS[-train]
# KNN with k = 1
library(class)
knn.pred = knn(train.X,test.X,train.TC,k=1)
table(test.TC, knn.pred)
mean(test.TC == knn.pred)
# Now we iterate to determine the best k value
error_rate = rep(0,40)
k_range = 1:40
for (i in k_range) {
knn.pred = knn(train.X,test.X,train.TC,k=i)
error_rate[i] = mean(test.TC != knn.pred)
}
plot(error_rate~k_range, pch=21,bg = c("red"), xlab="K", ylab="Error Rate",main="Error Rate vs K")
lines(k_range,error_rate,lty='dashed', col="blue")
grid (NULL,NULL, lty = 6, col = "gray")
# We check that, in this case, for k = 13 we get the best results
# Special note: the results differ from the Python version, because of the randomness when selecting
# the data to train the KNN model!
knn.pred = knn(train.X,test.X,train.TC,k=13)
table(test.TC, knn.pred)
mean(test.TC == knn.pred)
|
88d3246ef8be3fb496373a75c414548642ddc9ac
|
4885d9e77e11d5b63d0b1bfaf08f33934b60d770
|
/R/plot.R
|
6178e7c68039552d14c40016e16c1ded92b6460d
|
[
"Apache-2.0"
] |
permissive
|
r-spatial/sftime
|
7db4f44fe8f425df631867b7b8bd35a902f0015b
|
423f39e1e9d7225392e2b3241e8b7926b7b74f15
|
refs/heads/main
| 2023-07-06T03:45:35.136467
| 2023-06-28T08:34:00
| 2023-06-28T08:34:00
| 212,058,235
| 45
| 4
|
Apache-2.0
| 2023-06-28T07:32:40
| 2019-10-01T09:29:51
|
R
|
UTF-8
|
R
| false
| false
| 2,594
|
r
|
plot.R
|
#' Plots an \code{sftime} object
#'
#' \code{plot.sftime}
#'
#' @aliases plot
#' @param x The \code{\link[=st_sftime]{sftime}} object to be plotted.
#' @param y A character value; The variable name to be plotted; if missing, the
#' first variable is plotted.
#' @param ... Additional arguments; Passed on to \code{\link[sf:plot]{plot.sf}}.
#' @param number A numeric value; The number of panels to be plotted, cannot be
#' larger than the number of timestamps; ignored when \code{tcuts} is provided.
#' @param tcuts predefined temporal ranges assigned to each map; if missing,
#' will be determined as equal spans according to \code{number}.
#'
#' @importFrom graphics plot
#'
#' @return Returns \code{NULL} and creates as side effect a plot for \code{x}.
#' @examples
#' set.seed(123)
#' coords <- matrix(runif(100), ncol = 2)
#' g <- st_sfc(lapply(1:50, function(i) st_point(coords[i, ]) ))
#' sft <- st_sftime(a = 1:50, g, time = as.POSIXct("2020-09-01 00:00:00") + 0:49 * 3600 * 6)
#'
#' plot(sft)
#'
#' @export
plot.sftime <- function(x, y, ..., number = 6, tcuts) {
  # Default to the first attribute column; it must exist in x.
  if (missing(y))
    y <- colnames(x)[[1]]
  stopifnot(y %in% colnames(x))
  ts <- st_time(x)
  if(any(is.na(ts))) {
    message("[INFO] there are ", sum(is.na(ts)), " `NA` values in the active time column of `x`. These rows are dropped.")
  }
  x <- x[!is.na(ts), ]
  ts <- st_time(x)
  # Order the timestamps once; the ordered factor is needed both to derive
  # the time classes and to label the panels below.
  # FIX: previously ts_ord/ts_fac were created only when 'tcuts' was missing,
  # so supplying 'tcuts' errored at the names(data) assignment below.
  ts_ord <- order(ts)
  ts_fac <- tryCatch(as.factor(ts[ts_ord]), error = function(e) e)
  if (inherits(ts_fac, "error")) {
    # Timestamps that cannot be coerced to a factor directly (e.g. some
    # interval classes) are converted via their character representation.
    ts_fac <-
      factor(
        as.character(ts[ts_ord]),
        levels = unique(as.character(ts[ts_ord])),
        ordered = TRUE
      )
  }
  if (missing(tcuts)) {
    # Derive 'number' equal-width cuts over the distinct time stamps.
    ts_nlv <- length(levels(ts_fac))
    if (number > ts_nlv) {
      number <- ts_nlv
      message("[INFO] Fewer time stamps in the data than asked for; argument 'number' set to: ", ts_nlv)
    }
    tcuts <- seq(1, ts_nlv, length.out = number + 1)
    timeclass <- findInterval(as.numeric(ts_fac), tcuts, rightmost.closed = TRUE)
  } else {
    number <- length(tcuts) - 1
    # FIX: compute interval membership on the *ordered* timestamps so it
    # lines up with 'd_ord' below, which is ordered by time as well.
    timeclass <- findInterval(ts[ts_ord], tcuts, rightmost.closed = TRUE)
  }
  # One column per time class: each column holds the attribute values of its
  # class and NA elsewhere, producing one map panel per class.
  d_ord <- as.data.frame(x)[ts_ord, y, drop = FALSE]
  data <- d_ord
  if (number > 1) {
    for (i in 2:number) {
      data <- cbind(data, d_ord[, 1])
      data[timeclass != i, i] = NA
      if (i == number)
        data[timeclass != 1, 1] <- NA # deal with first time class
    }
  }
  # Label each panel with the first timestamp falling in its class.
  names(data) <- ts_fac[!duplicated(timeclass)]
  # NOTE(review): geometries are taken in the original (unordered) row order
  # while 'data' is time-ordered — confirm rows align for unsorted input.
  d <- sf::st_sf(data, geometry = sf::st_geometry(x))
  plot(d, ...)
  NULL
}
|
56764126274ca741a7faa9f3ec21347fc5317968
|
9a7a84f823afcece5ff704b5778581f4e8d3d189
|
/Individual_Functions/create_sim_repped.R
|
51cdc30a38218c1be8f9d9d59755d2b43864df87
|
[] |
no_license
|
multach87/FreqInBayes0.1
|
9068f877cc8cec049734f4d32c2f635749d3625d
|
f5155280616e60c87db955e5ed34ccfc3c6bda7e
|
refs/heads/master
| 2020-09-25T02:59:05.538917
| 2020-01-04T17:31:01
| 2020-01-04T17:31:01
| 225,902,525
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 830
|
r
|
create_sim_repped.R
|
# Replicate each simulation condition 'num.iters' times.
#
# sim.structure: data.frame of simulation conditions (one row per condition);
#   the first 'num.pars' columns are copied into the output.
# num.conds: number of conditions (defaults to nrow(sim.structure)).
# num.pars: number of parameter columns in 'sim.structure'.
# num.iters: number of replicates per condition.
# Returns a data.frame with num.conds * num.iters rows and num.pars + 2
# columns ("seed" and "iter" appended).
create_sim_repped <- function(sim.structure , num.conds = nrow(sim.structure) ,
                              num.pars , num.iters) {
        sim.structure.repped <- as.data.frame(matrix(ncol = (num.pars + 2) ,
                                                     nrow = (num.conds * num.iters)))
        # NOTE(review): these column names assume num.pars == 4 — confirm
        # before reusing with a different parameter layout.
        colnames(sim.structure.repped) <- c("beta", "n" , "eta.x" , "eta.y" ,
                                            "seed" , "iter")
        # Copy each condition row into its block of num.iters output rows.
        for(i in 1:nrow(sim.structure)) {
                sim.structure.repped[((num.iters * (i - 1)) + 1) : (num.iters * i) , (1 : num.pars)] <-
                        purrr::map_dfr(seq_len(num.iters) , ~sim.structure[i , ])
        }
        # NOTE(review): "seed" is filled with rnorm() draws (non-integer) —
        # confirm downstream code accepts non-integer seeds.
        sim.structure.repped[ , "seed"] <- rnorm((num.conds * num.iters))
        # FIX: the iteration index was hard-coded as rep(1:5); generalize so
        # labels are correct for any num.iters, repeated once per condition.
        sim.structure.repped[ , "iter"] <- rep(seq_len(num.iters) , times = num.conds)
        return(sim.structure.repped)
}
|
ca658a83918d27533fc1d582f80c3e39491bbe18
|
7a1fab30064fe5debe1499295c270453610396db
|
/R/vars.R
|
22c7bf790e0e22601f10af660f320044cfbd431f
|
[
"Apache-2.0"
] |
permissive
|
dkgaraujo/brazilianbanks
|
195ef670ebecb2484d3dffdd3d4635678dd2dd72
|
708dd27ec7ab060965fcfb6c8802c0f02cccdd38
|
refs/heads/main
| 2023-05-23T12:31:42.738173
| 2023-02-23T17:12:19
| 2023-02-23T17:12:19
| 373,839,928
| 7
| 3
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,749
|
r
|
vars.R
|
# Copyright 2022 Douglas Kiarelly Godoy de Araujo
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#' @import utils
# Declare column/variable names used in non-standard evaluation (dplyr etc.)
# so that R CMD check does not flag them as undefined global variables.
# FIX: removed a duplicated "where" entry from the vector.
utils::globalVariables(
  c(
    ".", "Quarter", "c0", "ni",
    "var_names", "lid", "c3",
    "info_id", "value", "where",
    "var_codes", "a", "d", "td",
    "ty", "f", "sel", "report",
    "trel", "name_trel", "ifd"
  )
)
# Base URL for downloading IF.data files from the Brazilian central bank.
ifdata_url_base <- "https://www3.bcb.gov.br/ifdata/rest/arquivos?nomeArquivo="
# Regulatory capital requirement schedule by validity period (Basel III
# phase-in in Brazil); NA max_date means "still in force".
capital_requirements <- tibble::tribble(
  # source: https://www.bcb.gov.br/pre/normativos/res/2013/pdf/res_4193_v1_O.pdf
  ~min_date, ~max_date, ~CET1_req, ~Tier1_req, ~Total_Capital_req, ~CCoB, ~max_CCyB, ~effectiveCCyB, ~DSIB_req, ~LevRatio_req,
  as.Date("2013-10-01"), as.Date("2014-12-31"), 0.045, 0.055, 0.11, 0.00625, 0.00625, 0, 0, NA,
  as.Date("2015-01-01"), as.Date("2015-12-31"), 0.045, 0.06, 0.11, 0.00625, 0.00625, 0, 0, NA,
  as.Date("2016-01-01"), as.Date("2016-12-31"), 0.045, 0.06, 0.09875, 0.00625, 0.00625, 0, 0, NA,
  as.Date("2017-01-01"), as.Date("2017-12-31"), 0.045, 0.06, 0.0925, 0.0125, 0.0125, 0, 0.0025, NA,
  as.Date("2018-01-01"), as.Date("2018-12-31"), 0.045, 0.06, 0.08625, 0.01875, 0.01875, 0, 0.005, NA,
  as.Date("2019-01-01"), NA, 0.045, 0.06, 0.08, 0.025, 0.025, 0, 0.01, NA
)
|
a8d7879fabc54848fa215816f2ae10aff764d8b4
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/GD/examples/riskmean.Rd.R
|
80a145a6459499c31548f2525eee5d7cc59a66bf
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 351
|
r
|
riskmean.Rd.R
|
# Auto-extracted example code from the 'GD' package Rd file (riskmean.Rd).
library(GD)
### Name: riskmean
### Title: Geographical detectors: risk means in risk detector.
### Aliases: riskmean print.riskmean plot.riskmean
### ** Examples
# Risk means of NDVI change explained by climate zone and mining intensity.
rm1 <- riskmean(NDVIchange ~ Climatezone + Mining, data = ndvi_40)
rm1
plot(rm1)
## No test:
# Same analysis using all remaining columns as explanatory variables.
data <- ndvi_40[,1:3]
rm2 <- riskmean(NDVIchange ~ ., data = data)
rm2
## End(No test)
|
0987c4bded8d7f6736e3256e865e67896ce1264e
|
e43e0d9b80fb33919ec9b253b5ebdb0dda7e12f8
|
/R/prepare_data.R
|
2de593aac57d314f2f724d5b796bf345238b2165
|
[] |
no_license
|
jmoss13148/sandpr
|
ce1034e864e73155fd41f3193eb5f213f0e66ade
|
6e4bd6ca262e13f2fcf76133d353040056f4313e
|
refs/heads/master
| 2021-08-23T21:22:20.999081
| 2017-12-06T16:09:41
| 2017-12-06T16:09:41
| 110,973,284
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 3,243
|
r
|
prepare_data.R
|
##' @description Prepare data for neural network model
##'
##' @import dplyr
##' @import tidyr
##'
##' @return a dataframe with the relavent columns
##'
##' @export
prepare_data <- function(x, y) {
  # Prepare fundamentals (x) and price (y) data for a neural network model.
  #
  # Steps: drop bookkeeping columns, parse dates, restrict fundamentals to
  # 2012-2015, keep only companies whose price history spans end-2013 to
  # end-2016 and that report fundamentals for 2015-12-31, impute missing
  # ratios with column means, and z-score all numeric columns.
  #
  # Args:
  #   x: fundamentals data.frame (columns include Ticker.Symbol, Period.Ending).
  #   y: price data.frame (columns include symbol, date in "%m/%d/%y" format).
  # Returns:
  #   list(x = cleaned fundamentals, y = price data with parsed dates).
  ## Remove variables we don't want
  x <- x %>% select(-X, -For.Year)
  ## Coerce identifier columns to character
  x$Ticker.Symbol <- as.character(x$Ticker.Symbol)
  y$symbol <- as.character(y$symbol)
  ## Already have "%Y-%m-%d" in fundamentals data
  y$date <- as.Date(as.character(y$date), format = "%m/%d/%y")
  x$Period.Ending <- as.Date(as.character(x$Period.Ending), format = "%Y-%m-%d")
  ## Keep fundamentals reported between 2012-01-01 and 2015-12-31
  x <- x %>% filter(Period.Ending <= "2015-12-31" & Period.Ending >= "2012-01-01")
  ## Only keep companies whose price data spans end of 2013 to end of 2016.
  ## A few candidate dates are checked because year-end may fall on a weekend.
  lower_dates <- as.Date(c("2013-12-31", "2013-12-30", "2013-12-29"))
  upper_dates <- as.Date(c("2016-12-31", "2016-12-30", "2016-12-29"))
  companies <- unique(y$symbol)
  spans_range <- vapply(companies, function(sym) {
    sym_dates <- y$date[y$symbol == sym]
    any(lower_dates %in% sym_dates) && any(upper_dates %in% sym_dates)
  }, logical(1))
  x <- x %>% filter(Ticker.Symbol %in% companies[spans_range])
  ## Filter out companies without a fundamentals report for 2015-12-31
  possible_companies <- unique(x$Ticker.Symbol)
  has_final_report <- vapply(possible_companies, function(sym) {
    as.Date("2015-12-31") %in% x$Period.Ending[x$Ticker.Symbol == sym]
  }, logical(1))
  x <- x %>% filter(Ticker.Symbol %in% possible_companies[has_final_report])
  ## Impute missing values in selected ratio columns with the column mean
  impute_cols <- c("Cash.Ratio",
                   "Current.Ratio",
                   "Quick.Ratio",
                   "Earnings.Per.Share",
                   "Estimated.Shares.Outstanding")
  for (col in impute_cols) {
    x[[col]][is.na(x[[col]])] <- mean(x[[col]], na.rm = TRUE)
  }
  ## Normalize (z-score) every numeric column.
  ## NOTE: the original used mutate_each_()/funs(), which were removed from
  ## dplyr; across(all_of(...)) is the supported equivalent.
  numeric_names <- names(x)[vapply(x, is.numeric, logical(1))]
  x <- x %>% mutate(across(all_of(numeric_names), ~ as.vector(scale(.x))))
  list("x" = x, "y" = y)
}
|
9dbf8c62591b05b666899ea37312feaa823a4f78
|
bb6e0f698c434945a622b5b605a025ae92dc7729
|
/ntc-presentation/Helper Basic Plots.R
|
73ba0e7ff43382d811a5c2198beb29282fe964b5
|
[
"CC0-1.0"
] |
permissive
|
mvparrot/vis-serve
|
933f2f211b817b00177cd7b53b89f324ed882c0e
|
f2c307837d4d89054073864dedc8b352613c7920
|
refs/heads/master
| 2021-01-17T10:19:33.786055
| 2016-09-29T11:09:43
| 2016-09-29T11:09:43
| 56,582,980
| 2
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,035
|
r
|
Helper Basic Plots.R
|
#================================================
#--- Basic Plots
#--- Alwin Wang
#================================================
# Builds reusable tennis-court outlines (top-down, service-box, side-on,
# behind, and 3D views) as ggplot2/plotly objects for later composition.
# All coordinates are in metres; x runs along the court, y across it,
# z is height.
#--- Packages Required
# require(ggplot2)
# require(plotly)
#--- Outline of the court
# Vertices tracing the doubles/singles lines of a full court (z = 0).
court_trace <- data.frame(x = c(-11.89, -11.89, 0, 0, 0, 11.89, 11.89, -11.89, -11.89, 11.89, 11.89, -11.89, -6.4, -6.4, 6.4, 6.4, 6.4, -6.4),
y = c(5.49, -5.49, -5.49, 5.49, -5.49, -5.49, 5.49, 5.49, 4.115, 4.115, -4.115, -4.115, -4.115, 4.115, 4.115, -4.115, 0, 0),
z = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0))
# Net posts and cord: 1.07 m at the posts, dipping to 0.914 m at centre.
net_trace <- data.frame(x = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
y = c(-5.49,-5.49, -6.4, -6.4, -5.49, 0, 5.49, 6.4, 6.4, 5.49, 5.49),
z = c(1.07, 0, 0, 1.07, 1.07, 0.914, 1.07, 1.07, 0, 0, 1.07))
# One half of the court plus service boxes, for the zoomed service view.
service_trace <- data.frame(x = c(-8, 0, 0, 0, -6.4, -6.4, 0, -6.4, -6.4, -6.4, -6.4, -6.4, 0, 0, -8),
y = c(-5.49, -5.49, -4.115, 4.115, 4.115, 0, 0, 0, -4.115, -5.49, 5.49, -4.115, -4.115, 5.49, 5.49),
z = c(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ,0 ,0))
# Axis tick positions and human-readable labels shared by all views.
axis_labels <- data.frame(x.break = c(-21.89:-11.89, -6.4, 0, 6.4, 11.89),
x.label = c("-10m","","","","","-5m","","","","",
"Baseline","Service Line","Net","Service Line","Baseline"),
y.break = c(-5.49,-4.115,0,4.115,5.49),
y.label = c("Doubles", "Singles","Centre","Singles","Doubles"),
z.break = c(0,0.992,2,3,4),
z.label = c("Ground", "Net", "2m", "3m", "4m"))
#--- ggplot2 global options
theme_set(theme_bw())
# Court background theme: blank theme with all lines/axis text removed,
# built by mutating a copy of theme_bw().
theme_court <- theme_bw()
theme_court$line <- element_blank()
theme_court$axis.text <- element_blank()
theme_court$axis.title <- element_blank()
#--- Top down court view
court_topdown <- ggplot() +
labs(x = "x direction", y = "y direction") +
scale_x_continuous(breaks = axis_labels$x.break,
labels = axis_labels$x.label) +
scale_y_continuous(breaks = axis_labels$y.break,
labels = axis_labels$y.label) +
geom_path(data = court_trace, aes(x = x, y = y), color = 'darkgrey', size = 1, alpha = 0.75) +
geom_path(data = net_trace, aes(x = x, y = y), color = 'darkgrey', size = 1, lineend = 'round') +
coord_fixed() +
theme(axis.title = element_blank())
#--- Service box
court_service <- ggplot() +
labs(x = "x direction", y = "y direction") +
scale_x_continuous(breaks = axis_labels$x.break,
labels = axis_labels$x.label) +
scale_y_continuous(breaks = axis_labels$y.break,
labels = axis_labels$y.label) +
geom_path(data = service_trace, aes(x = x, y = y), color = 'darkgrey', size = 1, alpha = 0.75) +
geom_path(data = net_trace, aes(x = x, y = y), color = 'darkgrey', size = 1, lineend = 'round') +
coord_fixed() +
theme(axis.title = element_blank())
#--- Side on court view (x vs height z)
court_sideon <- ggplot() +
labs(x = "x direction", y = "z direction") +
scale_x_continuous(breaks = axis_labels$x.break,
labels = axis_labels$x.label) +
scale_y_continuous(breaks = axis_labels$z.break,
labels = axis_labels$z.label) +
geom_path(data = court_trace, aes(x = x, y = z), color = 'darkgrey', size = 1) +
geom_path(data = net_trace, aes(x = x, y = z), color = 'darkgrey', size=1, lineend = 'round') +
coord_fixed() +
theme(axis.title = element_blank())
#--- Behind court view (y vs height z)
court_behind <- ggplot() +
scale_x_continuous(breaks = axis_labels$y.break,
labels = axis_labels$y.label) +
scale_y_continuous(breaks = axis_labels$z.break,
labels = axis_labels$z.label) +
labs(x = "y direction", y = "z direction") +
geom_path(data = court_trace, aes(x = y, y = z), color = 'darkgrey', size = 1) +
geom_path(data = net_trace, aes(x = y, y = z), color = 'darkgrey', size = 1) +
coord_fixed() +
theme(axis.title = element_blank())
#--- 3D court view
# NOTE(review): plot_ly is called with bare x, y, z (not ~x formulas); this
# relies on a pre-1.0 plotly API and would error under the modern API unless
# x/y/z objects exist in the calling environment — confirm plotly version.
court_3d <- plot_ly(x=x, y=y, z=z, data=court_trace, type="scatter3d", mode="lines") %>%
add_trace(x=x, y=y, z=z, data=net_trace, type="scatter3d", mode="lines") %>%
layout(scene=list(aspectmode="data"))
#--- Parallel Coordinates
parcoordlabel<-function (x, col = 1, lty = 1, lblcol="grey",...)
{
df <- as.data.frame(x)
pr <- lapply(df, pretty)
rx <- lapply(pr, range, na.rm = TRUE)
x <- mapply(function(x,r) {
(x-r[1])/(r[2]-r[1])
},
df, rx)
matplot(1L:ncol(x), t(x), type = "l", col = col, lty = lty,
xlab = "", ylab = "", axes = FALSE, ...)
axis(1, at = 1L:ncol(x), labels = colnames(x))
for (i in 1L:ncol(x)) {
lines(c(i, i), c(0, 1), col = "grey70")
text(c(i, i), seq(0,1,length.out=length(pr[[i]])), labels = pr[[i]],
xpd = NA, col=lblcol)
}
invisible()
}
|
c79783ec16450625689f2a03b2de104e8896205a
|
3d96222bc3bb07f94c074794aab6a7a79e0fdb40
|
/man/kh_clr.Rd
|
c5fd9773e6828f5f206a71cc71e94f8db50df3b1
|
[] |
no_license
|
k-hench/kh3d
|
2868a9e2d367515cada65d809e61da1b34f21fbe
|
570611316ff1495e5114525f8b2b6eac944d3ba9
|
refs/heads/master
| 2020-06-15T10:03:53.306677
| 2019-07-05T12:48:21
| 2019-07-05T12:48:21
| 195,268,389
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 365
|
rd
|
kh_clr.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/kh3d_basic.R
\docType{data}
\name{kh_clr}
\alias{kh_clr}
\title{kh color vector}
\format{An object of class \code{character} of length 2.}
\usage{
kh_clr
}
\description{
\code{kh_clr} is a combination of colors I like.
}
\details{
A vector containing blue and orange.
}
\keyword{datasets}
|
9720bf58461a130648b50f0cc0a07eddb4101962
|
4f769a3a6bb0a81892754a91618881d3c4ded208
|
/Week4/best.R
|
6f5c2701c3cb2aece867a936013d95b0e266a82c
|
[] |
no_license
|
frmont/CourseraRProgramming
|
24a5ea1cbacaaec818888cb1cd52920759b19f5e
|
0975be0f1c4c52b357fe3684550571218638eeda
|
refs/heads/master
| 2022-11-25T04:55:29.835236
| 2020-08-02T16:26:19
| 2020-08-02T16:26:19
| 262,094,350
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,638
|
r
|
best.R
|
# NOTE(review): hard-coded setwd() makes this script machine-specific;
# kept for behavioral compatibility, but prefer passing the data path in.
setwd("~/Desktop/GoogleDrive/R_Programming/CourseraRProgramming/Week4/rprog_data_ProgAssignment3-data")
best <- function(state, outcome) {
  # Find the hospital(s) in `state` with the lowest 30-day mortality rate
  # for the given `outcome`.
  #
  # Args:
  #   state: two-letter state abbreviation (must appear in the data).
  #   outcome: one of "heart attack", "heart failure", "pneumonia".
  # Returns:
  #   Character vector of best hospital name(s), sorted alphabetically.
  # Errors:
  #   stop("invalid state") / stop("invalid outcome") on bad input.
  data <- read.csv("outcome-of-care-measures.csv", colClasses = "character") ## Read outcome data
  ## Map each valid outcome to its 30-day mortality column in the raw file.
  ## FIX: the original also accepted "hospital" and "state" as outcomes,
  ## which then produced all-NA rates; only the three diseases are valid.
  outcome_cols <- c("heart attack" = 11, "heart failure" = 17, "pneumonia" = 23)
  if (!state %in% data[, 7]) {           # column 7 is the state
    stop('invalid state')
  }
  if (!outcome %in% names(outcome_cols)) {
    stop('invalid outcome')
  }
  st <- data[data[, 7] == state, ]       # rows for the requested state
  ## Rates are stored as character with "Not Available" entries;
  ## suppress the expected coercion warning.
  rates <- suppressWarnings(as.numeric(st[[outcome_cols[[outcome]]]]))
  minval <- min(rates, na.rm = TRUE)     # lowest 30-day death rate
  result <- st[[2]][which(rates == minval)]  # column 2 is the hospital name
  ## Alphabetical order breaks ties deterministically.
  sort(result)
}
|
265f31e7c6ef853f0b10291304337bdb86324d30
|
43c75ab90ac98e0a15b354d06660d8ebbc5b9175
|
/Archive/windowplots.tmp.R
|
6e443b5ac257f3ab1b483474d2e8f0819c93b9ff
|
[] |
no_license
|
pnandak/eRNA_project
|
c061a00f25c05289d3a7846598025cc9275cc28a
|
7ca1f033050444e777a7b135cdcc0d51d3ab7d28
|
refs/heads/master
| 2020-04-03T13:47:49.910471
| 2014-03-12T10:08:57
| 2014-03-12T10:08:57
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 17,757
|
r
|
windowplots.tmp.R
|
# --- Setup: load CAGE tag data, candidate regions, and preallocate the
# threshold-vs-windowsize result matrices used by the three FDR analyses below.
# NOTE(review): setwd() + rm(list=ls()) at the top make this script destructive
# to the calling session; acceptable only as a standalone batch script.
setwd('~/Harnett/TSS_CAGE_myfolder/')
rm(list=ls())
source('src/tss_cage_functions.R')
library(ggplot2)
#Load ROCR library for roc plots
library(ROCR,lib.loc='~/Harnett/R')
#define the number of rpgc and pvalue thresholds we'll try
rpgc.step.num<-100
pval.step.num<-20
#load the window table as 'z'
load('windowtable.robject.R')
load(file.accession.df)#load our data on the cage lines
load(file.cage.tag.rles)#load the actual cage data
cg<-cage.tag.rles
#load the regions we've defined in the makeregions script
# Positive/negative CRM sets, random intergenic controls, and the CAD3 sets.
regs=list(
pos=import(con='analysis/positive_8008.bed',asRangedData=F,seqinfo=si),
neg=import(con='analysis/negative_8008.bed',asRangedData=F,seqinfo=si),
int=import(con='analysis/random.intergenic.bed',asRangedData=F,seqinfo=si),
cad3pos=import(con='analysis/cad3.pos.bed',asRangedData=F,seqinfo=si),
cad3neg=import(con='analysis/cad3.neg.bed',asRangedData=F,seqinfo=si)
)
#backup z for when we subset
z.bak<-z
#create matrices for recording parameter thresholds
# Rows = candidate cutoffs, columns = window sizes; filled with AUCs later.
rpgc.wind.matrix<-matrix(NA,nrow=rpgc.step.num,ncol=length(unique(z.bak$windowsize)))
pval.wind.matrix<-matrix(NA,nrow=pval.step.num,ncol=length(unique(z.bak$windowsize)))
#window sizes as columns
colnames(rpgc.wind.matrix)<-as.character( unique(z.bak$windowsize))
colnames(pval.wind.matrix)<-as.character( unique(z.bak$windowsize))
#duplicate these matrices for accuracies instead of aucs
rpgc.wind.matrix.acc<-rpgc.wind.matrix
pval.wind.matrix.acc<-pval.wind.matrix
#lists for when we record out matrix for results
cagecountmatlist<-cagecountmatlist.r<-list()
# Sanity checks on the window table before any heavy computation.
stopifnot(is.data.frame(z))
stopifnot(is.numeric(unique(z.bak$windowsize)))
stopifnot((unique(z.bak$acc))%in%names(cage.tag.rles))
# data in matrix format ---------------------------------------------------
# Build, per window size, a list of per-region matrices (rows = windows,
# columns = CAGE lines) of raw counts (cagecountmatlist) and library-
# normalized counts (cagecountmatlist.r).
# NOTE(review): the subset condition `z.bak$windowsize==w & z.bak$region` uses
# z.bak$region bare instead of `z.bak$region == reg`, and the first inner
# function returns whole data-frame subsets (no value column, cf. $val.libnorm
# below) while ignoring `acc` — this looks broken; confirm against the
# intended column (likely `$val`) before trusting downstream results.
for (w in unique(z.bak$windowsize)){
cagecountmatlist[[w]]<-sapply(unique(z.bak$region),function(reg){
sapply(unique(z.bak$acc),function(acc){
z.bak[ z.bak$windowsize==w & z.bak$region ,]
})
})
cagecountmatlist.r[[w]]<-sapply(unique(z.bak$region),function(reg){
sapply(unique(z.bak$acc),function(acc){
z.bak[ z.bak$windowsize==w & z.bak$region ,]$val.libnorm
})
})
}
message('Done with data matrices')
# first kind of fdr - total reads -----------------------------------------
# For each window size: score each window by its total tag count summed over
# all lines (raw and library-normalized), then draw ROC / precision-recall
# curves (ROCR) for several positive-vs-negative region pairings into one PDF.
for (w in unique(z.bak$windowsize)){
#select relevant matrix
cagecountmats<-cagecountmatlist[[w]]
cagecountmats.r<-cagecountmatlist.r[[w]]
stopifnot(!NA%in%cagecountmats.r )
#calculate the summed
sums<-sapply(simplify=F,cagecountmats,function(reg){rowSums(reg)})#total in all lines
sums.r<-sapply(simplify=F,cagecountmats.r,function(reg){rowSums(reg)})#normalized
pdf(paste0('analysis/summ.roc.',w,'.pdf'))
#8008 positive plus negative set
# sums[[1]] = positive CRMs, sums[[2]] = negative set; stack() turns the
# named list into (values, ind) for ROCR::prediction.
#par(mfrow=c(2,2))
scores<-stack(list(pos=sums[[1]],neg=sums[[2]]))
sumpred<-prediction(scores$values,scores$ind)
perf<-performance(sumpred,'tpr','fpr')
plot(perf,colorize=T)
aucstring=paste0('AUC = ',round(performance(sumpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
title('ROC curve for Summed Unnormalized Tags,8008 set',sub=paste0('windowsize ',w))
# plot(performance(sumpred,'prec','rec'),colorize=T)
# plot(performance(sumpred,'cal',window.size=5))
# performance(sumpred,'auc')@y.values[[1]]
abline(coef=c(0,1),lty=3)
#8008 positive vs the intergenic set
#par(mfrow=c(2,2))
scores<-stack(list(pos=sums[[1]],neg=sums[[3]]))
sumpred<-prediction(scores$values,scores$ind)
perf<-performance(sumpred,'tpr','fpr')
plot(perf,colorize=T)
aucstring=paste0('AUC = ',round(performance(sumpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
title('ROC curve for Summed Unnormalized Tags,crms vs random inactive',sub=paste0('windowsize ',w))
# plot(performance(sumpred,'prec','rec'),colorize=T)
# plot(performance(sumpred,'cal',window.size=5))
# performance(sumpred,'auc')@y.values[[1]]
abline(coef=c(0,1),lty=3)
#crm set with low H3k27ac
# Negative set restricted to regions whose BED score is 0 (low H3K27ac).
scores<-stack(list(pos=sums[[1]],neg=sums[[2]][ regs[[2]]$score==0 ]))
sumpred<-prediction(scores$values,scores$ind)
perf<-performance(sumpred,'tpr','fpr')
plot(perf,colorize=T)
aucstring=paste0('AUC = ',round(performance(sumpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
title('ROC curve for Summed Tags,8008 low k27ac negative set',sub=paste0('windowsize ',w))
abline(coef=c(0,1),lty=3)
#cad3
scores<-stack(list(pos=sums[[4]],neg=sums[[5]]))
sumpred<-prediction(scores$values,scores$ind)
perf<-performance(sumpred,'tpr','fpr')
plot(perf,colorize=T)
aucstring=paste0('AUC = ',round(performance(sumpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
title('ROC curve for Summed Tags, CAD3 set',sub=paste0('windowsize ',w))
abline(coef=c(0,1),lty=3)
#crms with normalized reads
scores<-stack(list(pos=sums.r[[1]],neg=sums.r[[2]]))
sumpred<-prediction(scores$values,scores$ind)
perf<-performance(sumpred,'tpr','fpr')
plot(perf,colorize=T)
aucstring=paste0('AUC = ',round(performance(sumpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
title('ROC curve for Summed RPGC Tags,8008 set',sub=paste0('windowsize ',w))
abline(coef=c(0,1),lty=3)
#crms vs intergenic
scores<-stack(list(pos=sums.r[[1]],neg=sums.r[[3]]))
sumpred<-prediction(scores$values,scores$ind)
perf<-performance(sumpred,'tpr','fpr')
plot(perf,colorize=T)
aucstring=paste0('AUC = ',round(performance(sumpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
title('ROC curve for Summed RPGC Tags,crms vs random inactive',sub=paste0('windowsize ',w))
abline(coef=c(0,1),lty=3)
#cad
scores<-stack(list(pos=sums.r[[4]],neg=sums.r[[5]]))
sumpred<-prediction(scores$values,scores$ind)
perf<-performance(sumpred,'tpr','fpr')
plot(perf,colorize=T)
aucstring=paste0('AUC = ',round(performance(sumpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
title('ROC curve for Summed Normalized Tags,CAD3 set',sub=paste0('windowsize ',w))
abline(coef=c(0,1),lty=3)
dev.off()
#let's also try using our 'very negative' set
}
message('Done with Summed tag plots')
# Second type of FDR - lines above flat cutoff ----------------------------
# Score each window by the NUMBER OF LINES whose normalized count exceeds a
# fixed RPGC cutoff; sweep rpgc.step.num cutoffs, record AUC per cutoff into
# rpgc.wind.matrix, and plot ROC/PR curves for the best cutoff plus a sample.
# NOTE(review): this message() fires BEFORE the loop runs (and is repeated
# after it) — likely a leftover from editing; confirm intended placement.
message('Done with flat cutoff plots')
for (w in unique(z.bak$windowsize)){
cagecountmats<-cagecountmatlist[[w]]
cagecountmats.r<-cagecountmatlist.r[[w]]
# Second kind of fdr - number of lines above fixed cutoff -----------------
#define a range to cut off our peaks at
highnum<-20#arbitrarily set a hight limit on our rpgc cutoff
all<-do.call(rbind,cagecountmats)#get the scores in all regions
all.r<-do.call(rbind,cagecountmats.r)#get the scores in all regions
# Translate the raw-count ceiling (20) into its normalized-scale equivalent.
highnum<-all.r[which(all[,1]>20)[1]]
lowvals<-sort((unique(as.vector(all.r[,1]))))#for only the biggest library
lowvals<-lowvals[lowvals < highnum]
# Thin the candidate cutoffs down to rpgc.step.num evenly spaced values.
lowvals<-lowvals[seq(from=1,to=length(lowvals),length.out=rpgc.step.num)]
lnrange<-1:ncol(cagecountmats.r[[1]])
#first go through our cutoffs and give each crm a linescore
cutoff.linenum<-sapply(simplify=F,as.character((lowvals)),function(val){#now pick an RPGC value
#now go through our normalized libraries
lnmats<-sapply(simplify=F,cagecountmats.r,function(reg){
tmp<-apply(reg,1,function(r){#for each locations
sum(r>as.numeric(val))#count the lines with tagnum over our cutoff
})
})
})
#we now have a matrix with four rows and columns for each cutoff - this can give us an fdr with confidence
#or a roc
#this function takes in a matrix with four columns true positive, false positive, total true, total false.
#it outputs three columns - an fdr, and the 95% jeffreys credible intervals for these.
# AUC (pos vs neg region sets) for every candidate cutoff.
aucs<-sapply(names(cutoff.linenum),function(cutoff){
scores<-stack(list(pos=cutoff.linenum[[cutoff]][[1]],neg=cutoff.linenum[[cutoff]][[2]]))
sumpred<-prediction(scores$values,scores$ind)
performance(sumpred,'auc')@y.values[[1]]
})
lowcuts<-names(cutoff.linenum)
rownames(rpgc.wind.matrix)<-lowcuts
rpgc.wind.matrix[,as.character(w)]<-aucs
#now do t
#logical vector describing if the cutoff gives at least a nonzero for each region.
cutlog<-sapply(lowcuts,function(bestcut){ all( sapply(cutoff.linenum[[bestcut]][c(1,2,4,5)],function(reg){any(reg>0)}))})
#only show cutoffs with some non zeros
cuts<-lowcuts[cutlog]
#we need to exlclude
cuts<-cuts[seq(length(cuts)/3,length(cuts),length.out=3)]
best<-lowcuts[which.max(aucs)]
#let's do four different cutoffs
pdf(paste0('analysis/rpgc.roc.',w,'.pdf'))
# Plot the best-AUC cutoff plus three representative cutoffs.
for (bestcut in c(best,cuts)){
#crm set
cutlist<- cutoff.linenum[[bestcut]]
scores<-stack(list(pos=cutlist[[1]],neg=cutlist[[2]]))
lcpred<-prediction(scores$values,scores$ind)
perf<-performance(lcpred,'tpr','fpr')
plot(perf,colorize=T)
title(paste0('ROC curve for Number of lines above cutoff ',round(as.numeric(bestcut),4),' Tags,crm set'),sub=paste0('windowsize ',w))
abline(coef=c(0,1),lty=3)
aucstring=paste0('AUC = ',round(performance(lcpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
plot(performance(lcpred,'prec','rec'),colorize=T)
title(paste0('Precision/Recall curve for Number of lines above cutoff\n',round(as.numeric(bestcut),4),' Tags,crm set'),sub=paste0('windowsize ',w))
#crm set with low H3k27ac
scores<-stack(list(pos=cutlist[[1]],neg=cutlist[[2]][ regs[[2]]$score==0 ]))
sumpred<-prediction(scores$values,scores$ind)
perf<-performance(sumpred,'tpr','fpr')
plot(perf,colorize=T)
aucstring=paste0('AUC = ',round(performance(sumpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
title('ROC curve for Number of lines above cutoff,8008 low k27ac negative set',sub=paste0('windowsize ',w))
abline(coef=c(0,1),lty=3)
#crms vs the randoms
cutlist<- cutoff.linenum[[bestcut]]
scores<-stack(list(pos=cutlist[[1]],neg=cutlist[[3]]))
lcpred<-prediction(scores$values,scores$ind)
perf<-performance(lcpred,'tpr','fpr')
plot(perf,colorize=T)
title(paste0('ROC curve for Number of lines above cutoff ',round(as.numeric(bestcut),4),' Tags,crms vs random'),sub=paste0('windowsize ',w))
abline(coef=c(0,1),lty=3)
aucstring=paste0('AUC = ',round(performance(lcpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
plot(performance(lcpred,'prec','rec'),colorize=T)
title(paste0('Precision/Recall curve for Number of lines above cutoff\n',round(as.numeric(bestcut),4),' Tags,crms vs random '),sub=paste0('windowsize ',w))
#cad
scores<-stack(list(pos=cutlist[[4]],neg=cutlist[[5]]))
lcpred<-prediction(scores$values,scores$ind)
perf<-performance(lcpred,'tpr','fpr')
plot(perf,colorize=T)
title(paste0('ROC curve for Number of lines above cutoff ',round(as.numeric(bestcut),4),' Tags,CAD set'),sub=paste0('windowsize ',w))
abline(coef=c(0,1),lty=3)
aucstring=paste0('AUC = ',round(performance(lcpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
plot(performance(lcpred,'prec','rec'),colorize=T)
title(paste0('Precision/Recall curve for Number of lines above cutoff\n',bestcut,' Tags,CAD set'),sub=paste0('windowsize ',w))
}
dev.off()
}
message('Done with flat cutoff plots')
# #third kind of fdr - number of lines above fixed pvalue cutoff ----------
# Estimate a per-line Poisson background rate (lambda) from inactive
# intergenic regions, then score windows by the number of lines exceeding
# the Poisson quantile for each p-value cutoff; record AUCs and plot curves.
#first calculate the
inter=import('analysis/intergenic.inactive.regions.bed')
inter=coverage(inter)>0
#first we'll define lambda for each line
#we can do this easily enough by just defining a
# NOTE(review): `zeros` sums counts from cg[[acc]][[1]] TWICE; by symmetry
# with the `ones` line below the second term should use [[2]] — verify.
lambdas<-sapply(names(cg),function(acc){
zeros<-sum(as.numeric(sum(cg[[acc]][[1]][inter]==0))+ as.numeric(sum(cg[[acc]][[1]][inter]==0)))
ones<-sum(as.numeric(sum(cg[[acc]][[1]][inter]==1))+ as.numeric(sum(cg[[acc]][[2]][inter]==1)))
ones/(zeros+ones)#The poisson's density will be mostly concentrated on zero and 1
})
# Diagnostic scatterplots: estimated rates vs. library sizes.
v=accession.df$library.size
names(v)<-accession.df$accession
qplot(v[names(lambdas)],lambdas,xlab='Library Size',main='Calculated Poisson Rate Vs. Library Size')
v=accession.df$intergenic.lib.size
names(v)<-accession.df$accession
qplot(v[names(lambdas)],lambdas,xlab='Intergenic Library Size',main='Calculated Poisson Rate Vs. Intergenic Library Size')
for (w in unique(z.bak$windowsize)){
cagecountmats<-cagecountmatlist[[w]]
cagecountmats.r<-cagecountmatlist.r[[w]]
# P-value grid: 10^-1 ... 10^-pval.step.num.
pvalrange<-1/(10^(1:pval.step.num))
#Now we can go through each line and call the windows in a binary way
#first go through our cutoffs and give each crm a linescore
cutoff.linenum<-sapply(simplify=F,as.character(pvalrange),function(pval){#for each pval cutoff
#derive the cutoffs for each library
# Poisson quantile per library: rate lambda scaled by window size w.
cutoffs<-sapply(lambdas,function(lam){ qpois(1-as.numeric(pval),lambda=lam*w) })
cutoffs<-cutoffs[colnames(cagecountmats[[1]])]
#now go through our normalized libraries
lnmats<-sapply(simplify=F,cagecountmats,function(reg){
tmp<-apply(reg,1,function(r){#for each locations
sum(r>cutoffs)#count the lines with tagnum over our cutoff
})
})
})
######Or using ROCR
aucs<-sapply(names(cutoff.linenum),function(cutoff){
scores<-stack(list(pos=cutoff.linenum[[cutoff]][[1]],neg=cutoff.linenum[[cutoff]][[2]]))
pred<-prediction(scores$values,scores$ind)
performance(pred,'auc')@y.values[[1]]
})
# NOTE(review): `accs` is computed but never stored or plotted (the matrix
# assignment below is commented out) — dead code unless re-enabled.
accs<-sapply(names(cutoff.linenum),function(cutoff){
scores<-stack(list(pos=cutoff.linenum[[cutoff]][[1]],neg=cutoff.linenum[[cutoff]][[2]]))
pred<-prediction(scores$values,scores$ind)
performance(pred,'acc')@y.values[[1]]
})
auc.df<-as.data.frame(aucs)
bestcut<-rownames(auc.df)[which.max(auc.df$aucs)]
bestcut
lowcuts<-names(cutoff.linenum)
rownames(pval.wind.matrix)<-lowcuts
pval.wind.matrix[,as.character(w)]<-aucs
#pval.wind.matrix.acc[,as.character(w)]<-accs
#logical vector describing if the cutoff gives at least a nonzero for each region.
cutlog<-sapply(lowcuts,function(bestcut){ all( sapply(cutoff.linenum[[bestcut]][c(1,2,4,5)],function(reg){any(reg>0)}))})
#only show cutoffs with some non zeros
cuts<-lowcuts[cutlog]
#we need to exlclude
cuts<-cuts[seq(length(cuts)/3,length(cuts),length.out=3)]
best<-lowcuts[which.max(auc.df$aucs)]
#let's do four different cutoffs
pdf(paste0('analysis/pois_pval.roc.',w,'.pdf'))
#let's do four different cutoffs
for (bestcut in c(best,cuts)){
# par(mfrow=c(2,2))
#crms
scores<-stack(list(pos=cutoff.linenum[[bestcut]][[1]],neg=cutoff.linenum[[bestcut]][[2]]))
lcpred<-prediction(scores$values,scores$ind)
perf<-performance(lcpred,'tpr','fpr')
plot(perf,colorize=T)
title(paste0('ROC curve for Number of lines above Pval cutoff ',round(as.numeric(bestcut),4),' Tags,crm set'),sub=paste0('windowsize ',w))
aucstring=paste0('AUC = ',round(performance(lcpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
abline(coef=c(0,1),lty=3)
plot(performance(lcpred,'prec','rec'),colorize=T)
title(paste0('Precision/Recall curve for Number of lines above Pval cutoff\n',round(as.numeric(bestcut),4),' Tags,crm set'),sub=paste0('windowsize ',w))
#crms
# NOTE(review): this branch is titled "low k27ac" but filters on score==1,
# whereas the earlier sections filter on score==0 — confirm which is intended.
scores<-stack(list(pos=cutoff.linenum[[bestcut]][[1]],neg=cutoff.linenum[[bestcut]][[2]][regs[[2]]$score==1]))
lcpred<-prediction(scores$values,scores$ind)
perf<-performance(lcpred,'tpr','fpr')
plot(perf,colorize=T)
title('ROC curve for Number of lines above Pval cutoff,8008 low k27ac negative set',sub=paste0('windowsize ',w))
aucstring=paste0('AUC = ',round(performance(lcpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
abline(coef=c(0,1),lty=3)
plot(performance(lcpred,'prec','rec'),colorize=T)
title(paste0('Precision/Recall curve for Number of lines above Pval cutoff\n',round(as.numeric(bestcut),4),' Tags,crm set'),sub=paste0('windowsize ',w))
#crms vs random
scores<-stack(list(pos=cutoff.linenum[[bestcut]][[1]],neg=cutoff.linenum[[bestcut]][[3]]))
lcpred<-prediction(scores$values,scores$ind)
perf<-performance(lcpred,'tpr','fpr')
plot(perf,colorize=T)
title(paste0('ROC curve for Number of lines above Pval cutoff ',round(as.numeric(bestcut),4),' Tags,crm vs random'),sub=paste0('windowsize ',w))
aucstring=paste0('AUC = ',round(performance(lcpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
abline(coef=c(0,1),lty=3)
plot(performance(lcpred,'prec','rec'),colorize=T)
title(paste0('Precision/Recall curve for Number of lines above Pval cutoff\n',round(as.numeric(bestcut),4),' Tags,crm vs random'),sub=paste0('windowsize ',w))
#cad
scores<-stack(list(pos=cutoff.linenum[[bestcut]][[4]],neg=cutoff.linenum[[bestcut]][[5]]))
lcpred<-prediction(scores$values,scores$ind)
perf<-performance(lcpred,'tpr','fpr')
plot(perf,colorize=T)
title(paste0('ROC curve for Number of lines above Pval cutoff ',round(as.numeric(bestcut),4),' Tags,CAD set'))
aucstring=paste0('AUC = ',round(performance(lcpred,'auc')@y.values[[1]],4))
text(x=0.90,y=0,labels=aucstring)
abline(coef=c(0,1),lty=3)
plot(performance(lcpred,'prec','rec'),colorize=T)
title(paste0('Precision/Recall curve for Number of lines above Pval cutoff\n',bestcut,' Tags,CAD set'),sub=paste0('windowsize ',w))
}
dev.off()
}
# Persist the AUC-vs-cutoff matrices and the whole workspace for later use.
write.table(rpgc.wind.matrix,'analysis/rpgc.window.mat.txt')
write.table(pval.wind.matrix,'analysis/pval.window.mat.txt')
dev.off()
save.image('tmp.image.R')
# experimenting with negative binomail distributions ----------------------
|
fe7b1533ca9f8a57f72f959c306146a954b41ef5
|
3224a5f179537503f9c7244ac5c09837aa3eb2cc
|
/man/minnaert.Rd
|
b2476a9bcd51728950afef4080bcdbfd6a4e92b0
|
[] |
no_license
|
cran/landsat
|
b9510fc49610a5124691c3b2f223f590a7c563ec
|
b05d50fe6995d1ee05895fe4dbb5a28facff2c4c
|
refs/heads/master
| 2023-08-31T07:33:21.957052
| 2023-08-24T22:20:06
| 2023-08-24T23:30:43
| 17,696,975
| 9
| 4
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,786
|
rd
|
minnaert.Rd
|
\name{minnaert}
\alias{minnaert}
\title{
Whole-image and pixel-based Minnaert topographic correction of remote sensing data.
}
\description{
Adds several modified Minnaert corrections to the capabilities of topocorr().
}
\usage{
minnaert(x, slope, aspect, sunelev, sunazimuth, na.value = NA, GRASS.aspect=FALSE,
IL.epsilon=0.000001, slopeclass = c(1, 5, 10, 15, 20, 25, 30, 45), coverclass)
}
\arguments{
\item{x}{
Image to be corrected, in matrix, data frame, or SpatialGridDataFrame format.
}
\item{slope}{
Slope image of same size and resolution as x.
}
\item{aspect}{
Aspect image of same size and resolution as x.
}
\item{sunelev}{
Sun elevation in degrees.
}
\item{sunazimuth}{
Sun azimuth in degrees.
}
\item{na.value}{
Value to use for missing data.
}
\item{GRASS.aspect}{
Whether aspect is measured according to GRASS defaults (counterclockwise from east) or is measured clockwise from north. If GRASS.aspect=TRUE, aspect is converted to clockwise from north before analysis.
}
\item{IL.epsilon}{
If IL == 0 (Illumination), some methods will give a topographically-corrected value of Inf due to division by zero. If desired, adding a small increment to zero values eliminates this.
}
\item{slopeclass}{
The classes into which the slope will be divided before calculating k separately for each class.
}
\item{coverclass}{
If present, TRUE/FALSE vector indicating which pixels to use when calculating k. This allows k to be determined separately for different cover classes.
}
}
\details{
Calculates the Minnaert k coefficients for the whole image and for the individual slope classes.
}
\value{
\item{allcoef }{
The Minnaert k for the entire image. This is the value used in topocorr() (though the latter may have been truncated).
}
\item{classcoef }{
A data frame containing the slope class midpoints, number of pixels per class, and k for that class (for the desired cover class, if specified).
}
\item{xout }{
A topographically-corrected image in the same format as x.
}
}
\references{
Lu, D., Ge, H., He, S., Xu, A., Zhou, G., and Du, H. 2008. Pixel-based Minnaert correction method for reducing topographic effects on a Landsat 7 ETM+ image. Photogrammetric Engineering and Remote Sensing 74:1343-1350.
}
\author{
Sarah Goslee
}
\seealso{
\code{\link{topocorr}}
}
\examples{
# require slope and aspect for topographic correction
data(dem)
dem.slopeasp <- slopeasp(dem)
# use cosine method of topographic correction
data(july4)
july4.minpix <- minnaert(july4, dem.slopeasp$slope, dem.slopeasp$aspect,
sunelev=61.4, sunazimuth=125.8, slopeclass=c(1, 5, 10, 15, 50))
july4.minpix$classcoef # all coefficients
}
\keyword{ spatial }
|
b1bb28cd0e63480b1dd39e27a8bc4634482815d4
|
11b2940a026c615b307452a7106bdc9f7866868e
|
/R/plot.R
|
f8a9e0e4a0c618eee72f827640ac54551c0b1e66
|
[
"MIT"
] |
permissive
|
sahirbhatnagar/ggmix
|
6cc770baf21d5711961ac65f0b76338dc4a18325
|
eb1c8a71ddc8f7e450dfbab68e7c562eac0ed487
|
refs/heads/master
| 2021-06-04T04:57:24.322156
| 2021-04-14T00:13:54
| 2021-04-14T00:13:54
| 90,279,895
| 12
| 12
|
NOASSERTION
| 2021-04-14T00:13:55
| 2017-05-04T15:32:36
|
HTML
|
UTF-8
|
R
| false
| false
| 9,087
|
r
|
plot.R
|
#' Plot the Generalised Information Criteria curve produced by \code{gic}
#'
#' @description Plots the Generalised Information Criteria curve, as a function
#'   of the lambda values used, or one of several model-diagnostic plots.
#' @param x fitted linear mixed model object of class \code{ggmix_gic} from the
#'   \code{\link{gic}} function
#' @param sign.lambda Either plot against log(lambda) (default) or its negative
#'   if sign.lambda=-1
#' @param lambda.min the value of lambda which minimizes the gic
#' @param type \code{gic} returns a plot of the GIC vs. log(lambda).
#'   \code{QQranef} return a qqplot of the random effects. \code{QQresid}
#'   returns a qqplot of the residuals which is \eqn{y - X\beta - b_i} where b_i
#'   is the subject specific random effect. \code{predicted} returns a plot of
#'   the predicted response (\eqn{X \beta} + b_i) vs. the observed response,
#'   where b_i is the subject specific random effect. \code{Tukey-Anscombe}
#'   returns a plot of the residuals vs. fitted values (\eqn{X \beta})
#' @param s Value of the penalty parameter \code{lambda} at which predictions
#'   are required. Default is the value \code{s="lambda.min"}. If \code{s} is
#'   numeric, it is taken as the value of \code{lambda} to be used. Must be a
#'   single value of the penalty parameter \code{lambda} at which coefficients
#'   will be extracted via the \code{coef} method for objects of class
#'   \code{ggmix_gic}. If more than one is supplied, only the first one will be
#'   used.
#' @param newy the response variable that was provided to \code{ggmix}. this is
#'   only required for \code{type="QQresis"}, \code{type="Tukey-Anscombe"} and
#'   \code{type="predicted"}
#' @param newx matrix of values for \code{x} at which predictions are to be
#'   made. Do not include the intercept. this is only required for
#'   \code{type="QQresis"}, \code{type="Tukey-Anscombe"} and
#'   \code{type="predicted"}
#' @param ... Other graphical parameters to plot
#' @return plot depends on the type selected
#' @details A plot is produced, and nothing is returned.
#' @seealso \code{\link{gic}}
#' @examples
#' data("admixed")
#' fit <- ggmix(x = admixed$xtrain,
#' y = admixed$ytrain,
#' kinship = admixed$kin_train)
#' hdbic <- gic(fit)
#'
#' # plot solution path
#' plot(fit)
#'
#' # plot HDBIC curve as a function of lambda
#' plot(hdbic)
#' @export
plot.ggmix_gic <- function(x, ..., sign.lambda = 1,
type = c("gic", "QQranef", "QQresid", "predicted", "Tukey-Anscombe"),
s = "lambda.min", newy, newx) {
# Only one plot type is drawn per call.
type <- match.arg(type, several.ok = FALSE)
# A numeric s is used verbatim as lambda; a character s ("lambda.min") is
# looked up as a component of the fitted ggmix_gic object.
if (length(s) > 1) {
s <- s[[1]]
warning("More than 1 s value provided. Only first element will be used for the estimated coefficients.")
}
if (is.numeric(s)) {
lambda <- s
} else
if (is.character(s)) {
s <- match.arg(s)
lambda <- x[[s]]
}
else {
stop("Invalid form for s")
}
if (type == "gic") {
# GIC-vs-lambda curve; drawn by the internal plotGIC helper.
plotGIC(
x = x,
sign.lambda = sign.lambda,
lambda.min = lambda, ...
)
}
if (type == "QQranef") {
# Normal QQ-plot of the estimated random effects at the chosen lambda.
stats::qqnorm(ranef(x, s = lambda), main = sprintf("QQ-Plot of the random effects at lambda = %.2f", lambda))
stats::qqline(ranef(x, s = lambda), col = "red")
}
if (type == "QQresid") {
if (missing(newy) | missing(newx))
stop("newy and newx must be provided when type='QQresid'")
# Residuals: y - X*beta - b_i (subject-specific random effect removed).
resids <- newy -
stats::predict(x, s = lambda, newx = newx) -
ranef(x, s = lambda)
stats::qqnorm(resids, main = sprintf("QQ-Plot of the residuals at lambda = %.2f", lambda))
stats::qqline(resids, col = "red")
}
if (type == "predicted") {
if (missing(newy) | missing(newx))
stop("newy and newx must be provided when type='QQresid'")
# Predicted response includes the random effect: X*beta + b_i.
preds <- stats::predict(x, s = lambda, newx = newx) +
ranef(x, s = lambda)
graphics::plot(preds, drop(newy),
xlab = "predicted response (XB + b_i)", ylab = "observed response",
main = strwrap(sprintf("Observed vs. Predicted response\n
corr(observed,predicted)^2 = %g", stats::cor(preds, drop(newy))^2))
)
# Identity line: perfect prediction reference.
graphics::abline(a = 0, b = 1, col = "red")
}
if (type == "Tukey-Anscombe") {
if (missing(newy) | missing(newx))
stop("newy and newx must be provided when type='QQresid'")
resids <- newy -
stats::predict(x, s = lambda, newx = newx) -
ranef(x, s = lambda)
# Fitted values exclude the random effect (X*beta only).
fitted <- stats::predict(x, s = lambda, newx = newx)
graphics::plot(fitted, resids,
main = "Tukey-Anscombe Plot",
xlab = "fitted values (XB)", ylab = "residuals"
)
graphics::abline(h = 0, col = "red")
}
}
#' @rdname plot.ggmix_gic
plotGIC <- function(x, sign.lambda, lambda.min, ...) {
  # Plot the GIC curve against (signed) log(lambda), annotate the top axis
  # with the number of nonzero coefficients, and mark the minimizing lambda.
  gic_obj <- x
  xaxis_lab <- "log(Lambda)"
  if (sign.lambda < 0) xaxis_lab <- paste("-", xaxis_lab, sep = "")

  log_lam <- sign.lambda * log(drop(gic_obj[["lambda"]]))
  gic_vals <- drop(gic_obj[["gic"]])

  # Open an empty frame first; any graphical parameters supplied via ...
  # override the defaults below.
  base_args <- list(
    x = log_lam,
    y = gic_vals,
    ylim = range(gic_vals),
    xlab = xaxis_lab,
    ylab = "GIC", type = "n"
  )
  user_args <- list(...)
  if (length(user_args)) base_args[names(user_args)] <- user_args
  do.call("plot", base_args)

  graphics::points(log_lam, gic_vals, pch = 20, col = "red")

  # Top axis: number of nonzero coefficients at each lambda.
  graphics::axis(
    side = 3, at = log_lam,
    labels = paste(drop(gic_obj[["nzero"]])), tick = FALSE, line = 0
  )

  # Dotted vertical line at the GIC-minimizing lambda.
  graphics::abline(v = sign.lambda * log(lambda.min), lty = 3)
}
#' @title Plot Method for \code{ggmix_fit} object
#' @description Produces a coefficient profile plot of the coefficient paths for
#' a fitted \code{ggmix_fit} object.
#' @param x a \code{ggmix_fit} object
#' @param xvar What is on the X-axis. "norm" plots against the L1-norm of the
#' coefficients, "lambda" against the log-lambda sequence, and "dev" against
#' the percent deviance explained.
#' @param label If TRUE, label the curves with variable sequence numbers.
#' @param sign.lambda Either plot against log(lambda) (default) or its negative
#' if sign.lambda=-1
#' @param ... other graphical parameters passed to \code{plot}
#' @param beta fixed effects estimates
#' @param norm l1 norm of fixed effect estimates. if missing, (default) this
#' function will calculate it
#' @param lambda sequence of tuning parameters
#' @param df number of non-zero fixed + random effects
#' @param dev percent deviance
#' @param xlab x-axis label
#' @param ylab y-axis label
#' @details A coefficient profile plot is produced
#' @return A plot is produced and nothing is returned
#' @export
plot.ggmix_fit <- function(x, ...,
                           xvar = c("norm", "lambda", "dev"),
                           label = FALSE, sign.lambda = 1) {
  # Coefficient profile plot for a fitted ggmix_fit object; delegates the
  # actual drawing to plotCoef().
  # NOTE(review): sign.lambda is accepted but not forwarded to plotCoef --
  # confirm whether it was intended to affect the lambda axis here.
  xvar <- match.arg(xvar)
  fit_summary <- x[["result"]]
  plotCoef(x[["beta"]],
    lambda = drop(fit_summary[, "Lambda"]),
    df = drop(fit_summary[, "Df"]),
    dev = drop(fit_summary[, "%Dev"]),
    label = label, xvar = xvar, ...
  )
}
#' @rdname plot.ggmix_fit
# Workhorse for plot.ggmix_fit: draws one line per nonzero coefficient
# against the chosen x-variable, adds a top axis showing the number of
# nonzero coefficients (df), and optionally labels each path.
plotCoef <- function(beta, norm, lambda, df, dev, label = FALSE,
                     xvar = c("norm", "lambda", "dev"),
                     xlab = iname, ylab = "Coefficients", ...) {
  ## beta should be in "dgCMatrix" format
  ### bystep = FALSE means which variables were ever nonzero
  ### bystep = TRUE means which variables are nonzero for each step
  # nonzeroCoef() is an external helper (glmnet-style) returning row indices
  # of coefficients that are ever nonzero along the path -- defined elsewhere.
  which <- nonzeroCoef(beta, bystep = FALSE)
  nwhich <- length(which)
  # Dispatch on nwhich + 1 so that 0 and 1 nonzero coefficients hit the
  # first two cases; two or more falls through with no action.
  switch(nwhich + 1, # we add one to make switch work
    "0" = {
      warning("No plot produced since all coefficients zero")
      return()
    },
    "1" = warning("1 or less nonzero coefficients; glmnet plot is not meaningful")
  )
  beta <- as.matrix(beta[which, , drop = FALSE])
  xvar <- match.arg(xvar)
  # Choose the x-axis variable; approx.f sets the step-interpolation
  # direction used below for the df labels on the top axis.
  switch(xvar,
    "norm" = {
      index <- if (missing(norm)) apply(abs(beta), 2, sum) else norm
      # index=apply(abs(beta),2,sum)
      iname <- "L1 Norm"
      approx.f <- 1
    },
    "lambda" = {
      index <- log(lambda)
      iname <- "Log Lambda"
      approx.f <- 0
    },
    "dev" = {
      index <- dev
      iname <- "Fraction Deviance Explained"
      approx.f <- 1
    }
  )
  dotlist <- list(...)
  type <- dotlist$type
  # Force type = "l" only when the caller did not supply a type of their own.
  if (is.null(type)) {
    graphics::matplot(index, t(beta), lty = 1, xlab = xlab, ylab = ylab, type = "l", ...)
  } else {
    graphics::matplot(index, t(beta), lty = 1, xlab = xlab, ylab = ylab, ...)
  }
  atdf <- pretty(index)
  ### compute df by interpolating to df at next smaller lambda
  ### thanks to Yunyang Qian
  prettydf <- stats::approx(x = index, y = df, xout = atdf, rule = 2, method = "constant", f = approx.f)$y
  # prettydf=ceiling(approx(x=index,y=df,xout=atdf,rule=2)$y)
  graphics::axis(3, at = atdf, labels = prettydf, tcl = NA)
  if (label) {
    # Labels go at the right edge (pos = 4) except on the lambda scale,
    # where they go at the left edge (pos = 2).
    nnz <- length(which)
    xpos <- max(index)
    pos <- 4
    if (xvar == "lambda") {
      xpos <- min(index)
      pos <- 2
    }
    xpos <- rep(xpos, nnz)
    ypos <- beta[, ncol(beta)]
    graphics::text(xpos, ypos, paste(which), cex = .5, pos = pos)
  }
}
|
53c6cd5c86c90234761471213e8d06556985d594
|
032a64be429d21fa4bbd04bfae163d7236122406
|
/Analyzing Baseball Data With R/Book - First Edition/Chapter 2 - Introduction to R/ch2_exercise5.R
|
dcbc450e333c722319c5d6ff6ab4e500e087d5f0
|
[] |
no_license
|
connormoffatt/Baseball
|
5be37b7e4af62c14d5d25f5a86a9bbd589691d6f
|
83a414091dc1be322e076df087cbb725e1ff6a19
|
refs/heads/master
| 2020-04-12T03:28:42.937403
| 2019-01-06T09:23:10
| 2019-01-06T09:23:10
| 161,848,190
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,743
|
r
|
ch2_exercise5.R
|
# Analyzing Baseball Data With R, Chapter 2, Exercise 5.
# NOTE(review): rm(list = ls()) and setwd() in scripts are discouraged; they
# are kept here only to preserve the original exercise script's behavior.
rm(list=ls())

# (a)
# Read the Lahman "pitching.csv" data file into R into a dataframe Pitching
chapter_path = "C:/Users/conno/Documents/GitHub/Baseball/Analyzing Baseball Data With R/Book - First Edition/Chapter 2 - Introduction to R"
setwd(chapter_path)
Pitching <- read.csv("pitching.csv")

# ddply() comes from plyr; the original script used it without loading the
# package, which fails in a fresh session.
library(plyr)

# (b)
# Career summaries for a pitcher whose season statistics are stored in the
# data frame d: cumulative strikeouts (SO), cumulative walks (BB), total
# innings pitched in outs (IPouts), and mid-career year.
stats <- function(d){
  # BUG FIX: the original passed na.rn= (a typo for na.rm=). Because sum()
  # takes ..., the stray TRUE was silently summed into the totals (+1 each),
  # and NAs were never removed.
  c.SO <- sum(d$SO, na.rm=TRUE)
  c.BB <- sum(d$BB, na.rm=TRUE)
  c.IPouts <- sum(d$IPouts, na.rm=TRUE)
  c.midYear <- median(d$yearID, na.rm=TRUE)
  data.frame(SO=c.SO, BB=c.BB, IPouts=c.IPouts, midYear=c.midYear)
}

# Career statistics for all pitchers, one row per playerID.
career.pitching <- ddply(Pitching, .(playerID), stats)

# (c)
# Merge the career totals back onto the season-level data.
Pitching <- merge(Pitching, career.pitching, by="playerID")

# (d)
# Pitchers with at least 10,000 career IPouts.
career.10000 <- subset(career.pitching, career.pitching$IPouts >= 10000)

# (e)
# Scatterplot of mid-career year vs strikeout-to-walk ratio for the
# high-workload pitchers.
career.10000$SO.BB.Ratio <- career.10000$SO / career.10000$BB
plot(career.10000$midYear, career.10000$SO.BB.Ratio)
# The strikeout to walk ratio increases over time as Mid Year increases
|
826a76d566c609e97a063151be68d77b818f2e42
|
ff9118e3811dca7f1dd06007a5c96c32d73a68b9
|
/test2.R
|
0b984aea18141a55f1c89d62c6787fb84f504574
|
[] |
no_license
|
JINSUKJEONG/rstudio-p
|
9b3decc9b47b6eb549c5af5c485151f0041787f1
|
7da2f32f6ea3708702eeafb0ccc150a6d189a550
|
refs/heads/master
| 2023-07-02T10:28:42.302228
| 2021-08-12T13:00:44
| 2021-08-12T13:00:44
| 394,989,349
| 0
| 0
| null | null | null | null |
UHC
|
R
| false
| false
| 317
|
r
|
test2.R
|
print("hello")
# About probability:
# https://statkclee.github.io/r-algorithm/r-probability-exercise.html
# pnorm(): cumulative probability of the normal distribution.
# P(X <= 1.9) for X ~ N(mean = 1.7, sd = 0.1)
pnorm(1.9, mean=1.7, sd = 0.1)
# Upper-tail probability P(X > 1.6) for the same distribution.
1-pnorm(1.6, mean = 1.7, sd=0.1)
# rnorm: random number generation
# dnorm: probability density function
# pnorm: cumulative distribution function
# qnorm: quantile function
|
c00ce6fcd6b544978bb135e261c0073a28988470
|
ecf284f4e4ab63fb7b4949e31ed698dfdb1cbad1
|
/R/plan.R
|
bcb09b36aa2a3ab217e8660ed2276d0502fbc57e
|
[] |
no_license
|
jasonvii/perpignan
|
fe96267f3de0ecb0494c249d6baed15bc9975ad4
|
b0b91b5894685a922bf76b192af396931dede97d
|
refs/heads/master
| 2020-04-25T10:07:45.178537
| 2019-02-26T11:48:01
| 2019-02-26T11:48:01
| 172,698,563
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 566
|
r
|
plan.R
|
# A drake workflow plan: each named element is a target, and drake infers the
# dependency graph between targets from the expressions.
plan <- drake::drake_plan(
  # Raw gapminder data (CSV path is relative to the project root).
  data = read.csv('data/gapminder-FiveYearData.csv', stringsAsFactors = FALSE),
  # Single-year cross-section.
  data_1982 = data[data$year == 1982, ],
  # Mixed model: random intercept and slope for log(gdpPercap) by year.
  model = lme4::lmer(log(lifeExp) ~ log(gdpPercap) + (1 + log(gdpPercap) | year), data = data),
  out = coef(model),
  folder = dir.create('output'),
  # makeFigure1() is a project function defined elsewhere.
  figure1 = makeFigure1(data_1982, output_filename = 'output/figure1.pdf', main = '1982'),
  figure2 = makeFigure1(data, output_filename = 'output/figure2.pdf', main = 'whole time series'),
  # NOTE(review): placeholder target, presumably used to force plan
  # invalidation during development -- confirm whether it can be removed.
  destroy = "ha ha ha"
)
|
04d0e55bb344f2d3c7117dd2c76b20d12a46e2d0
|
afdab8f2dbb6d68e0c4bf475efb5b1aed4b9dca3
|
/fs.boundary_full.R
|
76ca340bf0c49cc7fb5c8e5e685521ab63ed6d20
|
[] |
no_license
|
EdDonlon/Geospatial-Functional-Data-Analysis
|
1404d66ad5fa76eca8c34847ae2970bec4134783
|
a6701acf504c7ede13b81c19c41938ceac50682e
|
refs/heads/master
| 2021-09-21T00:57:45.209964
| 2018-08-18T09:48:26
| 2018-08-18T09:48:26
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,820
|
r
|
fs.boundary_full.R
|
# Construct the boundary of the horseshoe-shaped test domain used for
# spatial/functional data analysis over irregular domains, then re-sample
# parts of it so point density varies along the boundary.
#
#   r0      inner radius of the horseshoe arms
#   r       centre radius (arm half-width is r - r0)
#   l       length of the straight arm sections
#   n.theta number of points used to discretise each arc
#
# Returns a list with components x and y: coordinates of the boundary points.
#
# NOTE(review): the index arithmetic below (multiples of n.theta and the
# by=5 / by=10 subsampling) appears tuned to the default n.theta = 20;
# confirm before calling with other values.
fs.boundary_full<- function (r0 = 0.1, r = 0.5, l = 3, n.theta = 20)
{
  # Outer arc of the left end (radius rr = 2r - r0), swept from pi to pi/2.
  rr <- r + (r - r0)
  theta <- seq(pi, pi/2, length = n.theta)
  x <- rr * cos(theta)
  y <- rr * sin(theta)
  # Rounded right-hand end of the upper arm, centred at (l, r).
  theta <- seq(pi/2, -pi/2, length = 2 * n.theta)
  x <- c(x, (r - r0) * cos(theta) + l)
  y <- c(y, (r - r0) * sin(theta) + r)
  # Inner arc (radius r0) closing back toward the origin.
  theta <- seq(pi/2, pi, length = n.theta)
  x <- c(x, r0 * cos(theta))
  y <- c(y, r0 * sin(theta))
  # Mirror the upper half across the x-axis to obtain the lower half.
  n <- length(x)
  x <- c(x, x[n:1])
  y <- c(y, -y[n:1])
  x_m = x;
  y_m = y;
  # Re-sample segments of the boundary: constant-y (straight) sections are
  # replaced by n.theta evenly spaced points, and some arc sections are
  # thinned to every 5th or 10th point.
  x_tmp = x_m[1:(n.theta-1)]; y_tmp = y_m[1:(n.theta-1)]
  x_tmp1 = seq(x_m[n.theta],x_m[n.theta+1],length=n.theta)
  y_tmp1 = rep(y_m[n.theta],n.theta);
  x_tmp2 = x_m[(n.theta+2):(3.*n.theta-1)]
  y_tmp2 = y_m[(n.theta+2):(3.*n.theta-1)]
  x_tmp2 = x_tmp2[seq(2,length(x_tmp2),by=5)]
  y_tmp2 = y_tmp2[seq(2,length(y_tmp2),by=5)]
  x_tmp3 = seq(x_m[3.*n.theta],x_m[3.*n.theta+1],length=n.theta)
  y_tmp3 = rep(y_m[3.*n.theta],n.theta)
  x_tmp4 = x_m[(3.*n.theta+2):(5.*n.theta-1)]
  y_tmp4 = y_m[(3.*n.theta+2):(5.*n.theta-1)]
  x_tmp4 = x_tmp4[seq(2,length(x_tmp4),by=10)]
  y_tmp4 = y_tmp4[seq(2,length(y_tmp4),by=10)]
  x_tmp5 = seq(x_m[5.*n.theta],x_m[5.*n.theta+1],length=n.theta);
  y_tmp5 = rep(y_m[5.*n.theta],n.theta)
  x_tmp6 = x_m[(5.*n.theta+2):(7.*n.theta-1)]
  y_tmp6 = y_m[(5.*n.theta+2):(7.*n.theta-1)]
  x_tmp6 = x_tmp6[seq(2,length(x_tmp6),by=5)]
  y_tmp6 = y_tmp6[seq(2,length(y_tmp6),by=5)]
  x_tmp7 = seq(x_m[7.*n.theta],x_m[7.*n.theta+1],length=n.theta)
  y_tmp7 = rep(y_m[7.*n.theta],n.theta)
  x_tmp8 = x_m[(7.*n.theta+2):(8.*n.theta-1)]
  y_tmp8 = y_m[(7.*n.theta+2):(8.*n.theta-1)]
  # Stitch the re-sampled segments back together.
  x_m = c(x_tmp,x_tmp1,x_tmp2,x_tmp3,x_tmp4,x_tmp5,x_tmp6,x_tmp7,x_tmp8)
  y_m = c(y_tmp,y_tmp1,y_tmp2,y_tmp3,y_tmp4,y_tmp5,y_tmp6,y_tmp7,y_tmp8)
  return(list(x = x_m, y = y_m))
}
|
3daabdcb6758cc5ed16b1ae791d30284649dac4c
|
f741757039a32ef1d02253e8efdb7f5e32360f6f
|
/R/RcppExports.R
|
0503d7b0647c5c623a5ab5b664c8e1478e40d2d9
|
[] |
no_license
|
dkahle/bayesRates
|
c3c29649e3dd963ddbf166978de568a57ecdeae9
|
7a0ede7bec8447d600cbbc4d7f1c437175318861
|
refs/heads/master
| 2020-08-04T23:25:17.597835
| 2019-02-02T02:38:18
| 2019-02-02T02:38:18
| 29,612,284
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 687
|
r
|
RcppExports.R
|
# Generated by using Rcpp::compileAttributes() -> do not edit by hand
# Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393

# Auto-generated R wrappers around the compiled C++ routines of the
# bayesRates package; each forwards its arguments unchanged via .Call().
# NOTE(review): this file is regenerated by Rcpp::compileAttributes(), so any
# hand edits (including these comments) will be overwritten on regeneration.

sampleAlphaPoissonCpp <- function(t, a1, b1, a2, b2, a, b, pi0, pi1, c) {
    .Call('_bayesRates_sampleAlphaPoissonCpp', PACKAGE = 'bayesRates', t, a1, b1, a2, b2, a, b, pi0, pi1, c)
}

samplePowerBinomialCpp <- function(n, a1, b1, a2, b2, a, b, pi0, pi1, c) {
    .Call('_bayesRates_samplePowerBinomialCpp', PACKAGE = 'bayesRates', n, a1, b1, a2, b2, a, b, pi0, pi1, c)
}

samplePowerPoissonCpp <- function(t, a1, b1, a2, b2, a, b, pi0, pi1, c) {
    .Call('_bayesRates_samplePowerPoissonCpp', PACKAGE = 'bayesRates', t, a1, b1, a2, b2, a, b, pi0, pi1, c)
}
|
f393abfa222c3d9fe8dbf71945f6a759fd576e33
|
ad6fd375c9f30c81728aca17c7ccd3be2912d098
|
/code/doublefinder.R
|
59d182056cbc39be6f1044f50083b16dfecbb58a
|
[] |
no_license
|
zerostwo/scRNA_tutorial
|
6447bd47b1adb4ce9e745e2020c24a627557db37
|
06046da6610194a25e759760a0a2f10638a52350
|
refs/heads/main
| 2023-06-06T08:07:45.236151
| 2021-06-28T06:09:42
| 2021-06-28T06:09:42
| 380,460,029
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,887
|
r
|
doublefinder.R
|
#### Information ----
# Title   : Double cells prediction
# File    : doublefinder.R
# Author  : Songqi Duan
# Contact : songqi.duan@outlook.com
# License : Copyright (C) by Songqi Duan
# Created : 2021/06/24 16:06:40
# Updated : none

#### Load packages ----
library(DoubletFinder)
library(tidyverse)
library(Seurat)
library(patchwork)
rm(list = ls())

#### Load the Seurat object ----
seurat_obj <- readRDS("./output/seurat/seurat_obj.rds")
dims <- 10

#### Find the optimal pK value ----
sweep.res.list <- paramSweep_v3(seurat_obj,
                                PCs = 1:dims,
                                sct = F,
                                num.cores = 10)
sweep.stats <- summarizeSweep(sweep.res.list, GT = FALSE)
bcmvn <- find.pK(sweep.stats)
# The pK with the highest BCmetric is taken as optimal.
pK_bcmvn <-
  bcmvn$pK[which.max(bcmvn$BCmetric)] %>% as.character() %>% as.numeric()

## Exclude homotypic doublets that cannot be detected, and adjust the
## expected number of doublets accordingly.
DoubletRate = 0.039 # doublet rate for ~5000 cells is 3.9%
homotypic.prop <-
  modelHomotypic(seurat_obj$seurat_clusters) # ideally provide cell types here
nExp_poi <- round(DoubletRate * ncol(seurat_obj))
nExp_poi.adj <- round(nExp_poi * (1 - homotypic.prop))

## Identify doublets using the parameters determined above.
seurat_obj <-
  doubletFinder_v3(
    seurat_obj,
    PCs = 1:dims,
    pN = 0.25,
    pK = pK_bcmvn,
    nExp = nExp_poi.adj,
    reuse.pANN = F,
    sct = F
  )

## Show the results; the classification is stored in seurat_obj@meta.data
## under a column whose name starts with "DF".
DimPlot(
  seurat_obj,
  reduction = "tsne",
  group.by = colnames(seurat_obj@meta.data)[grep("^DF", colnames(seurat_obj@meta.data))],
  order = T
)
table(seurat_obj@meta.data[, colnames(seurat_obj@meta.data)[grep("^DF", colnames(seurat_obj@meta.data))]]) # counts of doublets vs singlets
## Keep only the cells classified as singlets.
seurat_obj <-
  seurat_obj[, seurat_obj@meta.data[, colnames(seurat_obj@meta.data)[grep("^DF", colnames(seurat_obj@meta.data))]] == "Singlet"]
|
b089892368d6da2d1f47547aa23160c690b9f5e3
|
23808da268a9b1117119c8a4e1e0eb42550ca057
|
/man/multiplot.Rd
|
9acf29d84a670187fde57dcaa5e825d5b1206128
|
[] |
no_license
|
mdlincoln/multiplot
|
721f2e94cf220916bbf8abc3d06d42a15ccaf21c
|
ce1d38f528141c2e351f7589e20c629e0538d66d
|
refs/heads/master
| 2020-06-05T05:07:20.727611
| 2015-04-29T15:16:26
| 2015-04-29T15:16:26
| 34,801,299
| 2
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 861
|
rd
|
multiplot.Rd
|
% Generated by roxygen2 (4.1.1): do not edit by hand
% Please edit documentation in R/multiplot.R
\name{multiplot}
\alias{multiplot}
\title{Multiple Plots}
\usage{
multiplot(..., plotlist = NULL, file, cols = 1, layout = NULL)
}
\arguments{
\item{...}{ggplot objects can be passed in}
\item{plotlist}{As a list of ggplot objects}
\item{cols}{Number of columns in layout}
\item{layout}{A matrix specifying the layout. If present, 'cols' is ignored.}
}
\value{
Plot object
}
\description{
Arrange multiple ggplot objects in a grid with a specific number of columns
}
\note{
If the layout is something like matrix(c(1,2,3,3), nrow=2, byrow=TRUE),
then plot 1 will go in the upper left, 2 will go in the upper right, and 3
will go all the way across the bottom.
}
\references{
\url{http://www.cookbook-r.com/Graphs/Multiple_graphs_on_one_page_(ggplot2)/}
}
|
0244149a2d8546b5115864ec23e5ab801b25ea94
|
d7f550a841d2a846f5470d5f5dd591c98c2b155e
|
/Lesson 5-2.R
|
fef0b5898adfb67e82b7cb6569c7f25804eb75cd
|
[] |
no_license
|
stevezxyu/R-Language
|
e8effcc411ae24b3819c75bce4e3706fa030b87b
|
e8557b452ddaecf71f0b07db4aaed06cb3a6f8f1
|
refs/heads/master
| 2021-09-23T12:15:38.460386
| 2017-12-01T13:56:30
| 2017-12-01T13:56:30
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 803
|
r
|
Lesson 5-2.R
|
# Draw multiple ggplot figures on one canvas using the gridExtra package.
# NOTE(review): install.packages() calls are kept from the original lesson
# script, but installing from inside a script is better done interactively.
install.packages("gridExtra")
library(gridExtra)
# BUG FIX: ggplot() was used without attaching ggplot2 anywhere in the
# script (gridExtra does not attach it).
library(ggplot2)

gg1 <- ggplot(cars, aes(x = speed, y = dist)) + geom_point()
gg2 <- ggplot(iris, aes(x = Petal.Length, y = Petal.Width, colour = Species)) + geom_point()
grid.arrange(gg1, gg2, nrow = 2)

# Filled bar chart: each bar shows the share of drive types within a class.
gg1 <- ggplot(mpg, aes(x= class))
gg1 <- gg1 +geom_bar(aes(fill = drv), position = "fill")
# BUG FIX: the y-axis label was misspelled "Prosition"; with
# position = "fill" the axis shows proportions.
gg1 <- gg1 + ylab("Proportion")
gg1 <- gg1 + coord_flip()
gg2 <- ggplot(mpg, aes(x= class)) +
  geom_bar(aes(fill = drv), position = "dodge")
grid.arrange(gg1, gg2, nrow = 1) # several plots on one canvas

# ggplotly() turns a static ggplot into an interactive plot.
install.packages("plotly")
library(plotly)
static_gg <- ggplot(mpg, aes(x = class)) +
  geom_bar(aes(fill = drv))
ggplotly(static_gg)
|
fc7717e5b121e40d4d05e317bdc62005073bd4c3
|
ea2e37abc55ab78978a96b8d7ef96ef1c8f2cd97
|
/R/BasinData.R
|
ea4c3d161015b062a1b437babd839908d70e97b1
|
[] |
no_license
|
kongdd/airGR
|
d2559ffaaaf500ef79dcf23318f06061abdeab23
|
a72a592cc54bef69e5969a4aca27eec2d950cf56
|
refs/heads/master
| 2020-07-11T04:09:05.445644
| 2018-10-10T09:20:03
| 2018-10-10T09:20:03
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,673
|
r
|
BasinData.R
|
#' @name BasinInfo
#' @docType data
#' @title Data sample: characteristics of a fictional catchment (L0123001, L0123002 or L0123003)
#' @description
#' R-object containing the code, station's name, area and hypsometric curve of the catchment.
#' @encoding UTF-8
#' @format
#' List named 'BasinInfo' containing
#' \itemize{
#' \item two strings: catchment's code and station's name
#' \item one float: catchment's area in km2
#' \item one numeric vector: catchment's hypsometric curve (min, quantiles 01 to 99 and max) in metres
#' }
#' @examples
#' require(airGR)
#' data(L0123001)
#' str(BasinInfo)
NULL
#' @name BasinObs
#' @docType data
#' @title Data sample: time series of observations of a fictional catchment (L0123001, L0123002 or L0123003)
#' @description
#' R-object containing the times series of precipitation, temperature, potential evapotranspiration and discharges. \cr
#' Times series for L0123001 or L0123002 are at the daily time-step for use with daily models such as GR4J, GR5J, GR6J, CemaNeigeGR4J, CemaNeigeGR5J and CemaNeigeGR6J.
#' Times series for L0123003 are at the hourly time-step for use with hourly models such as GR4H.
#' @encoding UTF-8
#' @format
#' Data frame named 'BasinObs' containing
#' \itemize{
#' \item one POSIXlt vector: time series dates in the POSIXlt format
#' \item five numeric vectors: time series of catchment average precipitation [mm], catchment average air temperature [degC], catchment average potential evapotranspiration [mm], outlet discharge [l/s], outlet discharge [mm]
#' }
#' @examples
#' require(airGR)
#' data(L0123001)
#' str(BasinObs)
NULL
|
6cf16084217902c73607df770eeb34c37a4fb612
|
01952802d02506e5741019650079870fc70da227
|
/plot3.R
|
dbf8230f6de486a6d83e23988256b4193d3e4d0c
|
[] |
no_license
|
Nadhir10/ExData_Plotting1
|
dff1eec8e3f7483d142799ffb38bb9dcc188b59f
|
e2c0e5bf1b03005b476b99733e0dcba1d3088ae3
|
refs/heads/master
| 2021-01-21T10:52:48.219077
| 2017-05-19T10:19:56
| 2017-05-19T10:19:56
| 91,711,750
| 0
| 0
| null | 2017-05-18T15:50:18
| 2017-05-18T15:50:17
| null |
UTF-8
|
R
| false
| false
| 930
|
r
|
plot3.R
|
## Exploratory Data Analysis
## Week 1 : peer review assignment
## Plot 3

## Read and label data; household_power_consumption.txt must be in the
## working directory. skip/nrows select the two target days directly.
x<-read.table("household_power_consumption.txt",sep=";" ,skip=66637, nrows=2880, na.strings="?")
label<-read.table("household_power_consumption.txt",header= TRUE, sep=";", nrows=1)
names(x)<-names(label)

## Build a combined date-time variable.
library(lubridate)
datetime<-dmy_hms(paste(x$Date,x$Time))
## Note : on a non-English locale the weekday axis labels follow the OS
## language (e.g. Jeu/Ven/Sam instead of Thu/Fri/Sat).

## Plot the three sub-metering series to a PNG device.
png("plot3.png")
plot(datetime, x$Sub_metering_1,"n", xlab = "", ylab="Energy sub metering")
lines(datetime, x$Sub_metering_1)
lines(datetime, x$Sub_metering_2, col="red")
lines(datetime, x$Sub_metering_3, col="blue")
## BUG FIX: the legend colours were c("black","blue","red") while the series
## are drawn black/red/blue, mislabelling sub-metering 2 and 3.
legend("topright", lty=c(1,1,1), col=c("black","red","blue"), legend=c("Sub_metering_1","Sub_metering_2","Sub_metering_3"))
dev.off()
|
52bfc16d719913ad99a0b13dee03cb4dba208496
|
33a7b35ba0b5f86f7bd64b06155784c31e47b2c5
|
/rmd2md.R
|
be298f882a211489807ff81020ba7596ae0fb69d
|
[] |
no_license
|
bici-sancta/dincerti.github.io
|
02900dc552a478d8676bacb3fdea2efd913aa4f9
|
5059614400d49154628fbc06ae7abbb8fbbb8e86
|
refs/heads/master
| 2020-04-08T14:34:31.183660
| 2018-11-18T18:14:58
| 2018-11-18T18:14:58
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 616
|
r
|
rmd2md.R
|
# Knit the R Markdown post sources into Jekyll-ready Markdown files.
library("knitr")
# Figures go to figs/ and are referenced from the site root; chunks are
# evaluated from the current working directory.
opts_chunk$set(fig.path = "figs/", fig.width = 8, fig.height = 5, fig.align = 'center')
opts_knit$set(base.url = "/", root.dir = getwd())
# One knit() call per post: input .Rmd -> dated markdown file in _posts/.
knit("_rmd-posts/twopart.Rmd", "_posts/2015-09-11-twopart.md")
knit("_rmd-posts/markov_cohort.Rmd", "_posts/2015-10-15-markov_cohort.md")
knit("_rmd-posts/bayesian_markov_cohort.Rmd", "_posts/2015-10-15-bayesian_markov_cohort.md")
knit("_rmd-posts/bayesian_meta_analysis.Rmd", "_posts/2015-10-31-bayesian-meta-analysis.md")
knit("_rmd-posts/diabetes_highcost.Rmd", "_posts/2015-10-13-diabetes-highcost.md")
knit("_rmd-posts/psa.Rmd", "_posts/2018-02-10-psa.md")
|
3432a667f720bf5f33760ac5aba9866e5442fe70
|
b44a40ea5eb1ff8b88c7ce630cb7cfa81b109c77
|
/2020-10-08 Error handling in R/compute_change_table.R
|
3bc9b5d2dff1b5d150f23389c9651e13da432caf
|
[] |
no_license
|
DataS-DHSC/coffee-and-coding
|
929c5f5be0163c3d5608b4a17e4d80ddb357ebb2
|
3ce5961b67210c71522488d62f404dac1c27f097
|
refs/heads/master
| 2023-07-10T10:23:29.394206
| 2023-06-29T12:06:18
| 2023-06-29T12:06:18
| 244,601,849
| 18
| 8
| null | 2023-06-29T12:06:20
| 2020-03-03T10:03:42
|
HTML
|
UTF-8
|
R
| false
| false
| 428
|
r
|
compute_change_table.R
|
# Summarise, per group, the change in a comparison column relative to a
# baseline time point: rows at start_time are weighted -1 and all other rows
# +1, so change = sum(later values) - sum(values at start_time).
#
#   input_data     data frame containing the columns named below
#   group_col      name (string) of the grouping column
#   time_col       name (string) of the time column
#   comparison_col name (string) of the numeric column being compared
#   start_time     value of time_col treated as the baseline
#
# Returns a data frame with one row per group (column `group_marker`) and
# the summed `change`.
compute_change_table <- function(input_data, group_col, time_col, comparison_col, start_time) {
  # -1 weight for baseline rows, +1 for everything else.
  input_data$time_marker <- ifelse(input_data[,time_col] == start_time, -1, 1)
  input_data$comparison_marker <- input_data[,comparison_col]
  input_data$group_marker <- input_data[,group_col]

  # BUG FIX: the original mixed dplyr::group_by() with an unqualified
  # summarise() and %>%, which fails unless dplyr and magrittr happen to be
  # attached. Fully qualify both verbs and drop the pipe dependency.
  grouped <- dplyr::group_by(input_data, group_marker)
  return(dplyr::summarise(grouped, change = sum(comparison_marker * time_marker)))
}
|
6b175c1f5d4bbbf326a6411154cf86035246d9cf
|
6ffe510908d9f7df2357e2fd6dc692df70233afb
|
/src/ve_init.r
|
4b1202ad9742d984a0096dfb97022e369c378bd4
|
[] |
no_license
|
willbutler42/VertEgg-R
|
ba2282ccaec2ea222f85330da56e166756d839ac
|
53a5204eb66c45cb78064e91cc3b7d116df0bf5b
|
refs/heads/master
| 2020-03-28T00:20:39.522476
| 2020-01-16T20:54:26
| 2020-01-16T20:54:26
| 147,400,584
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,098
|
r
|
ve_init.r
|
####################################################################################
#################### Runner for VertEgg R package ##################################
####################################################################################
# Sources every component script of the VertEgg model and defines physical
# constants and spawning-period dates used downstream.
# NOTE(review): the source() paths use Windows-style backslashes, so this
# runner only works as-is on Windows; forward slashes would be portable.

##############################################
## R scripts #################################
##############################################

## Global variables and egg distribution initialisation
source("R.initialise\\ve_grid.r")
source("R.initialise\\spawn.r")
source("R.initialise\\ve_rand.r")

## Stationary coefficients
source("R.functions\\eggsact.r")
source("R.functions\\srcsact.r")
source("R.functions\\sstate.r")

## Terminal velocity
source("R.functions\\eggvelst.r")
source("R.functions\\eggvel.r")
source("R.functions\\molvisk.r")
source("R.functions\\dens0.r")

## Analysis tools
source("R.functions\\ve_int.r")
source("R.functions\\ve_mean.r")
source("R.functions\\ve_std.r")
source("R.functions\\eggmom.r")
source("R.functions\\ve_drint.r")
source("R.functions\\ve_rmsd.r")
source("R.functions\\ve_quantile.r")

## Transient problems
source("R.functions\\lwendrof.r")
source("R.functions\\ftcs.r")
source("R.functions\\minlim.r")
source("R.functions\\posmet.r")
source("R.functions\\upstream.r")

################################################
############## densities and diameters #########
################################################
# load(file="data\\csh.RData")

################################################
############## Variables #######################
################################################

## Gravitational acceleration (m/s^2)
g <- 9.81
## Standard density of seawater (kg/m^3)
rho_s <- 1025

## Start and end dates of spawning gadoids in Iceland.
## NOTE(review): prefix letters presumably denote species (e.g. c/h/s pairs
## of start/end dates) -- confirm against the model documentation.
cs <- as.Date("15/03/2006", format="%d/%m/%Y")
ce <- as.Date("20/05/2006", format="%d/%m/%Y")
hs <- as.Date("31/03/2006", format="%d/%m/%Y")
he <- as.Date("31/05/2006", format="%d/%m/%Y")
ss <- as.Date("01/02/2006", format="%d/%m/%Y")
se <- as.Date("15/04/2006", format="%d/%m/%Y")
|
fcbcced956178ab9f49543bdbeb5605289a77d84
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/OptimalTiming/examples/SimCml.Rd.R
|
5bc1187d09d71e3612d1b3175d82ba8fce1b680b
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 139
|
r
|
SimCml.Rd.R
|
# Extracted example for the SimCml dataset from the OptimalTiming package.
library(OptimalTiming)

### Name: SimCml
### Title: Simulated data for CML patients
### Aliases: SimCml

### ** Examples

# Load the simulated CML patient data into the workspace.
data(SimCml)
|
4f3ab79dd1bb6533900c217f972d7a1f80e45f12
|
ffdea92d4315e4363dd4ae673a1a6adf82a761b5
|
/data/genthat_extracted_code/Renvlp/examples/testcoef.genv.Rd.R
|
aad2b182d9ed3c79d7939459e6bb28460a9fbfa8
|
[] |
no_license
|
surayaaramli/typeRrh
|
d257ac8905c49123f4ccd4e377ee3dfc84d1636c
|
66e6996f31961bc8b9aafe1a6a6098327b66bf71
|
refs/heads/master
| 2023-05-05T04:05:31.617869
| 2019-04-25T22:10:06
| 2019-04-25T22:10:06
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 447
|
r
|
testcoef.genv.Rd.R
|
# Extracted example for testcoef.genv() from the Renvlp package:
# hypothesis test of the coefficients of the groupwise envelope model.
library(Renvlp)

### Name: testcoef.genv
### Title: Hypothesis test of the coefficients of the groupwise envelope
###   model
### Aliases: testcoef.genv

### ** Examples

data(fiberpaper)
# Predictors (columns 5 and 7), responses (columns 1-3), and a binary
# grouping variable derived from column 6.
X <- fiberpaper[ , c(5, 7)]
Y <- fiberpaper[ , 1:3]
Z <- as.numeric(fiberpaper[ , 6] > mean(fiberpaper[ , 6]))
# Choose the envelope dimension, then fit the groupwise envelope model.
u <- u.genv(X, Y, Z)
u
m <- genv(X, Y, Z, 2)
m
# Test L %*% beta %*% R = A with identity contrast matrices.
L <- diag(3)
R <- diag(2)
A <- matrix(0, 3, 2)
test.res <- testcoef.genv(m, L, R, A)
test.res
|
8c1dd24478dd9334f19ab7f89528e55ab16ce926
|
51b40311e652db301aedf34723ee848203764503
|
/tfTarget/man/searchTFBS.Rd
|
9c0dfe3926256005d5c484769988f89561440324
|
[] |
no_license
|
Danko-Lab/tfTarget
|
5298fb32e852863fa5940014dd5a7a5e9dea6e25
|
e4b1994a1c8fce89791f4c8a5776721a09ceaa27
|
refs/heads/master
| 2022-09-12T01:07:02.458145
| 2022-08-27T15:29:13
| 2022-08-27T15:29:13
| 135,333,078
| 6
| 2
| null | 2020-01-17T03:31:50
| 2018-05-29T17:50:52
|
R
|
UTF-8
|
R
| false
| false
| 1,248
|
rd
|
searchTFBS.Rd
|
\name{searchTFBS}
\alias{searchTFBS}
\title{
Search TFBS
}
\description{
Search TFBS
}
\usage{
searchTFBS(tfTar,
tfs,
file.twoBit,
pval.cutoff.up = 0.01,
pval.cutoff.down = 0.1,
half.size = 150,
mTH = 7,
min.size = 150,
run.repeats = 2,
ncores = 1)
}
\arguments{
\item{tfTar}{tfTarget object obtained by \code{\link{diffTXN}}}
\item{tfs}{RDATA file including the TFBS identified by RtfbsDB package.}
\item{file.twoBit}{File name indicating the binary data of sequence.}
\item{pval.cutoff.up}{Float indicating.}
\item{pval.cutoff.down}{Float indicating.}
\item{half.size}{Number indicating.}
\item{mTH}{Float indicating the threshold used in RtfbsDB methods.}
\item{min.size}{Number indicating.}
\item{run.repeats}{Number indicating.}
\item{ncores}{Number indicating CPU cores in parallel computing.}
}
\details{
}
\value{
The function adds new items to the input tfTarget object, including:
\item{enh.up.bed }{Description of 'comp1'}
\item{enh.down.bed }{Description of 'comp2'}
\item{enh.unc.bed }{Description of 'comp2'}
\item{motif.list.up }{Description of 'comp2'}
\item{motif.list.down }{Description of 'comp2'}
}
\examples{
}
\keyword{ tfTarget }
|
57bb2ed13e95a61d66c91942884f61fce4c1d89d
|
cf7cf9948dc3021d7ee287110f50d1abf470bcf0
|
/DAMM model script.R
|
1c92b79dfbb16b4623461c27b7980086c0a81678
|
[] |
no_license
|
colinaverill/DAMM-model
|
2819913143eca0b28e93a46ec710bad654024d40
|
fab42d60144ba998a256d327416648f7b824ec35
|
refs/heads/master
| 2020-05-07T08:59:23.825746
| 2015-02-25T19:09:14
| 2015-02-25T19:09:14
| 31,327,093
| 1
| 1
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,445
|
r
|
DAMM model script.R
|
## The DAMM model (Dual Arrhenius and Michaelis-Menten kinetics).
## Davidson et al. 2012. Global Change Biology. 18: 371-384.

# Vmax parameters.
# a.Sx, Ea and Km.S have been calibrated to the field data presented in
# Davidson et al. 2012.
R <- 0.008314472      # universal gas constant kJ / K / mol
g <- 9.81             # gravitational acceleration (unused here; kept for compatibility)
a.Sx <- 5.38*10^10    # pre-exponential factor of Vmax, mg C / cm3 / h
Ea <- 72.26           # activation energy for temperature-sensitive Vmax, kJ/mol
Km.S <- 9.95*10^-7    # Km of substrate g C / cm3
Km.O <- 0.121         # Km of oxygen

# Substrate and diffusion parameters.
Stot <- 0.048         # total soil C concentration, %C
p <- 0.000414         # fraction of total C pool that is soluble
S.sol <- Stot*p       # soluble substrate concentration, mg C / cm3
D.liq <- 3.17         # diffusion coefficient of substrate in liquid phase
D.gas <- 1.67         # diffusion coefficient for O2
BD <- 0.8             # bulk density g/cm3
PD <- 2.52            # particle density g/cm3
depth <- 10           # soil depth in centimeters

# Example data; replace with your own raw temperature and moisture data.
temp <- 23            # temperature, degrees C
moist <- 0.4          # volumetric water content of soil

# Soil C flux as a function of temperature (degC) and volumetric moisture,
# scaled by soil depth. Reads the calibrated parameters defined above.
DAMM.Cflux <- function(temp, moist){
  # Arrhenius temperature response.
  # BUG FIX: the original assigned the flux to a local named R, shadowing the
  # gas constant inside the function; the result was unchanged only because
  # Vmax happened to be computed first. Use a distinct name for the flux.
  Vmax <- a.Sx * exp(-Ea/(R*(temp + 273.15)))
  S <- S.sol * D.liq * moist          # substrate diffusing to microsites
  a <- 1 - (BD/PD) - moist            # air-filled porosity
  oxygen <- D.gas * 0.209 * a^(4/3)   # O2 supply via gas-phase diffusion
  flux <- Vmax * (S / (Km.S + S)) * (oxygen /(Km.O + oxygen))
  flux.scaled <- flux*depth*10000
  return(flux.scaled)
}

DAMM.Cflux(23,.05)
|
815430a6dfd2da481e0453ec8199c11f3bfa28d7
|
996ade89e9fc1a460fa1ec90f24f548c14d1d241
|
/tests/testthat/test_head2tailratio.R
|
178efdc3add53d45e425defcc6ff5fa357cfc647
|
[
"MIT"
] |
permissive
|
anspiess/PCRedux
|
323788e8da02781363a2c6414ec81a660e1bf645
|
9472df4f9d2f5284aa995e5167abb44d7e7a287a
|
refs/heads/master
| 2021-08-22T21:18:28.342931
| 2017-12-01T09:34:16
| 2017-12-01T09:34:16
| 112,632,885
| 0
| 0
| null | 2017-11-30T16:17:38
| 2017-11-30T16:17:37
| null |
UTF-8
|
R
| false
| false
| 505
|
r
|
test_head2tailratio.R
|
# Unit tests for head2tailratio() from the PCRedux package.
library(PCRedux)

context("head2tailratio")

test_that("head2tailratio gives the correct dimensions and properties", {
  library(qpcR)  # provides the 'competimer' qPCR example data set

  res <- head2tailratio(y=competimer[, 2], normalize=FALSE, slope_normalizer=TRUE)
  res_normalized <- head2tailratio(y=competimer[, 2], normalize=TRUE, slope_normalizer=TRUE)

  # expect_that()/is_a()/is_true() are deprecated and removed in testthat 3e;
  # use the first-class expectations instead.
  expect_true(is.numeric(res))
  expect_true(is.numeric(res_normalized))
  expect_true(res <= 3.589284)
  expect_true(res_normalized >= 23)
})
|
ed343d3ee40f911bfe8bd5cf6ba44d1327400538
|
d38ab28cf6ee680b5a82f37e7841d31617750da4
|
/Examples/ImageIOSelection/ImageIOSelection.R
|
ada69ddeec48cbcce7b3179de94046d9106c6351
|
[
"Apache-2.0",
"LicenseRef-scancode-unknown-license-reference"
] |
permissive
|
SimpleITK/SimpleITK
|
cdd9f417acc7f7fe20b006a75dc483d6bb6d9b20
|
cfb40ba1149ba9f186793ccdd206f7179c8ba7a3
|
refs/heads/master
| 2023-09-01T15:01:04.024343
| 2023-08-31T19:09:36
| 2023-08-31T19:09:36
| 1,069,177
| 764
| 216
|
Apache-2.0
| 2023-09-13T17:48:23
| 2010-11-10T18:56:04
|
SWIG
|
UTF-8
|
R
| false
| false
| 1,804
|
r
|
ImageIOSelection.R
|
#=========================================================================
#
#  Copyright NumFOCUS
#
#  Licensed under the Apache License, Version 2.0 (the "License");
#  you may not use this file except in compliance with the License.
#  You may obtain a copy of the License at
#
#         http://www.apache.org/licenses/LICENSE-2.0.txt
#
#  Unless required by applicable law or agreed to in writing, software
#  distributed under the License is distributed on an "AS IS" BASIS,
#  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#  See the License for the specific language governing permissions and
#  limitations under the License.
#
#=========================================================================
# Demonstrates explicit ImageIO selection in SimpleITK.
#
# Run with:
#
#   Rscript --vanilla IOSelection.R <image_file_name>
#

library(SimpleITK)

args <- commandArgs( TRUE )

if (length(args) < 1) {
   write('Usage arguments: <image_file_name>', stderr())
   quit(save="no", status=1)
}

# Find out which image IOs are supported
file_reader <- ImageFileReader()
image_ios <- file_reader$GetRegisteredImageIOs()
cat('The supported image IOs are: ', image_ios, '\n')
cat(rep('-',20),'\n', sep='')

# Another option is to just print the reader and see which
# IOs are supported
print(file_reader)
cat(rep('-',20),'\n', sep='')

# Force the use of a specific IO.
file_reader$SetImageIO('PNGImageIO')
file_reader$SetFileName(args[1])

# If the IO doesn't support reading the image type it
# will signal an error (or a warning, depending on the binding);
# handle both so the script always exits with a clear message.
# (The original handled only 'warning', so a genuine error would
# bypass this handler.)
image <- tryCatch(file_reader$Execute(),
                  error = function(err) {
                    message(err)
                    quit(save="no", status=1)
                  },
                  warning = function(err) {
                    message(err)
                    quit(save="no", status=1)
                  }
                 )

cat('Read image:',args[1],'\n')
size <- image$GetSize()
cat("Image size:", size[1], size[2], '\n')

quit(save="no", status=0)
|
2cc49a31ce136593f012b35d16987876f812e2ab
|
46b8efea7116a3808a2009faad5ed5b90238ec04
|
/R/stylesim-package.r
|
4a2e85bdd60165277a102c92b275fbccc5f6407c
|
[] |
no_license
|
erge324/stylesim
|
205e49855323280f16d3c2dc8f604f3827ee1719
|
bd2ee33771ac5c7c480e34e71231f5504a1e488c
|
refs/heads/master
| 2022-02-25T08:20:50.295444
| 2016-03-04T10:57:00
| 2016-03-04T10:57:00
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 632
|
r
|
stylesim-package.r
|
#' Simulate (and Analyse) Data Distorted by Response Styles
#'
#' Package-level documentation for \pkg{stylesim}. The most important function
#' is \code{\link{sim_style_data}}, which allows one to simulate item-response
#' data distorted by response styles (e.g., acquiescence).
#'
#' @references
#' Plieninger, H. (in press). Mountain or molehill: A simulation study on the impact of reponse styles. \emph{Educational and Psychological Measurement}.
#'
#' @examples
#' \dontrun{
#' # Simulate 123 respondents on 12 five-category items with an
#' # acquiescence response style (ARS); 6 items are reverse-keyed.
#' res_1 <- sim_style_data(n = 123, items = 12, categ = 5, style = "ARS",
#'                         reversed = 6)
#' }
#'
#' @name stylesim
#' @docType package
#' @author Hansjoerg Plieninger
#' @importClassesFrom ROCR performance
NULL
|
51f7882103429f4de6dfa434876aae8ab01661af
|
701192bbbb772d4448ce469a398ebb7f50047ca1
|
/ISM.Recon_Diagnostics.R
|
efff87e9de2c2ffcd13c419d2fcb025b0c05569b
|
[] |
no_license
|
ecgill/paleoclimate_reconstructor
|
ccda80323ae9fac243d7d74586da4ee7ef9ea10d
|
5d39bdcabff9cfe666d5cf1f36ea1d929434fb2e
|
refs/heads/master
| 2021-05-06T06:46:21.527754
| 2018-05-14T02:20:44
| 2018-05-14T02:20:44
| 113,892,855
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 19,331
|
r
|
ISM.Recon_Diagnostics.R
|
# ISM reconstruction diagnostics.
# Produces the paper/appendix figures for the reconstructed wind-stress curl
# and Indian summer monsoon rainfall: proxy maps, reconstructions, EOF/PC
# diagnostics, eigenvalue spectra, and calibration/validation skill maps.
# NOTE(review): rm(list=ls()) wipes the user's workspace; avoid in shared scripts.
rm(list=ls())
library(RColorBrewer)
library(fields)
# Colour palettes used throughout the figures.
myPalette1 <- colorRampPalette(rev(brewer.pal(9, "RdBu")), space="Lab")
myPalette2 <- colorRampPalette(rev(brewer.pal(9, "Greens")), space="Lab")
myPalette3 <- colorRampPalette(rev(brewer.pal(9, "YlOrRd")), space="Lab")
myPalette4 <- colorRampPalette(rev(brewer.pal(9, "BrBG")), space="Lab")
myPalette10 <- colorRampPalette(rev(brewer.pal(9, "Spectral")), space="Lab")
cbPalette <- c("#D55E00","#E69F00", "#56B4E9", "#009E73",
"#F0E442", "#0072B2", "#CC79A7","#999999",
"#CC0000","#9900CC","#66FF66","#996633","#838B9B")
# Read in diagnostic data.
# NOTE(review): hard-coded absolute path — portable only on the author's machine.
setwd("/Users/emilygill/Documents/University of Colorado/PHD Research/6. SST Reconstruction/REDO Sep2015/")
eofs.curl = read.table("./postfiles/ISM/diagnostics/EqPac_both_summer_zvsd.curl$u.txt",header=TRUE)
pcs.curl = read.table("./postfiles/ISM/diagnostics/EqPac_both_summer_pcs.curl.txt",header=TRUE)
lambdas.curl = scan("./postfiles/ISM/diagnostics/EqPac_both_summer_lambdas.curl.txt")
eofs.rain = read.table("./postfiles/ISM/diagnostics/EqPac_both_summer_zvsd.raindavg$u.txt",header=TRUE)
pcs.rain = read.table("./postfiles/ISM/diagnostics/EqPac_both_summer_pcs.raindavg.txt",header=TRUE, row.names=NULL)
lambdas.rain = scan("./postfiles/ISM/diagnostics/EqPac_both_summer_lambdas.raindavg.txt")
pcs.rain = pcs.rain[,-1]  # drop the leading index column (read with row.names=NULL)
eofs.p = read.table("./postfiles/ISM/diagnostics/EqPac_both_summer_zvsd.p$u.txt",header=TRUE)
pcs.p = read.table("./postfiles/ISM/diagnostics/EqPac_both_summer_pcs.p.txt",header=TRUE)
lambdas.p = scan("./postfiles/ISM/diagnostics/EqPac_both_summer_lambdas.p.txt")
recordnopt = read.table("./postfiles/EqPac/PC3/diagnostics/EqPac_both_annual_recordnopt",header=TRUE)
eofs.IOsst = read.table("./postfiles/ISM/diagnostics/EqPac_both_summer_zvsd.IOsst$u.txt",header=TRUE)
pcs.IOsst = read.table("./postfiles/ISM/diagnostics/EqPac_both_summer_pcs.IOsst.txt",header=TRUE)
lambdas.IOsst = scan("./postfiles/ISM/diagnostics/EqPac_both_summer_lambdas.IOsst.txt")
# Index vectors mapping the flattened (non-NaN) field values back onto grids.
icurl = scan("./postfiles/ISM/diagnostics/index-curl.txt")
irain = scan("./postfiles/ISM/diagnostics/index-rain.txt")
iiosst = scan("./postfiles/ISM/diagnostics/index-iosst.txt")
# Calibration / validation skill fields (R^2 and regression slope beta).
curl_calib_R2 = scan("./postfiles/ISM/diagnostics/calib_r2_unscsig_curl.txt")
curl_calib_b = scan("./postfiles/ISM/diagnostics/calib_b_unscsig_curl.txt")
rain_calib_R2 = scan("./postfiles/ISM/diagnostics/calib_r2_unscsig_rain.txt")
rain_calib_b = scan("./postfiles/ISM/diagnostics/calib_b_unscsig_rain.txt")
curl_valid_R2 = scan("./postfiles/ISM/diagnostics/valid_r2_unscsig_curl.txt")
curl_valid_b = scan("./postfiles/ISM/diagnostics/valid_b_unscsig_curl.txt")
rain_valid_R2 = scan("./postfiles/ISM/diagnostics/valid_r2_unscsig_rain.txt")
rain_valid_b = scan("./postfiles/ISM/diagnostics/valid_b_unscsig_rain.txt")
# Read in reconstruction results.
# One row per millennium (0-10 ka); rows are stacked via rbind below.
years = seq(0,10,1)
reconSST.curl = c()
reconSST.rain = c()
for (i in 1:length(years)){
curl = read.table(paste("./postfiles/ISM/EqPac_both_summer_8npc_", i-1, "ka_curl.txt", sep=""),header=TRUE)
rain = read.table(paste("./postfiles/ISM/EqPac_both_summer_4npc_", i-1, "ka_rainperc.txt", sep=""),header=TRUE)
reconSST.curl = rbind(reconSST.curl,curl)
reconSST.rain = rbind(reconSST.rain,rain)
}
# Grid definitions (lon/lat axes) for the rain, curl, Indian Ocean SST and
# tropical Pacific fields; the n* counts size the rep(NaN, ...) templates below.
xrain = seq(66.5,100.5,by=1)
nxrain = length(xrain) # nrows = 35
yrain = seq(6.5,38.5,by=1)
nyrain = length(yrain) # ncols = 33
xcurl = seq(35.625, 76.875, by=1.875)
nxcurl = length(xcurl) # nrows = 33
ycurl = seq(-6.666573, 31.42808, by=1.904732)
nycurl = length(ycurl) # ncols = 23
xiosst = seq(30,106,by=2)
nxiosst = length(xiosst)
yiosst = seq(-10,28,by=2)
nyiosst = length(yiosst)
xgrid = seq(100,300,by=2)
nx = length(xgrid)
ygrid = seq(-10,10,by=2)
ny = length(ygrid)
### FIGURE PLAN FOR MAIN PAPER
# FIGURE 1 = PROXY MAP (separate code)
# FIGURE 2 = METHODS (powerpoint)
# FIGURE 3 = RECONSTRUCTED CURL AND RAINFALL
# FIGURE 4 = GUPTA COMPARISON (separate code)
# FIGURE 5 = REGIONAL RAINFALL RECONSTRUCTIONS (separate code)
### FIGURE PLAN FOR APPENDIX
# FIG. A1 = INDIAN OCEAN FIRST PC TREND
# FIG. A2 = EIGENVALUE SPECTRA
# FIG. A3 = CURL & RAIN PCS AND EOFS
# FIG. A4 = CALIBRATION & VERIFICATION
# OPTIONAL = LIMITED SST PCS AND EOFS
# OPTIONAL = STANDARD ERRORS?
########################################################## FIG 2. SST proxy coverage.
# West Pacific proxy sites: lat/lon, type (M = Mg/Ca, U = Uk'37) and a
# used/not-used/special flag driving the marker colours.
wplat = c(8.8,6.635,6.3,6.514,1.25,0.32,-1.5,-4.689,-3.56,-5.003,-5.9,-7.4,-9.649,-10.592,
9.233,8.729,5.65,6.158,6.48,1.25,-6.543)
wplon = c(121.3,113.409,125.83,126.498,146.14,159.35,100.1,117.903,119.4,133.445,103.3,115.2,118.338,125.388,
109.383,109.869,110.65,112.213,125.83,146.14,103.833)
wpno = 1:21
wptype = c(rep("M",14), rep("U",7))
wpproxy = as.data.frame(cbind(wpno,wplat,wplon,wptype))
wpused = c("S","Y","Y","S","S","S","Y","Y","Y","Y","Y","Y","Y","Y",
"Y","Y","S","Y","N","S","Y")
dev.new(width=14, height=4)
world(ylim=c(-11,11), xlim=c(95,160), lwd=0.25)
abline(h=c(-10,-5,0,5,10), v=seq(90,160,5), col="grey80", lwd=0.5)
world(ylim=c(-11,11), xlim=c(95,160), fill=TRUE, col="grey80", border="black", lwd=0.5, add=TRUE)
map.axes()
points(wplon, wplat,
bg = ifelse(wpused == "Y","#0072B2",ifelse(wpused == "N", "#E69F00", "red")),
pch = ifelse(wptype == "M",22,21), cex = ifelse(wptype == "M",2,1.25)
)
legend("topright",c("Mg/Ca-based SST","Uk37-based SST"), pch=c(22,21), pt.cex=c(2,1.25), bg="white")
# East Pacific proxy sites, plotted in a second device.
eplat = c(7.85,0.515,0.022,-1.217,-2.51,
8.206,7.85,4.847,2.25,1.5,0.52,-0.467,-1.217,-1.517,-2.51,-1.85,-3.383,-3.59)
eplon = c(-83.608,-92.398,-86.446,-89.683,-84.650,
-84.122,-83.6,-77.963,-90.95,-86.485,-92.40,-82.667,-89.683,-85.817,-84.650,-82.78,-83.517,-81.18)
epno = 22:39
eptype = c(rep("M",5), rep("U",13))
epproxy = as.data.frame(cbind(epno,eplat,eplon,eptype))
epused = c("Y","Y","Y","Y","N",
"Y","N","Y","Y","Y","N","Y","N","S","Y","Y","Y","Y")
dev.new(width=4, height=4)
world(ylim=c(-11,11), xlim=c(-95,-75), lwd=0.25)
abline(h=c(-10,-5,0,5,10), v=seq(-95,-75,5), col="grey80", lwd=0.5)
world(ylim=c(-11,11), xlim=c(-100,-75), fill=TRUE, col="grey80", border="black", lwd=0.5, add=TRUE)
map.axes()
points(eplon, eplat,
bg = ifelse(epused == "Y","#0072B2",ifelse(epused == "N", "#E69F00", "red")),
pch = ifelse(eptype == "M",22,21), cex = ifelse(eptype == "M",2,1.25)
)
########################################################## FIG 3. Reconstructed curl and rain.
# One device per plotted millennium (2, 4, ..., 10 ka); curl plotted as an
# image with saturated out-of-range colours, rain overlaid on its own grid.
prows = 5
pyrs = seq(2,10,2)
for (i in 1:prows){
dev.new(width=6, height=5)
zfull = rep(NaN,(nxcurl*nycurl))
zfull[icurl] = unname(as.numeric(reconSST.curl[(pyrs[i]+1),]))
zmat = matrix(zfull,nrow=nxcurl,ncol=nycurl)
image(xcurl,ycurl,zmat,
ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=myPalette1(100),
zlim = c(-35,35)
)
image(xcurl,ycurl,zmat,
ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col="firebrick4",
zlim = c(35.1,2500), add=TRUE
)
image(xcurl,ycurl,zmat,
ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col="dodgerblue4",
zlim = c(-375,-35.1), add=TRUE
)
axis(2, cex.axis=1.5)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, fill=TRUE, col="grey80", border="grey50"
)
contour(xcurl,ycurl,zmat,add=TRUE,levels = c(-30,-15,0,15,30),lwd=1.25,labcex=0.9)
points(57.36, 18.05, pch=21, col="black", bg="#009E73", cex=1.5)
abline(h = 0, col="grey50", lty=2)
zfull = rep(NaN, (nxrain*nyrain))
zfull[irain] = unname(as.numeric(reconSST.rain[(pyrs[i]+1),]))
zmat = matrix(zfull,nrow=nxrain,ncol=nyrain)
image(xrain,yrain,zmat,
ylim=range(min(yrain),max(yrain)), xlim=range(min(xrain),max(xrain)),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette4(100)),
zlim = c(-62,62), add=TRUE
)
contour(xrain,yrain,zmat,add=TRUE,levels = seq(-60,60,15),lwd=1.25,labcex=0.9)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, col="grey50"
)
if (i == prows){
axis(1, cex.axis=1.5)
}
}
########################################################## FIG A1. Indian Ocean First PC Trend
dev.new(width=6,height=5)
zfull = rep(NaN,(nxiosst*nyiosst))
zfull[iiosst] = unname(as.numeric(eofs.IOsst[,1]))
zmat = matrix(zfull,nrow=nxiosst,ncol=nyiosst)
image.plot(xiosst,yiosst,zmat,
ylim=range(min(yiosst),max(yiosst)), xlim=range(min(xiosst),max(xiosst)),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette1(100)),
zlim = c(-0.07,0.07), main = "SST EOF no.1", axes=FALSE, horizontal=TRUE
)
contour(xiosst,yiosst,zmat,add=TRUE,levels = seq(-1,1,0.01),lwd=1.25,labcex=0.9)
world(ylim=range(min(yiosst),max(yiosst)), xlim=range(min(xiosst),max(xiosst)),
add=TRUE, col="grey50"
)
axis(1,at=seq(30,100,20), cex.axis=1.5)
axis(2,at=seq(-10,30,10), cex.axis=1.5)
# Time series of the (standardized) first PC, 1949-2013.
dev.new(width=6, height=4)
plot(1949:2013, scale(pcs.IOsst[,1]), type="l", main="SST PC no.1",
axes=FALSE, ylab="", xlab="")
axis(2, cex.axis=1.5)
axis(1, at = seq(1949,2013,5), cex.axis=1.5)
# Eigenvalue spectrum (variance explained per mode), first 10 modes.
dev.new(width=4, height=6)
tot=10
plot(100 * lambdas.IOsst[1:tot], type = "l", ylab = "Variance Explained (%)",
main = "Indian Ocean SST Eigenvalue Spectrum",
bty = "n", xaxt = "n", xlab = "", ylim=c(0,100), lwd=2)
abline(v = 1:tot, h=seq(0,100,20), col = "grey")
points(100 * lambdas.IOsst[1:tot], type="p", pch=20, col="black", cex=1)
axis(1, at = seq(1,tot,1))
title(xlab = "Modes")
########################################################## FIG A2. EVS for all three
# Cumulative variance explained for curl, rain and limited SST; dashed
# guides mark the number of retained modes and their cumulative percentages.
tot = 10
dev.new(width=4,height=6)
plot(100 * cumsum(lambdas.curl)[1:tot], type = "l", ylab = "Cumulative Percentage of Variance Explained (%)",
main = "Eigenvalue Spectra",
bty = "n", xaxt = "n", xlab = "", ylim=c(0,100), lwd=2, col="#B2182B")
abline(v = 1:tot, h=seq(10,100,10), col = "grey80")
lines(100 * cumsum(lambdas.rain)[1:tot], col="#00665E", lwd=2)
lines(100 * cumsum(lambdas.p)[1:tot], lwd=2)
segments(4,0,4,96.3, lty=2, col="#00665E", lwd=1.5) # RAIN
segments(4,37.3,0,37.3, lty=2, col="#00665E", lwd=1.5)
segments(4,96.3,0,96.3, lty=2, col="#00665E", lwd=1.5)
text(9,50.5,"Precip", srt=20, col="#00665E")
segments(8,0,8,99.1, lty=2, col="#B2182B", lwd=1.5) # CURL
segments(8,75.5,0,75.5, lty=2, col="#B2182B", lwd=1.5)
segments(8,99.1,0,99.1, lty=2, col="#B2182B", lwd=1.5)
text(9,76,"Curl", srt=20, col="#B2182B")
text(9,97,"Lim SST")
# NOTE(review): the two text() calls below duplicate the "Precip"/"Curl"
# labels drawn above — possibly unintentional over-plotting.
text(9,50.5,"Precip", srt=20, col="#00665E")
text(9,76,"Curl", srt=20, col="#B2182B")
text(3.75,12,"4 Modes Retained",srt=90,col="grey40", cex=0.75)
text(7.75,12,"8 Modes Retained",srt=90,col="grey40", cex=0.75)
text(1.3,39,"37.3%", col="#00665E", cex=0.75)
text(1.3,94,"96.3%", col="#00665E", cex=0.75)
text(1.3,77.5,"75.5%", col="#B2182B", cex=0.75)
text(1.3,101,"99.1%", col="#B2182B", cex=0.75)
axis(1, at = seq(1,tot,1))
title(xlab = "Modes")
########################################################## FIG A3. EOFs and PCs for curl and rain
# EOF maps (curl + rain overlay) for the first 'prow' modes, each scaled by
# its eigenvalue relative to mode 1.
prow = 4
dev.new(height=10,width=3.5)
par(mfrow = c(prow, 1), mar = c(2.5, 2.5, 1, 1))
for (i in 1:prow){
scaleEOF = eofs.curl[,i]*(lambdas.curl[i]/lambdas.curl[1])
zfull = rep(NaN,(nxcurl*nycurl))
zfull[icurl] = scaleEOF
zmat = matrix(zfull,nrow=nxcurl,ncol=nycurl)
image(xcurl,ycurl,zmat,
ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=myPalette1(100),
zlim = c(-0.15,0.15), main = paste("EOF no.", i, sep="")
)
axis(2, cex.axis=1.5)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, fill=TRUE, col="grey80", border="grey50"
)
contour(xcurl,ycurl,zmat, add=TRUE, levels=c(-0.15,-0.1,-0.05,0,0.05,0.1,0.15), lwd=1.25, labcex=0.9)
points(57.36, 18.05, pch=21, col="black", bg="#009E73", cex=1.5)
abline(h = 0, col="grey50", lty=2)
scaleEOF = eofs.rain[,i]*(lambdas.rain[i]/lambdas.rain[1])
zfull = rep(NaN,(nxrain*nyrain))
zfull[irain] = scaleEOF
zmat = matrix(zfull,nrow=nxrain,ncol=nyrain)
image(xrain,yrain,zmat,
ylim=range(min(yrain),max(yrain)), xlim=range(min(xrain),max(xrain)),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette4(100)),
zlim = c(-0.10,0.10), add=TRUE
)
contour(xrain,yrain,zmat, add=TRUE, levels=c(-0.1,-0.05,0,0.05,0.1), lwd=1.25, labcex=0.9)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, col="grey50"
)
if (i == prow){
axis(1, cex.axis=1.5)
}
}
# PLOT OVERLYING PCs
# Curl (1949-2013) and rain (1901-2004) PCs padded onto a common 1901-2013
# axis so they can be over-plotted with separate y-axes.
prow=4
dev.new(height=10,width=4)
par(mfrow = c(prow, 1), mar = c(2.5, 2.5, 1, 2.5))
full = 1901:2013
for (i in 1:prow){
curlpc = rep(NA,length(1901:2013))
curlpc[which(full==1949):which(full==2013)] = pcs.curl[,i]
rainpc = rep(NA,length(1901:2013))
rainpc[which(full==1901):which(full==2004)] = pcs.rain[,i]
plot(full, curlpc, type="l",
col="#B2182B", main=paste("PC no.", i, sep=""), axes=FALSE)
axis(2, cex.axis=1.5, col="#B2182B")
par(new=TRUE)
plot(full, rainpc, type="l",
col="#00665E", axes=FALSE)
axis(4, cex.axis=1.5,col="#00665E")
if (i == prow){
axis(1, at = seq(1901, 2013, 10), cex.axis=1.5)
legend(1901,10,c("Curl","Rain"), lty=c(1,1), c("#B2182B","#00665E"))
}
}
########################################################## FIG A4. Calibration and Validation
#### CALIBRATION #####
dev.new(width=6, height=8)
par(mfrow=c(2,1), mar=c(2,2,2,2))
# Curl R2
zfull = rep(NaN,(nxcurl*nycurl))
zfull[icurl] = unname(as.numeric(curl_calib_R2))
zmat = matrix(zfull,nrow=nxcurl,ncol=nycurl)
image(xcurl,ycurl,zmat,
ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette2(100)),
zlim = c(0,1)
)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, fill=TRUE, col="grey80", border="grey50"
)
contour(xcurl,ycurl,zmat,add=TRUE,levels = seq(0,1,0.1),lwd=1.25,labcex=0.9)
# Add Rain R2
zfull = rep(NaN, (nxrain*nyrain))
zfull[irain] = unname(as.numeric(rain_calib_R2))
zmat = matrix(zfull,nrow=nxrain,ncol=nyrain)
image(xrain,yrain,zmat,
ylim=range(min(yrain),max(yrain)), xlim=range(min(xrain),max(xrain)),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette2(100)),
zlim = c(0,1), add=TRUE
)
contour(xrain,yrain,zmat,add=TRUE,levels = seq(0,1,0.1),lwd=1.25,labcex=0.9)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, col="grey50"
)
###
# Curl Beta
zfull = rep(NaN,(nxcurl*nycurl))
zfull[icurl] = unname(as.numeric(curl_calib_b))
zmat = matrix(zfull,nrow=nxcurl,ncol=nycurl)
image.plot(xcurl,ycurl,zmat,
ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette3(100)),
zlim = c(-1,1), horizontal=TRUE
)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, fill=TRUE, col="grey80", border="grey50"
)
contour(xcurl,ycurl,zmat,add=TRUE,levels = seq(-1,1,0.2),lwd=1.25,labcex=0.9)
# Add Rain Beta
zfull = rep(NaN, (nxrain*nyrain))
zfull[irain] = unname(as.numeric(rain_calib_b))
zmat = matrix(zfull,nrow=nxrain,ncol=nyrain)
image(xrain,yrain,zmat,
ylim=range(min(yrain),max(yrain)), xlim=range(min(xrain),max(xrain)),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette3(100)),
zlim = c(-1,1), add=TRUE
)
contour(xrain,yrain,zmat,add=TRUE,levels = seq(-1,1,0.1),lwd=1.25,labcex=0.9)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, col="grey50"
)
#### VALIDATION #####
dev.new(width=6, height=8)
par(mfrow=c(2,1), mar=c(2,2,2,2))
# Curl R2
zfull = rep(NaN,(nxcurl*nycurl))
zfull[icurl] = unname(as.numeric(curl_valid_R2))
zmat = matrix(zfull,nrow=nxcurl,ncol=nycurl)
image(xcurl,ycurl,zmat,
ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette2(100)),
zlim = c(0,1)
)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, fill=TRUE, col="grey80", border="grey50"
)
contour(xcurl,ycurl,zmat,add=TRUE,levels = seq(0,1,0.1),lwd=1.25,labcex=0.9)
# Add Rain R2
zfull = rep(NaN, (nxrain*nyrain))
zfull[irain] = unname(as.numeric(rain_valid_R2))
zmat = matrix(zfull,nrow=nxrain,ncol=nyrain)
image(xrain,yrain,zmat,
ylim=range(min(yrain),max(yrain)), xlim=range(min(xrain),max(xrain)),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette2(100)),
zlim = c(0,1), add=TRUE
)
contour(xrain,yrain,zmat,add=TRUE,levels = seq(0,1,0.1),lwd=1.25,labcex=0.9)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, col="grey50"
)
###
# Curl Beta
zfull = rep(NaN,(nxcurl*nycurl))
zfull[icurl] = unname(as.numeric(curl_valid_b))
zmat = matrix(zfull,nrow=nxcurl,ncol=nycurl)
# NOTE(review): zlim here is c(0,1), but the calibration beta maps above use
# zlim = c(-1,1) (betas can be negative) — confirm this asymmetry is intended.
image(xcurl,ycurl,zmat,
ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette3(100)),
zlim = c(0,1)
)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, fill=TRUE, col="grey80", border="grey50"
)
contour(xcurl,ycurl,zmat,add=TRUE,levels = seq(0,1,0.1),lwd=1.25,labcex=0.9)
# Add Rain Beta
zfull = rep(NaN, (nxrain*nyrain))
zfull[irain] = unname(as.numeric(rain_valid_b))
zmat = matrix(zfull,nrow=nxrain,ncol=nyrain)
image(xrain,yrain,zmat,
ylim=range(min(yrain),max(yrain)), xlim=range(min(xrain),max(xrain)),
xlab="",ylab="", yaxt="n", xaxt="n", cex.axis=1.5, col=rev(myPalette3(100)),
zlim = c(0,1), add=TRUE
)
contour(xrain,yrain,zmat,add=TRUE,levels = seq(0,1,0.1),lwd=1.25,labcex=0.9)
world(ylim=range(min(ycurl),40), xlim=range(min(xcurl),100),
add=TRUE, col="grey50"
)
########################################################## OPTIONAL. Standard Errors
########################################################## OPTIONAL. EOFs and PCs for limited SST
# prow = 4
# plotdata = as.data.frame(unname(cbind(recordnopt,eofs.p[,1:4])))
# names(plotdata) = c("No.","Lat","Lon","EOF1","EOF2","EOF3","EOF4")
# library(seqinr)
# dblue <- col2alpha("#2066AC",alpha=0.5)
# lblue <- col2alpha("#BCDAEA",alpha=0.65)
# lred <- col2alpha("#FBC8AF",alpha=0.65)
# dred <- col2alpha("#B2182B",alpha=0.5)
# dev.new(height=7,width=8)
# par(mfrow = c(prow, 1), mar = c(2.5, 2.5, 1, 1))
# for (i in 1:prow){
# zfull = rep(NaN,(nx*ny))
# zmat = matrix(zfull,nrow=nx,ncol=ny)
# image(xgrid,ygrid,zmat,
# ylim=range(-15,15), xlim=range(95,280), xlab="",ylab="", axes=FALSE,
# cex = 1.75, main = paste("EOF no.", i, sep=""), col=myPalette1(100),
# zlim=c(-0.12,0.12)
# )
# mapnames <- map("world2",
# xlim=c(min(ygrid), max(xgrid)), ylim=c(min(ygrid), max(ygrid)),
# boundary=TRUE, interior=TRUE,
# col="grey50", add=TRUE
# )
# axis(2,seq(-10,10,by=5),cex.axis=1.5)
# # PLOT THE LIMITED FIELD EOF BUBBLES
# EOFlim = plotdata[,i+3]
# ratio = lambdas.p[1:4]/lambdas.p[1]
# EOFrat = abs(EOFlim)*ratio[i]
# radii = sqrt(EOFrat/pi)
# xx = rep(-1,27)
# xx[which(EOFlim > 0)] = 1
# xradii = radii * xx
# print(xradii)
# symbols(x=plotdata$Lon, y=plotdata$Lat,
# circles=abs(xradii), inches = max(abs(xradii)),
# bg = ifelse(xradii < -.2 ,dblue,
# ifelse(xradii >= -.2 & xradii <= 0,lblue,
# ifelse(xradii > 0 & xradii <= 0.2, lred,dred))),
# add=TRUE
# )
# if (i == prow) {
# axis(1,seq(100,280,by=20),
# labels=c("100","120","140","160","180","-160","-140","-120","-100","-80"),
# cex.axis=1.5)
# }
# }
########################################################## OPTIONAL. Standard Errors
|
430e7149bbeb4c9eb36ed010c43e34146901b63a
|
7114d68d53dd95e1ee2a96052ccd395da024b52e
|
/r/man/lift.Rd
|
5ae5c4cf8c4ba553c4ca1f6d54f91dd7eb6fa72c
|
[
"Apache-2.0"
] |
permissive
|
shishehchi/cdh-datascientist-tools
|
b813af52f3aa34f855a9726a3082c0125cbfcd7f
|
129cbab2179f43c077c86a880d4c809a542fbd46
|
refs/heads/master
| 2023-08-23T22:19:24.009605
| 2021-11-15T21:36:41
| 2021-11-15T21:36:41
| 225,455,891
| 0
| 0
|
Apache-2.0
| 2021-11-15T21:38:23
| 2019-12-02T19:49:06
|
Jupyter Notebook
|
UTF-8
|
R
| false
| true
| 606
|
rd
|
lift.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/cdh_utils.R
\name{lift}
\alias{lift}
\title{Calculates lift from counts of positives and negatives.}
\usage{
lift(pos, neg)
}
\arguments{
\item{pos}{Vector with counts of the positive responses}
\item{neg}{Vector with counts of the negative responses}
}
\value{
A vector with lift values. Lift can be any positive number. A return value of 2 means 200% lift.
}
\description{
Lift is defined as the success rate in a single bin divided by the overall success rate.
}
\examples{
100*lift(c(0,119,59,69,0), c(50,387,105,40,37))
}
|
19afafb63c00496c59cc0d66f8484c3e420f6848
|
50df53284f5b28bd2a7462ba3f7de82b0a4009e5
|
/man/libproj_version.Rd
|
90786a2190575aaac743a2377376346c4b4cbdba
|
[
"MIT"
] |
permissive
|
minghao2016/libproj
|
335ef8bf5abc60a04f29269567cf6c5d421265d6
|
45d70fb2fdc0ef21c25c5d5fe47afd9320e45d2a
|
refs/heads/master
| 2022-12-06T17:41:25.139257
| 2020-08-26T15:38:54
| 2020-08-26T15:38:54
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 2,370
|
rd
|
libproj_version.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/libproj-package.R
\name{libproj_version}
\alias{libproj_version}
\alias{libproj_has_libtiff}
\alias{libproj_has_libcurl}
\alias{libproj_temp_dir}
\alias{with_libproj_configuration}
\alias{libproj_configuration}
\alias{libproj_configure}
\title{PROJ configuration}
\usage{
libproj_version()
libproj_has_libtiff()
libproj_has_libcurl()
libproj_temp_dir()
with_libproj_configuration(config, expr)
libproj_configuration()
libproj_configure(
search_path = c(system.file("proj", package = "libproj"),
getOption("libproj.search_path", NULL)),
db_path = getOption("libproj.db_path", system.file("proj/proj.db", package =
"libproj")),
user_writable_dir = getOption("libproj.user_writable_dir", libproj_temp_dir()),
network_endpoint = getOption("libproj.network_endpoint", "https://cdn.proj.org"),
network_enabled = getOption("libproj.network_enabled", FALSE)
)
}
\arguments{
\item{config}{A named \code{list()} with elements used to temporarily override elements of the
current \code{\link[=libproj_configuration]{libproj_configuration()}}.}
\item{expr}{An expression to evaluate with the specified state}
\item{search_path}{A character vector of paths where libproj will
look for resource files (e.g., gridshift, database, init files).
Defaults to the internal database and init files distributed
with the PROJ source.}
\item{db_path}{Independent of \code{search_path}, a character vector of
SQLite databases that may contain updated or alternative
coordinate operations from the default proj.db included in this
package. You must specify at least one database as part of this configuration;
do not rely on \code{search_path} to find this value for you.}
\item{user_writable_dir}{A directory that can safely be written to
by this package. This contains a cache of grid shift files downloaded
from the PROJ CDN at \code{network_endpoint} if using \code{with_libproj_network()}.}
\item{network_endpoint}{A mirror of the PROJ CDN of gridshift files. By default,
this is set to \url{https://cdn.proj.org}.}
\item{network_enabled}{Whether or not to download gridshift files on the fly.
This defaults to \code{FALSE}.}
}
\description{
PROJ configuration
}
\examples{
libproj_version()
libproj_has_libtiff()
libproj_has_libcurl()
libproj_temp_dir()
libproj_configuration()
}
|
1fd5feb9faa7c94ad5a6f65e2b44d63817cfef57
|
ddb833152a50f7c070e3da8842dca4b11c1a9c16
|
/R/control.rds.estimates.R
|
a57217f4121e19b7b53ba1877dd32cd5f3c4db08
|
[] |
no_license
|
Edouard-Legoupil/RDS
|
31f48a91d81254cda645b55ebf1b63a0ea87f4dc
|
9fc932ab45c76fc2232e2cd91e9839592a66ed7a
|
refs/heads/master
| 2023-02-23T17:43:12.623395
| 2021-01-26T20:25:40
| 2021-01-26T20:25:40
| 312,030,910
| 0
| 1
| null | 2021-01-26T20:18:01
| 2020-11-11T16:44:41
|
R
|
UTF-8
|
R
| false
| false
| 4,930
|
r
|
control.rds.estimates.R
|
utils::globalVariables(c(".control.rds.estimates"))
#' Auxiliary for Controlling RDS.bootstrap.intervals
#'
#' Auxiliary function as user interface for fine-tuning RDS.bootstrap.intervals algorithm,
#' which computes interval estimates for via bootstrapping.
#'
#' This function is only used within a call to the \code{\link{RDS.bootstrap.intervals}}
#' function.
#'
#' Some of the arguments are not yet fully implemented. It will evolve slower to incorporate more
#' arguments as the package develops.
#'
#' @param confidence.level The confidence level for the confidence intervals.
#' The default is 0.95 for 95\%.
#' @param SS.infinity The sample proportion, \code{n/N}, below which the computation
#' of the SS weights should simplify to that of the \code{RDS-II} weights.
#' @param lowprevalence Standard confidence interval procedures can be inaccurate when the
#' outcome expected count is close to zero. This sets conditions where alternatives to the
#' standard are used for the \code{ci.type="hmg"} option. See Details for its use.
#' @param discrete.cutoff The minimum proportion of the values of the outcome variable that
#' need to be unique before the variable is judged to be continuous.
#' @param useC Use a C-level implementation of Gile's bootstrap (rather than
#' the R level). The implementations should be computational
#' equivalent (except for speed).
#' @param number.of.bootstrap.samples The number of bootstrap samples to take
#' in estimating the uncertainty of the estimator. If \code{NULL} it defaults
#' to the number necessary to compute the standard error to accuracy 0.001.
#' @param hcg.reltol Relative convergence tolerance for the HCG estimator. The algorithm stops if
#' it is unable to reduce the log-likelihood by a factor of \code{reltol * (abs(log-likelihood) + reltol)}
#' at a step. Defaults to \code{sqrt(.Machine$double.eps)}, typically about \code{1e-8}.
#' @param hcg.BS.reltol Relative convergence tolerance for the bootstrap of the HCG estimator.
#' It has the same interpretation as \code{hcg.reltol} except it is applied to each bootstrap sample.
#' It is typically the same or larger than \code{hcg.reltol}.
#' @param hcg.max.optim The number of iterations on the likelihood optimization for the HCG estimator.
#' @param seed Seed value (integer) for the random number generator. See
#' \code{\link[base]{set.seed}}
#' @return A list with arguments as components.
#' @details Standard confidence interval procedures can be inaccurate when the
#' outcome expected count is close to zero. In these cases
#' the combined Agresti-Coull and the bootstrap-t interval of
#' Mantalos and Zografos (2008) can be used.
#' The \code{lowprevalence} argument is a
#' two vector parameter setting the conditions under which the approximation is used.
#' The first is the penalty term on the differential activity. If the observed number
#' of the rare group minus the product of the first parameter and the differential
#' activity is lower than the
#' second parameter, the low prevalence approximation is used.
#' @seealso \code{\link{RDS.bootstrap.intervals}}
#' @keywords models
#' @export
control.rds.estimates <- function(confidence.level = 0.95,
SS.infinity = 0.01,
lowprevalence = c(8, 14),
discrete.cutoff = 0.8,
useC = TRUE,
number.of.bootstrap.samples = NULL,
hcg.reltol=sqrt(.Machine$double.eps),
hcg.BS.reltol=100000*sqrt(.Machine$double.eps),
hcg.max.optim=500,
seed = NULL) {
formal.args <- formals(sys.function())
if (!exists(".control.rds.estimates")) {
control <- list()
for (arg in names(formal.args))
control[arg] <- list(get(arg))
} else{
control <- .control.rds.estimates
if (!missing(confidence.level)) {
control[["confidence.level"]] <- confidence.level
}
if (!missing(SS.infinity)) {
control[["SS.infinity"]] <- SS.infinity
}
if (!missing(lowprevalence)) {
control[["lowprevalence"]] <- lowprevalence
}
if (!missing(discrete.cutoff)) {
control[["discrete.cutoff"]] <- discrete.cutoff
}
if (!missing(useC)) {
control[["useC"]] <- useC
}
if (!missing(number.of.bootstrap.samples)) {
control[["number.of.bootstrap.samples"]] <-
number.of.bootstrap.samples
}
if (!missing(hcg.reltol)) {
control[["hcg.reltol"]] <- hcg.reltol
}
if (!missing(hcg.BS.reltol)) {
control[["hcg.BS.reltol"]] <- hcg.BS.reltol
}
if (!missing(hcg.max.optim)) {
control[["hcg.max.optim"]] <- hcg.max.optim
}
if (!missing(seed)) {
control[["seed"]] <- seed
}
}
RDS::set.control.class("control.rds.estimates")
}
|
e8e69748ea1b0c72bae7a658860a122ddfd11fe1
|
7eab7f927bf12c6b9cb1bb4ab72fd4d5db63c0dc
|
/lesson2/RE-1.R
|
7458438039b3e1b470e784dc750465f7e2139712
|
[] |
no_license
|
elect000/p-recog
|
63a804f820c573bf6e9548b2f884d7098005abcb
|
61b790682a2355e471345f63b3a55b615101f0a7
|
refs/heads/master
| 2020-03-31T23:59:08.847289
| 2018-12-18T19:06:49
| 2018-12-18T19:06:49
| 152,677,012
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 4,904
|
r
|
RE-1.R
|
# R 3.5.1 で実行確認
# import libraries
library(nnet)
library(MASS)
# レポート課題1.1
# 1)
# 隠れ素子の数を一つづつ増やし、10回の学習で10回とも正しく識別出来るようになった隠れ素子の数を求めなさい。
# 2)
# 隠れ素子の数によって誤識別率の平均がどのように変化するのかをグラフで示しなさい。
# 3)
# 隠れ素子が1個の場合に得られた学習結果について、結合係数の大きさの分布を示しなさい。
# 4)
# 10回とも正しく識別できた場合の学習結果について、結合係数の大きさの分布を示し、隠れ素子が1個の場合と比較検討しなさい。
hidden = c(1:40)
iter = c(1:10)
trave = rep(0, length(hidden))
res_s = c()
tr = matrix(0, length(iter), length(hidden))
z = 0
for (i in 1:length(hidden)) {
for (j in 1:length(iter)) {
res_s = c(list(nnet(classes~., data=xor, size=hidden[i], rang=0.1)), res_s)
out = predict(res_s[i][[1]], xor, type="class")
tr[j, i] = mean(out != xor$classes)
}
trave[i] = mean(tr[,i])
if(trave[i] == 0.0 && z == 0) {
z = i
}
}
res_s = rev(res_s)
# 1) 37
z
# 2) 1-1-2.png
plot(hidden, trave, type="b", lty=1, lwd=2)
# 3) 1-1-3.png
hist(res_s[1][[1]]$wts, breaks=seq(-0.1, 0.1, 0.04), freq=TRUE)
# 4) 1-1-4.png
hist(res_s[z][[1]]$wts, breaks=seq(-0.1, 0.1, 0.04), freq=TRUE)
# レポート課題1.2
# データの用意
ir <- data.frame(rbind(iris3[,,1], iris3[,,2], iris3[,,3]),
species=factor(c(rep("sv", 50), rep("c", 50),
rep("sv", 50))))
samp <- c(sample(1:50, 25), sample(51:100, 25), sample(101:150, 25))
# 1)
# decay=0 に設定し、隠れ素子の数を1にして学習データを用いて10回学習し、学習データに対する誤識別率の平均と、テストデータに対する誤識別率の平均を求めなさい。
# 隠れ素子の数を10まで1つずつ増やして同じことを行い、隠れ素子の数に対する再代入誤りと汎化誤差の変化をグラフ化しなさい。
# 2)
# 再代入誤りが一番小さな場合の結合係数の分布と、汎化誤差が一番小さな場合の結合係数の分布を比較検討しなさい。
# 3)
# decay=0.01 にして同様の実験を行い、隠れ素子数に対する再代入誤りと汎化誤差の変化をグラフ化しなさい。
# 4)
# 再代入誤りが一番小さな場合の結合係数の分布と、汎化誤差が一番小さな場合の結合係数の分布を、decay=0 の場合と比較しなさい。
hidden = c(1:10)
iter = c(1:10)
decay = 0
trave_learn = rep(0, length(hidden))
trave_test = rep(0, length(hidden))
res_s = c()
tr_learn = matrix(0, length(iter), length(hidden))
tr_test = matrix(0, length(iter), length(hidden))
for (i in 1:length(hidden)) {
for (j in 1:length(iter)) {
res_s = c(list(nnet(species~., data=ir[samp,], size=hidden[i], rang=0.5, decay=decay, maxit=200)), res_s)
out_learn = predict(res_s[i][[1]], ir[samp,], type="class")
out_test = predict(res_s[i][[1]], ir[-samp,], type="class")
tr_learn[j, i] = mean(out_learn != ir[samp,]$species)
tr_test[j, i] = mean(out_test != ir[-samp,]$species)
}
trave_learn[i] = mean(tr_learn[, i])
trave_test[i] = mean(tr_test[, i])
}
res_s = rev(res_s)
# 1) 1-2-1-1.png
plot(hidden, trave_learn, type="b", lty=1, lwd=2)
# 1) 1-2-1-2.png
plot(hidden, trave_test, type="b", lty=1, lwd=2)
# 2)
which.min(trave_learn) # 9
which.min(trave_test) # 10
# 2) 1-2-2-1.png
hist(res_s[which.min(trave_learn)][[1]]$wts, breaks=seq(-20, 20, 5), freq=TRUE)
# 2) 1-2-2-2.png
hist(res_s[which.min(trave_test)][[1]]$wts, breaks=seq(-20, 20, 5), freq=TRUE)
# 3)
hidden = c(1:10)
iter = c(1:10)
decay = 0.01
trave_learn = rep(0, length(hidden))
trave_test = rep(0, length(hidden))
res_s = c()
tr_learn = matrix(0, length(iter), length(hidden))
tr_test = matrix(0, length(iter), length(hidden))
for (i in 1:length(hidden)) {
for (j in 1:length(iter)) {
res_s = c(list(nnet(species~., data=ir[samp,], size=hidden[i], rang=0.5, decay=decay, maxit=200)), res_s)
out_learn = predict(res_s[i][[1]], ir[samp,], type="class")
out_test = predict(res_s[i][[1]], ir[-samp,], type="class")
tr_learn[j, i] = mean(out_learn != ir[samp,]$species)
tr_test[j, i] = mean(out_test != ir[-samp,]$species)
}
trave_learn[i] = mean(tr_learn[, i])
trave_test[i] = mean(tr_test[, i])
}
res_s = rev(res_s)
# 3) 1-2-3-1.png
plot(hidden, trave_learn, type="b", lty=1, lwd=2)
# 3) 1-2-3-2.png
plot(hidden, trave_test, type="b", lty=1, lwd=2)
# 4)
which.min(trave_learn) # 4
which.min(trave_test) # 4
# 4) 1-2-4-1.png
hist(res_s[which.min(trave_learn)][[1]]$wts, breaks=seq(-6, 6, 1), freq=TRUE)
# 4) 1-2-4-2.png
hist(res_s[which.min(trave_test)][[1]]$wts, breaks=seq(-6, 6, 1), freq=TRUE)
|
24e5f2099008c6d5518fd836ee632637adf38513
|
92d54f598099f13f7150d8a6fbf39d14e7371ff4
|
/R/dbDataType_PqConnection.R
|
1d9de51c90c4d1d830d45ff695d2cd14b2ee22b2
|
[
"MIT"
] |
permissive
|
r-dbi/RPostgres
|
3c44d9eabe682e866411b44095a4671cbad275af
|
58a052b20f046c95723c332a0bb06fdb9ed362c4
|
refs/heads/main
| 2023-08-18T09:48:04.523198
| 2023-07-11T02:17:42
| 2023-07-11T02:17:42
| 28,823,976
| 230
| 66
|
NOASSERTION
| 2023-08-31T08:20:25
| 2015-01-05T17:43:02
|
R
|
UTF-8
|
R
| false
| false
| 342
|
r
|
dbDataType_PqConnection.R
|
# dbSendQuery()
# dbSendStatement()
# dbDataType()
#' @rdname dbDataType
#' @usage NULL
dbDataType_PqConnection <- function(dbObj, obj, ...) {
if (is.data.frame(obj)) return(vapply(obj, dbDataType, "", dbObj = dbObj))
get_data_type(obj)
}
#' @rdname dbDataType
#' @export
setMethod("dbDataType", "PqConnection", dbDataType_PqConnection)
|
66ab83a2c912c191e93abdd3f62551f6c2c38ebe
|
0762056267ee05a56d6c45ecc6605430e0d4209a
|
/plot1.R
|
9c44ba8dbb60f5d38dbac306324f63b2a1258492
|
[] |
no_license
|
amrutaghare/Coursera-Exploratory-Data-Analysis-Week-1-Assignment
|
15a6779104a42e5cd542e0dc3af0e09d0769df52
|
13d9a67ed4221c3b87dda6e5577619c70773bbff
|
refs/heads/master
| 2021-04-26T23:12:25.587692
| 2018-03-05T16:39:07
| 2018-03-05T16:39:07
| 123,947,574
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 659
|
r
|
plot1.R
|
##Setting directory##
setwd("C:/Users/amrutag542/Documents/Study Material/Coursera/Exploratory Data Analysis/Week 1/Assignment 1")
##Reading data##
data<-read.csv("household_power_consumption.txt", header=T, sep=';', na.strings="?", stringsAsFactors=F,dec=".")
##Subsetting data##
data1 <-subset(data, Date %in% c("1/2/2007","2/2/2007"))
##Converting into date format##
data1$Date <- as.Date(data1$Date, format="%d/%m/%Y")
##Plotting a histogram and exporting it to the directory##
png("plot1.png", width=480, height=480)
hist(data1$Global_active_power, col="red", main="Global Active Power", xlab="Global Active Power (kilowatts)")
dev.off()
|
4c585b2769a09ef48cda90b6a45db78b97ed05c4
|
5a9fd2f7bf916a36f4accf37ea4dadea0577fd9b
|
/01_readData_2020.R
|
19cf6b95fd015a1f8d69d15f0785674b3ffa46bd
|
[] |
no_license
|
hnagaty/airbnb
|
fdebd73be2eead36aba915bc24659f320a8053e3
|
bf1c7209a97a07247f36a5d17a6e084dadec992f
|
refs/heads/main
| 2023-06-04T23:49:45.877888
| 2021-06-16T08:07:43
| 2021-06-16T08:07:43
| 377,419,633
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 5,758
|
r
|
01_readData_2020.R
|
# a redo of reading & basic data munging
# a redo on Jul-2020, during the COVID-19 lockdown
# this is basic exploration of files
# the actual reading is included in the script 03b_features.R
# this file is superceded by 01b_readData_2020.R
library(tidyverse)
library(anytime)
dataPath <- "/home/hnagaty/dataNAS/airbnb/"
dataPath <- '~/data/'
# Reading the users file --------------------------------------------------
trainUsersRaw <- read_csv(paste0(dataPath, "train_users_2.csv"),
col_types = cols(id = col_character(),
date_account_created = col_date(format = "%Y-%m-%d"),
timestamp_first_active = col_character(),
date_first_booking = col_date(format = "%Y-%m-%d"),
gender = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE),
age = col_number(),
signup_method = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE),
signup_flow = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE),
language = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE),
affiliate_channel = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE),
affiliate_provider = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE),
first_affiliate_tracked = col_factor(levels = NULL, ordered = FALSE, include_na = TRUE),
signup_app = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE),
first_device_type = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE),
first_browser = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE),
country_destination = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE)),
na=c("null","empty"))
glimpse(trainUsersRaw)
summary(trainUsersRaw) # %>% write.csv('trainUsersSummary.csv')
colSums(is.na(trainUsersRaw)) # %>% write.csv('caCounts.csv')
# Some levels do exist in train data but not in the test data
# I'm saving the levels here, for use with the train data
## DELAY THIS FOR NOW
# Countries file ----------------------------------------------------------
countries <- read_csv(paste0(dataPath, "countries.csv"),
col_types = cols(
country_destination = col_factor(levels=levels(trainUsersRaw$country_destination),ordered = FALSE, include_na = FALSE),
lat_destination = col_double(),
lng_destination = col_double(),
distance_km = col_double(),
destination_km2 = col_double(),
destination_language = col_factor(levels=levels(trainUsersRaw$language),ordered = FALSE, include_na = FALSE),
#destination_language = col_factor(levels = NULL, ordered = FALSE, include_na = FALSE),
language_levenshtein_distance = col_double()))
glimpse(countries)
summary(countries)
countries
# Age Buckets -------------------------------------------------------------
ageBuckets <- read_csv(paste0(dataPath,"age_gender_bkts.csv"),
col_types = cols(
age_bucket = col_character(),
country_destination = col_factor(levels = levels(trainUsersRaw$country_destination)),
gender = col_factor(levels = tolower(levels(trainUsersRaw$gender))),
population_in_thousands = col_number(),
year = col_skip() # it's alawys 2015, so I skipped it
))
glimpse(ageBuckets)
summary(ageBuckets)
ageBuckets
# Sessions ----------------------------------------------------------------
sessions <- read_csv(paste0(dataPath,"sessions.csv"),
col_types = cols(
user_id = col_character(),
action = col_factor(levels = NULL, ordered = FALSE, include_na = TRUE),
action_type = col_factor(levels = NULL, ordered = FALSE, include_na = TRUE),
action_detail = col_factor(levels = NULL, ordered = FALSE, include_na = TRUE),
device_type = col_factor(levels = NULL, ordered = FALSE, include_na = TRUE),
secs_elapsed = col_double()),
na=c("null","empty"))
glimpse(sessions)
summary(sessions)
str(sessions)
head(sessions)
sessions <- sessions %>%
filter(user_id!="") %>%
select(-action_type,-action)
str(sessions)
#summarize values
sessionsA <- sessions %>%
group_by(user_id,action_detail) %>%
summarise(Cnt=n()) %>%
spread(key=action_detail,value=Cnt,fill=0,sep="-")
sessionsB <- sessions %>%
group_by(user_id,device_type) %>%
summarise(Cnt=n()) %>%
spread(key=device_type,value=Cnt,fill=0,sep="-")
sessionsC <- sessions %>%
group_by(user_id) %>%
summarise(avgSecs=mean(secs_elapsed,na.rm = TRUE),
sumSecs=sum(secs_elapsed,na.rm=TRUE),
cntActions=n())
sessionsD <- sessions %>%
group_by(user_id,device_type) %>%
summarise(deviceCnt=n()) %>%
top_n(1,deviceCnt) %>%
rename(topDevice=device_type)
sessionPerUser <- sessionsA %>%
inner_join(sessionsB) %>%
inner_join(sessionsC)
|
65edccb37cabe8534fc131856df6cbb303e2f28c
|
b65269b268c9c672ce750ffcbbd578eb2ba09d70
|
/man/no_tab_linter.Rd
|
fd87c6e35923f427035b2387abef1a658995bfea
|
[
"MIT"
] |
permissive
|
dpprdan/lintr
|
8b2d2f92de81d033d23d713ea9497e1ce9aa82ea
|
67da292a1c4c81368aa1f0e82b1013f5ae86df4d
|
refs/heads/master
| 2023-01-22T17:55:23.234948
| 2023-01-17T16:33:53
| 2023-01-17T16:33:53
| 242,086,839
| 0
| 0
|
NOASSERTION
| 2020-02-21T08:09:52
| 2020-02-21T08:09:51
| null |
UTF-8
|
R
| false
| true
| 858
|
rd
|
no_tab_linter.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/no_tab_linter.R
\name{no_tab_linter}
\alias{no_tab_linter}
\title{No tab linter}
\usage{
no_tab_linter()
}
\description{
Check that only spaces are used for indentation, not tabs. Much ink has been
spilled on this topic, and we encourage you to check out references for more
information.
}
\examples{
# will produce lints
lint(
text = "\tx",
linters = no_tab_linter()
)
# okay
lint(
text = " x",
linters = no_tab_linter()
)
}
\references{
\itemize{
\item https://www.jwz.org/doc/tabs-vs-spaces.html
\item https://blog.codinghorror.com/death-to-the-space-infidels/
}
}
\seealso{
\link{linters} for a complete list of linters available in lintr.
}
\section{Tags}{
\link[=consistency_linters]{consistency}, \link[=default_linters]{default}, \link[=style_linters]{style}
}
|
5fb929d7c49870c361c705b7ade8fee41335b46d
|
a76e6b446f784d30e8e0eb761b816d92cf056934
|
/man/gmedian.Rd
|
5572d60c23b572195c6b981341c06944f4b5895f
|
[] |
no_license
|
tilltnet/ratingScaleSummary
|
55879033b905cc5370f10d82cb44e040eb94c680
|
c792c882eec2cef0cd0e8983518cf5f48bde6085
|
refs/heads/master
| 2021-01-11T19:48:45.545975
| 2017-01-19T22:59:24
| 2017-01-19T22:59:24
| 79,402,288
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| true
| 1,368
|
rd
|
gmedian.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/gmedian.r
\name{gmedian}
\alias{gmedian}
\alias{gmedian.factor}
\alias{gmedian.numeric}
\title{Estimates Median of a grouped frequency distribution.}
\usage{
gmedian(x, percentile = 0.5, scale_interval = 1, w = NULL)
\method{gmedian}{factor}(x, percentile = 0.5, scale_interval = 1,
w = NULL)
\method{gmedian}{numeric}(x, percentile = 0.5, scale_interval = 1,
w = NULL)
}
\arguments{
\item{x}{factor or numeric vector to draw the frequency distribution from.}
\item{percentile}{value between 0 and 1; default = 0.5 (median)}
\item{scale_interval}{group intervall; default = 1}
\item{w}{an optional vector of weights}
}
\value{
Estimated Median for the given grouped frequency distribution as a
numeric vector.
}
\description{
\code{gmedian} calculates the interpolated median of a grouped
frequency distribution.
}
\details{
\code{gmedian} can be used with \code{factor} and \code{numeric}
vectors.
If the values of \code{x} represent numeric ranges, like 1-10, 11-20, 21-30,
values should be \code{numeric} starting values of range and
\code{scale_interval} should be used to indicate the range intervall.
}
\examples{
lvls <- c("very important", "2", "3", "4", "5", "6", "not important")
x <- sample(lvls, size = 100, replace = T)
gmedian(x)
# 25\% Quartile
gmedian(x, 0.25)
}
|
89951181d5cdb353b99dedf852a5aecbeab63dd3
|
10867272feb49d6de65b0ff8ee29c07994268e2a
|
/Q14.R
|
fdb231b41bf04d0bf1904747517fb5ff4bcd4807
|
[] |
no_license
|
umairhanif00/R-Assignment-2
|
8d47e93176c5187c43857741bb3ed3ec793efd5a
|
36d78223433f50c57fb2724270144e4ee307c3d5
|
refs/heads/master
| 2021-01-23T04:39:44.261903
| 2017-03-27T11:33:47
| 2017-03-27T11:33:47
| 86,233,545
| 0
| 0
| null | 2017-03-26T13:12:26
| 2017-03-26T13:12:26
| null |
UTF-8
|
R
| false
| false
| 162
|
r
|
Q14.R
|
#Question 14
dataf$TotalCharges<-as.numeric(as.character(dataf$TotalCharges))
sum_of_charges <- sum(dataf$TotalCharges, na.rm = TRUE)
print(sum_of_charges)
|
cc7c5f5387cbe0c8ff06220d1f8eff3c5d1136e6
|
b8a6b9459a67085abe2b68dd90b97703eabe1a1f
|
/man/apply_combinations.Rd
|
7f6335effabac0b89dde6a84f7c4170ed725eba6
|
[] |
no_license
|
robjohnnoble/demonanalysis
|
5fd33509bcaa0f7c31ea57a90ffffb5070511a15
|
8ed218bf0b4162f85922a2c398e39dce8de7f90f
|
refs/heads/master
| 2022-11-10T11:48:20.158836
| 2020-06-29T15:12:50
| 2020-06-29T15:12:50
| 126,359,266
| 3
| 1
| null | null | null | null |
UTF-8
|
R
| false
| true
| 625
|
rd
|
apply_combinations.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/demonanalysis.R
\name{apply_combinations}
\alias{apply_combinations}
\title{Apply a function to every combination of some sequences}
\usage{
apply_combinations(vec, fn, ...)
}
\arguments{
\item{vec}{vector of final values of the sequences (initial values are always zero)}
\item{fn}{function to apply to the values}
\item{...}{other arguments passed to fn}
}
\value{
result of applying fn to every combination of vec values
}
\description{
Apply a function to every combination of some sequences
}
\examples{
apply_combinations(c(2, 3), mean)
}
|
f02fced37b3ef6ea9021035ba3af6fcfc211c08f
|
f5b0206714a952321940c5f78378ebe095d1c5a6
|
/1-Analysis-of-Missouri-Sex-Offender-Registry-Data/Missouri-Sex-Offenders.R
|
a6bd9019f29f461010e586d0516e168d4d07c436
|
[] |
no_license
|
EarlGlynn/MO-offenders-near-daycares
|
6175062b05b8a145fd39471272fb8335cc533963
|
546ad402fcff07042124d775c8a768ca4e0a0e1f
|
refs/heads/master
| 2021-01-21T22:26:49.099685
| 2015-01-03T19:58:34
| 2015-01-03T19:58:34
| 28,752,306
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 7,927
|
r
|
Missouri-Sex-Offenders.R
|
# Process Missouri Sex Offender Registry (MSOR) data transferred from bottom of page
# http://www.mshp.dps.mo.gov/MSHPWeb/PatrolDivisions/CRID/SOR/SORPage.html and
# prepare for geocoding the list.
#
# Each line in the Excel file is an offense committed by an offender. There are
# multiple lines of offenses for some offenders.
#
# Earl F Glynn
# Franklin Center for Government & Public Integrity
# 17 July 2011
#
# Setup steps
# 1. Extract msor.xls from msor.zip
# 2. msor.xls is an XML file (20 June 2011). Instead of parsing XML, let's
# open in Excel and re-save as Excel file Missouri-Sex-Offenders.xls
# 3. Delete first 13 rows of summary information in Missouri-Sex-Offenders.xls
# and save modified file.
# 4. Note worksheet name is "Sheet1"
################################################################################
### Read Missouri Sex Offender Registry (MSOR) data
library(RODBC) # Use RODBC to read the Excel file
setwd("C:/Franklin/Projects/2011/KSHB-Sex-Offenders/Missouri-Sex-Offender-Registry/") ### Project directory
connection <- odbcConnectExcel("Missouri-Sex-Offenders.xls")
msor <- sqlFetch(connection, "Sheet1", as.is=TRUE)
odbcClose(connection)
# Look at overview info of data.frame
dim(msor)
# 18234 9
colnames(msor)
str(msor)
################################################################################
# After reviewing the data several street addresses were noted that cannot be geocoded,
# like "Homeless" and "Unknown". Let's review top 10 most frequent addresses.
# Note: Some of the high-frequency adddress are prisons.
counts <- sort(table(msor$Address), decreasing=TRUE)[1:10]
data.frame(counts)
# Let's remove addresses that cannot be geocoded:
# Address counts (20 June 2011)
# --------------------------------------- ------
# Incarcerated in MO 2403
# Previously Exempt/Registration Required 749
# Moved Out of State 593
# Incarcerated Out of State 255
# HOMELESS 106
# UNKNOWN 94
# Other problem addresses found with lower frequencies:
# UNKNOWN ADDRESS
# ADDRESS UNKNOWN [future]
delete.list <- c("Incarcerated in MO", "Previously Exempt/Registration Required",
"Moved Out of State", "Incarcerated Out of State",
"HOMELESS", "UNKNOWN",
"UNKNOWN ADDRESS")
select <- msor$Address %in% delete.list
sum(select)
problem.addresses <- msor[select,]
write.csv(problem.addresses, "msor-problem-addresses.csv", row.names=FALSE)
dim(problem.addresses) # Offenses at problem addresses
# 4203 9
length(unique(problem.addresses$Name)) # Number of names of offenders at problem addresses
# 3543
# maintain original msor data.frame by using offense data.frame here
offense <- msor[!select,]
dim(offense)
# Since focus for now is on Missouri, let's exclude out-of-state names
# Stats on how many are out of state
table(offense$St)
# AR IA IL IN KS MO MS OK PA TN
# 7 5 63 1 102 13833 1 15 1 3
select <- offense$St != "MO"
out.of.state <- offense[select,]
write.csv(out.of.state, "msor-out-of-state.csv", row.names=FALSE)
nrow(out.of.state)
# 198
offense <- offense[!select,]
rownames(offense) <- 1:nrow(offense) # Renumber
nrow(offense)
counts <- sort(table(offense$Offense, useNA="ifany"), decreasing=TRUE)
counts <- data.frame(Offense=names(counts), Count=as.numeric(counts))
write.csv(counts, "msor-offender-offenses-counts.csv", row.names=FALSE)
################################################################################
# Create offender list from offense list
offender <- offense[,c("Name", "Address", "City", "St", "Zip", "County", "Compliant")]
nrow(offender)
offender <- unique(offender)
nrow(offender)
# Note: Remove "Compliant" above and the same number of offenders was found.
# Therefore, there appears to be no inconsistent "Compliant" fields by name.
# R doesn't like to put a header over the row names (or I don't know how), so
# add new first column "N" with a row number.
offender <- data.frame(N=1:nrow(offender), offender)
################################################################################
# Create input file for R geocoding script
geocode <- offender
# Add some fields to be used by the process. By design all "original" data
# starts with an upper case letter and the new geocoding fields are all in
# lower case. So, "Zip" is from the raw data and "zip" will be from geocoding.
# They should match but when they don't that's a sign of a geocoding problem.
# Some of the fields for geocoding are explained here
# http://code.google.com/apis/maps/documentation/geocoding/index.html
geocode$status <- ""
geocode$zip <- ""
geocode$county <- ""
geocode$state <- ""
geocode$lat <- ""
geocode$lng <- ""
geocode$location.type <- ""
geocode$formatted.address <- ""
geocode$result.count <- ""
# Write tab-delimited file
write.table(geocode, file="geocode-MO-offender-in-MASTER.txt",
sep="\t", quote=FALSE, row.names=FALSE)
################################################################################
# Use all fields in offender data.frame as compound key to connect back to
# offense data.frame
offender$key <- paste(offender$Name, offender$Address, offender$City, offender$St,
offender$Zip, offender$County, offender$Compliant, sep="|")
offense$key <- paste(offense$Name, offense$Address, offense$City, offense$St,
offense$Zip, offense$County, offense$Compliant, sep="|")
# Step throug all offenders and create a "Comments" field with the
# concatenated list of offenses. To be used as Comments for offender
# on Google maps.
offender$Comments <- ""
# There may be a way to vectorize this and speed this up, but this brute force
# approach works for now.
for (i in 1:nrow(offender))
{
if (i %% 1000 == 0)
{
cat(i, offender$key[i], "\n")
flush.console() # show progress
}
offenders.offenses <- offense[offender$key[i] == offense$key,]
offender$Comments[i] <- paste(offenders.offenses$Offense, collapse="; ")
}
# Drop key column and write data.frame to file
offender <- offender[,-which(colnames(offender) == "key")]
write.csv(offender, "msor-offender-master-file.csv", row.names=FALSE)
# Drop first column "N"
offender <- offender[,-1]
# These files are suitable for plotting using BatchGeo.com
# Create regional subsets (leave row name indices)
select <- offender$County %in% c("BUCHANAN","CASS", "CLAY", "JACKSON", "PLATTE")
write.csv(offender[select,], "msor-offender-kc-area.csv", row.names=FALSE)
select <- offender$County %in% c("ST. CHARLES", "ST. LOUIS", "ST. LOUIS CITY", "JEFFERSON ")
write.csv(offender[select,], "msor-offender-st-louis-area.csv", row.names=FALSE)
select <- offender$County %in% c("GREENE", "JASPER", "LAWRENCE")
write.csv(offender[select,], "msor-offender-joplin-springfield-area.csv", row.names=FALSE)
################################################################################
# Create summaries
# Offender compliance by county
ByCounty <- table(offender$County, offender$Compliant)
write.csv(ByCounty,"msor-offenders-compliance-by-county.csv")
# Mulitple offenders at same address (could miss variations in spelling, standardization)
complete.address <- paste(offender$Address, offender$City, offender$St, offender$Zip,
offender$County, sep="|")
counts <- table(complete.address)
Above2List <- data.frame(Address=names(counts), Count=as.numeric(counts))
Above2List <- Above2List[Above2List$Count > 2,]
Above2List <- Above2List[order(Above2List$Count,decreasing=TRUE),]
write.csv(Above2List, "msor-more-than-2-offenders-at-address.csv", row.names=FALSE)
|
fa63ece3e953d6ce8f8ded3027930cb97ead4b92
|
63704a6472534c2f1be52f998f6640d0f85f9e77
|
/man/sumStatsPar.Rd
|
164f4fcd3e353acb9fdeafe695fa5b0f28c349be
|
[] |
no_license
|
annavesely/sumSome
|
ebc9f1685918f62cddae78eaaf6502cdbcee4feb
|
f11ef3b60890c46ae92617d3d64ce0c0c6b875b6
|
refs/heads/master
| 2023-06-08T06:39:00.733593
| 2023-06-01T10:14:03
| 2023-06-01T10:14:03
| 324,800,427
| 0
| 1
| null | 2022-08-18T13:18:07
| 2020-12-27T16:19:16
|
R
|
UTF-8
|
R
| false
| true
| 2,588
|
rd
|
sumStatsPar.Rd
|
% Generated by roxygen2: do not edit by hand
% Please edit documentation in R/sumStatsPar.R
\name{sumStatsPar}
\alias{sumStatsPar}
\title{True Discovery Guarantee for Generic Statistics - Parametric}
\usage{
sumStatsPar(g, S = NULL, alpha = 0.05, cvs)
}
\arguments{
\item{g}{numeric vector of statistics.}
\item{S}{vector of indices for the variables of interest (if not specified, all variables).}
\item{alpha}{significance level.}
\item{cvs}{numeric vector of critical values for summed statistics considering \code{1:m} hypotheses.}
}
\value{
\code{sumStatsPar} returns an object of class \code{sumObj}, containing
\itemize{
\item \code{total}: total number of variables (length of \code{g})
\item \code{size}: size of \code{S}
\item \code{alpha}: significance level
\item \code{TD}: lower (1-\code{alpha})-confidence bound for the number of true discoveries in \code{S}
\item \code{maxTD}: maximum value of \code{TD} that could be found under convergence of the algorithm
\item \code{iterations}: number of iterations of the algorithm (\code{NULL})
}
}
\description{
This function uses generic statistics and a suitable vector of critical values
to determine confidence bounds for the number of true discoveries, the true discovery proportion
and the false discovery proportion within a set of interest.
The bounds are simultaneous over all sets, and remain valid under post-hoc selection.
}
\examples{
# generate vector of statistics for 5 variables (Fisher transformation of p-values)
g <- as.vector(simData(prop = 0.6, m = 5, B = 1, alpha = 0.4, seed = 42))
g <- -2 * log(g)
# subset of interest (variables 1 and 2)
S <- c(1,2)
# vector of critical values
cvs <- qchisq(p = 0.4, df = 2 * seq(5), lower.tail=FALSE)
# create object of class sumObj
res <- sumStatsPar(g, S, alpha = 0.4, cvs = cvs)
res
summary(res)
# lower confidence bound for the number of true discoveries in S
discoveries(res)
# lower confidence bound for the true discovery proportion in S
tdp(res)
# upper confidence bound for the false discovery proportion in S
fdp(res)
}
\references{
Goeman J. J. and Solari A. (2011). Multiple testing for exploratory research. Statistical Science, doi: 10.1214/11-STS356.
Tian J., Chen X., Katsevich E., Goeman J. J. and Ramdas A. (2022). Large-scale simultaneous inference under dependence. Scandinavian Journal of Statistics, doi: 10.1111/sjos.12614.
}
\seealso{
True discovery guarantee using p-values (parametric): \code{\link{sumPvalsPar}}
Access a \code{sumObj} object: \code{\link{discoveries}}, \code{\link{tdp}}, \code{\link{fdp}}
}
\author{
Xu Chen.
}
|
238f937da33915f5910c21cdb4808d306cc49987
|
2c1f7e1f84c5580c15b26ee3d1b87805e49c177c
|
/man/makeFoldsGLMcv.Rd
|
3079c49b6e7e8922fdb5c5392cc165a615813525
|
[] |
no_license
|
cran/porridge
|
d3ad06287f6fafd4d9f4ec9cd42f1eb10bf40a9e
|
c29cdbf217c365866ae02800769bd5939657db07
|
refs/heads/master
| 2022-05-29T01:25:25.412977
| 2022-05-20T12:20:02
| 2022-05-20T12:20:02
| 218,986,466
| 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 1,360
|
rd
|
makeFoldsGLMcv.Rd
|
\name{makeFoldsGLMcv}
\alias{makeFoldsGLMcv}
\title{
Generate folds for cross-validation of generalized linear models.
}
\description{
Function that evaluates the targeted ridge estimator of the regression parameter of generalized linear models.
}
\usage{
makeFoldsGLMcv(fold, Y, stratified=TRUE, model="linear")
}
\arguments{
\item{fold}{ An \code{integer}, the number of folds to be generated. }
\item{Y}{ A \code{numeric} being the response vector. }
\item{stratified}{ A \code{logical}. If \code{stratified=TRUE}, the folds are generated such the distribution of the response \code{Y} is (roughly) the same across folds. }
\item{model}{ A \code{character}, either \code{"linear"} and \code{"logistic"} (a reference to the models currently implemented), indicative of the type of response for stratification. }
}
\value{
A \code{list} of length \code{fold}. Each list item is a fold.
}
\author{
W.N. van Wieringen.
}
\examples{
# set the sample size
n <- 50
# set the true parameter
betas <- (c(0:100) - 50) / 20
# generate covariate data
X <- matrix(rnorm(length(betas)*n), nrow=n)
# sample the response
probs <- exp(tcrossprod(betas, X)[1,]) / (1 + exp(tcrossprod(betas, X)[1,]))
Y <- numeric()
for (i in 1:n){
Y <- c(Y, sample(c(0,1), 1, prob=c(1-probs[i], probs[i])))
}
# generate folds
folds <- makeFoldsGLMcv(10, Y, model="logistic")
}
|
a129043cd07764826de6995524c4199e6a0dcbdf
|
6d045a743a3d49ab9377c1a3b16d9b62ebadf401
|
/man/createLocalRepos.Rd
|
8d6bba569f64b20dd5b440406f5df1cc171445ed
|
[] |
no_license
|
rtaph/irutils
|
6388c9cf48d10b2edb2223f180566012d08b3cbe
|
cb980c84c0ef8132d1d8e789e97634bd8f56af33
|
refs/heads/master
| 2021-01-21T08:29:16.551007
| 2013-01-14T13:07:55
| 2013-01-14T13:07:55
| null | 0
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 466
|
rd
|
createLocalRepos.Rd
|
\name{createLocalRepos}
\alias{createLocalRepos}
\title{This package will download all packages for the given type to \code{local.repos}
from the given repository.}
\usage{
createLocalRepos(local.repos,
repos = "http://cran.r-project.org", type = "source")
}
\description{
This package will download all packages for the given
type to \code{local.repos} from the given repository.
}
\seealso{
\code{\link{download.packages}} which this function wraps
}
|
9f4e857a359ac21d468bab5356f34831100ab9fd
|
fa05945647038b74b196b9cc8027576576d413e5
|
/0604.R
|
fa407b580f802c9b2643654ee6b1e227ea234c1c
|
[] |
no_license
|
saeheeeom/RforKoreanJsonData
|
15ccacb35b93da0f25cb7319933dbb52b103cd44
|
02e1043ce7b990712b0d04bdb8bc8e72ebc0c67a
|
refs/heads/main
| 2023-06-06T05:55:53.322141
| 2021-06-24T11:12:59
| 2021-06-24T11:12:59
| 379,896,353
| 1
| 0
| null | null | null | null |
UTF-8
|
R
| false
| false
| 2,732
|
r
|
0604.R
|
# 0604 수업
# anova test: 두 개 이상의 그룹을 비교할 때 많이 사용
# 연속적인 값 비교할 때 (이산적X)
# aov()안에 넣어서 결과 도출
library(tidyverse)
library(jsonlite)
gss_cat
aov(tvhours ~ race, data=gss_cat )#보고자 하는 결과변수(종속변수), 결과변수에 영향을 주는 설명변수(독립변수)
aov(tvhours ~ race, data=gss_cat) %>% summary() # f값이 크면 클수록 설명변수의 영향이 뚜렷하게 나타남
aov(tvhours ~ marital, data=gss_cat) %>% summary()
# 감성분석
# 텍스트에 어떤 감정이 담겨 있는지 뷴석하는 방법
# 가장 기초적인 방법은 감성 사전(긍정/부정 어휘)에 기반하여 분석하는 방법
sentiment = fromJSON("https://raw.githubusercontent.com/park1200656/KnuSentiLex/master/data/SentiWord_info.json")
sentiment %>% str()
# 분석 원하는 데이터에 단어를 key로 삼아 join,
# 그 데이터에서 polarity 다 더해서 계산하든가, 발화자별로 합하든가, 등등
files = list.files(pattern = "^S.+json$", full.names = T)
get_utterance <- function(x) {
json_utterance = fromJSON(x) %>% .$document %>% .$utterance
bind_rows(json_utterance)
}
df_spoken = map(files, get_utterance) %>% bind_rows()
df_spoken = df_spoken %>% select(speaker_id, sentence = form)
# sentnce단위 만드는 법 (구버전)
df_collapse = df_spoken %>% group_by(speaker_id) %>% mutate(whole_sentence = sentence %>% unlist() %>% str_c(collapse = " ")) %>%
select(speaker_id, whole_sentence) %>% unique()
df_sentence = df_collapse %>%
mutate(sentence = str_split(whole_sentence, "(\\.|\\?|//!)")) %>%
unnest(sentence) %>% select(-whole_sentence)
# 새로운 버전
# svd
# 7개의 필름, 6명의 캐릭터에 대한 매트릭스 (6*7)
# idm val: 정사방 행렬이여야 하기 때문에 더 작은 숫자인 6개의 숫자
# 6개의 숫자는 각각 feature의 분산을 나타냄
# 해당 예시에서는 첫 두 숫자의 분산이 가장 크므로, 밑에 있는 매트릭스에서의
# 첫번째 두번째 feature를 보는 것이 정확함.
# u matrix: 6*6 캐릭터에 대한 벡터값. 한 관측값은 한 명의 캐릭터에 배정
name_film = starwars %>% select(name, films) %>% unnest(films)
name_film
names_film2 = name_film %>% count(name, films) %>%
pivot_wider(names_from = films, values_from = n, values_fill = 0 )
names_film2 %>% t()
df = names_film2 %>% filter(name %in% c("Luke Skywalker", "Chewbacca", "Palpatine", "Yoda", "Darth Vader", "Anakin Skywalker"))
df
df$name
df
df %>% colnames()
mat = df %>% select(-name) %>% as.matrix()
mat %>% svd()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.